code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
import sys
sys.path.append("..")  # make the course-level helpers module importable
import helpers
helpers.mask_busy_gpus(wait=False)  # claim a free GPU before keras/TF initialises

import nltk
import numpy as np

# Brown corpus tagged with the 12-tag "universal" tagset
nltk.download('brown')
nltk.download('universal_tagset')
data = nltk.corpus.brown.tagged_sents(tagset='universal')
# Special tokens come first so that #EOS# gets id 0 (doubles as the padding id)
# and #UNK# gets id 1.
all_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']
# Lower-case every word; dtype=object because sentences are ragged
# (numpy >= 1.24 refuses to build ragged arrays implicitly).
data = np.array([[(word.lower(), tag) for word, tag in sentence] for sentence in data], dtype=object)

# sklearn.cross_validation was removed in scikit-learn 0.20; train_test_split
# lives in sklearn.model_selection with an identical signature.
from sklearn.model_selection import train_test_split
train_data, test_data = train_test_split(data, test_size=0.25, random_state=42)

from collections import Counter
word_counts = Counter()
for sentence in data:
    words, tags = zip(*sentence)
    word_counts.update(words)

# Vocabulary: the two special tokens plus the 10k most frequent words.
all_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(10000)))[0])
# let's measure what fraction of data words are in the dictionary
print("Coverage = %.5f" % (float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))

from collections import defaultdict
# Out-of-vocabulary words fall back to id 1 (#UNK#).
word_to_id = defaultdict(lambda: 1, {word: i for i, word in enumerate(all_words)})
tag_to_id = {tag: i for i, tag in enumerate(all_tags)}
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32', time_major=False):
    """Encode a batch of token sequences as a padded id matrix.

    lines       : iterable of token sequences (words or tags)
    token_to_id : mapping from token to integer id
    max_len     : column count; when falsy, the longest sequence decides it
    pad         : fill value for positions past each sequence's end
    dtype       : numpy dtype of the result
    time_major  : when True, return shape [time, batch] instead of [batch, time]
    """
    max_len = max_len or max(map(len, lines))
    # Start from a matrix of pad values, then overwrite each row's prefix.
    matrix = np.full([len(lines), max_len], pad, dtype=dtype)
    for row, line in enumerate(lines):
        ids = [token_to_id[token] for token in line][:max_len]
        matrix[row, :len(ids)] = ids
    return matrix.T if time_major else matrix
# Sanity check: encode the last three sentences and print the id matrices.
batch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])
print("Word ids:")
print(to_matrix(batch_words,word_to_id))
print("Tag ids:")
print(to_matrix(batch_tags,tag_to_id))

import keras
import keras.layers as L
from keras.utils.np_utils import to_categorical

# Number of sentences per training batch.
BATCH_SIZE=32
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
    """Infinitely yield (word_id_matrix, one_hot_tag_tensor) training batches.

    sentences  : np.ndarray of (word, tag) sentences (ragged, dtype=object)
    batch_size : number of sentences per batch
    max_len    : optional cap on sequence length (default: longest in batch)
    pad        : id used for padding positions
    """
    assert isinstance(sentences, np.ndarray), "Make sure sentences is a numpy array"
    while True:
        # Fresh shuffle on every pass over the data.
        indices = np.random.permutation(np.arange(len(sentences)))
        # Iterate over the FULL index range: the original `len(indices)-1`
        # bound silently dropped the last sentence whenever it would have
        # formed a final batch on its own.
        for start in range(0, len(indices), batch_size):
            batch_indices = indices[start:start + batch_size]
            batch_words, batch_tags = [], []
            for sent in sentences[batch_indices]:
                words, tags = zip(*sent)
                batch_words.append(words)
                batch_tags.append(tags)
            batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
            batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
            # One-hot encode the tags -> shape [batch, time, n_tags].
            batch_tags_1hot = to_categorical(batch_tags, len(all_tags)).reshape(batch_tags.shape + (-1,))
            yield batch_words, batch_tags_1hot
def compute_test_accuracy(model):
    """Return token-level accuracy of `model` on test_data, ignoring padding."""
    words_batch, tags_batch = zip(*[zip(*sentence) for sentence in test_data])
    words_matrix = to_matrix(words_batch, word_to_id)
    tags_matrix = to_matrix(tags_batch, tag_to_id)

    # Tag probabilities have shape [batch, time, n_tags]; take the best tag
    # at every timestep.
    probabilities = model.predict(words_matrix, verbose=1)
    best_tags = probabilities.argmax(axis=-1)

    # Count correct predictions only where a real (non-pad) token exists.
    real_tokens = words_matrix != 0
    correct = np.sum(np.logical_and(best_tags == tags_matrix, real_tokens))
    total = np.sum(real_tokens)
    return float(correct) / total
class EvaluateAccuracy(keras.callbacks.Callback):
    """Keras callback that reports test-set tagging accuracy after each epoch."""
    def on_epoch_end(self,epoch,logs=None):
        # Flush before and after so the report is not interleaved with the
        # fit_generator progress-bar output.
        sys.stdout.flush()
        print("\nMeasuring validation accuracy...")
        # NOTE(review): this evaluates on test_data, so it is really test
        # accuracy being used as a validation signal during training.
        acc = compute_test_accuracy(self.model)
        print("\nValidation accuracy: %.5f\n"%acc)
        sys.stdout.flush()
# Token-level POS tagger: embedding -> dense blocks -> three stacked BiGRU
# layers interleaved with TimeDistributed dense / Conv1D blocks -> per-step
# softmax over the tag set.
model = keras.models.Sequential()  # the original constructed Sequential twice; once is enough
model.add(L.InputLayer([None], dtype='int32'))  # variable-length sequences of word ids
model.add(L.Embedding(len(all_words), 50))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation='tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation='tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation='tanh', recurrent_dropout=0.2, dropout=0.2)))
# Convolutional "n-gram" refinement (widths 2, 3, 4) over the recurrent features.
model.add(L.Conv1D(128, 2, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 3, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 4, padding='same', activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256, activation='tanh')))
model.add(L.Dropout(0.25))

# Per-timestep softmax over all tags.
stepwise_dense = L.Dense(len(all_tags), activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
model.add(stepwise_dense)

model.summary()
model.compile('adam', 'categorical_crossentropy')

# steps_per_epoch must be an integer; len(train_data)/BATCH_SIZE is a float
# under Python 3 and recent keras versions reject it.
steps_per_epoch = int(np.ceil(len(train_data) / BATCH_SIZE))
model.fit_generator(generate_batches(train_data), steps_per_epoch,
                    callbacks=[EvaluateAccuracy()], epochs=50)

acc = compute_test_accuracy(model)
print("\nFinal accuracy: %.5f" % acc)

# NOTE(review): filename says "LSTM ... 1layer" but the network is a stacked
# BiGRU/conv model — name kept for compatibility with existing checkpoints.
model.save_weights("LSTM_gpu_trained_weights_1layer.h5")
|
normal
|
{
"blob_id": "7f7ebc6d3d69fbb19071c63a9ab235ad01f1d414",
"index": 306,
"step-1": "<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\n<mask token>\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = 
compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('..')\n<mask token>\nhelpers.mask_busy_gpus(wait=False)\n<mask token>\nnltk.download('brown')\nnltk.download('universal_tagset')\n<mask token>\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\n<mask token>\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n 
test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\n<mask 
token>\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\n<mask token>\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n",
"step-4": "<mask token>\nsys.path.append('..')\n<mask token>\nhelpers.mask_busy_gpus(wait=False)\n<mask token>\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.',\n 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\ndata = np.array([[(word.lower(), tag) for word, tag in sentence] for\n sentence in data])\n<mask token>\ntrain_data, test_data = train_test_split(data, test_size=0.25, random_state=42)\n<mask token>\nword_counts = Counter()\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\nall_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(\n 10000)))[0])\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<mask token>\nword_to_id = defaultdict(lambda : 1, {word: i for i, word in enumerate(\n all_words)})\ntag_to_id = {tag: i for i, tag in enumerate(all_tags)}\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\nbatch_words, batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<mask token>\nBATCH_SIZE = 32\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, 
len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, 
return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nstepwise_dense = L.Dense(len(all_tags), activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\nacc = compute_test_accuracy(model)\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n",
"step-5": "import sys\nsys.path.append(\"..\")\nimport helpers\nhelpers.mask_busy_gpus(wait=False)\n\n\n\nimport nltk\n\nimport numpy as np\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\n\ndata = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ])\n\nfrom sklearn.cross_validation import train_test_split\ntrain_data,test_data = train_test_split(data,test_size=0.25,random_state=42)\n\nfrom collections import Counter\nword_counts = Counter()\nfor sentence in data:\n words,tags = zip(*sentence)\n \n word_counts.update(words)\n\nall_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0])\n#print(all_words)\n#let's measure what fraction of data words are in the dictionary\nprint(\"Coverage = %.5f\"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))\n\nfrom collections import defaultdict\nword_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)})\ntag_to_id = {tag:i for i,tag in enumerate(all_tags)}\n\ndef to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix\n\nbatch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\n\nprint(\"Word ids:\")\nprint(to_matrix(batch_words,word_to_id))\nprint(\"Tag ids:\")\nprint(to_matrix(batch_tags,tag_to_id))\n\nimport keras\nimport keras.layers as L\n\nfrom keras.utils.np_utils import to_categorical\nBATCH_SIZE=32\ndef 
generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):\n assert isinstance(sentences,np.ndarray),\"Make sure sentences is q numpy array\"\n \n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0,len(indices)-1,batch_size):\n batch_indices = indices[start:start+batch_size]\n batch_words,batch_tags = [],[]\n for sent in sentences[batch_indices]:\n words,tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n\n batch_words = to_matrix(batch_words,word_to_id,max_len,pad)\n batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad)\n\n batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,))\n yield batch_words,batch_tags_1hot\n \ndef compute_test_accuracy(model):\n test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id)\n\n #predict tag probabilities of shape [batch,time,n_tags]\n predicted_tag_probabilities = model.predict(test_words,verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n\n #compute accurary excluding padding\n numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0)))\n denominator = np.sum(test_words != 0)\n return float(numerator)/denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n def on_epoch_end(self,epoch,logs=None):\n sys.stdout.flush()\n print(\"\\nMeasuring validation accuracy...\")\n acc = compute_test_accuracy(self.model)\n print(\"\\nValidation accuracy: %.5f\\n\"%acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\n\nmodel = 
keras.models.Sequential()\nmodel.add(L.InputLayer([None],dtype='int32'))\nmodel.add(L.Embedding(len(all_words),50))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.Conv1D(32,3,padding='same',activation='tanh'))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#\n\n#\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.Conv1D(128,2,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,3,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,4,padding='same',activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\n#model.add(L.Dropout(0.25))\n\nstepwise_dense = L.Dense(len(all_tags),activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\n\nmodel.summary()\nmodel.compile('adam','categorical_crossentropy')\n\nmodel.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE,\n callbacks=[EvaluateAccuracy()], epochs=50,)\n\n\nacc = compute_test_accuracy(model)\nprint(\"\\nFinal accuracy: %.5f\"%acc)\n\nmodel.save_weights(\"LSTM_gpu_trained_weights_1layer.h5\")\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(stats.ttest_ind(a, b))
<|reserved_special_token_0|>
print(stats.ttest_ind(a, b))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = np.random.normal(25.0, 5.0, 10000)
b = np.random.normal(26.0, 5.0, 10000)
print(stats.ttest_ind(a, b))
b = np.random.normal(25.0, 5.0, 10000)
print(stats.ttest_ind(a, b))
<|reserved_special_token_1|>
import numpy as np
from scipy import stats
a = np.random.normal(25.0, 5.0, 10000)
b = np.random.normal(26.0, 5.0, 10000)
print(stats.ttest_ind(a, b))
b = np.random.normal(25.0, 5.0, 10000)
print(stats.ttest_ind(a, b))
<|reserved_special_token_1|>
import numpy as np
from scipy import stats
a = np.random.normal(25.0, 5.0, 10000)
b = np.random.normal(26.0, 5.0, 10000)
print(stats.ttest_ind(a, b)) # bad change, with a ery low chance of randomness
b = np.random.normal(25.0, 5.0, 10000)
print(stats.ttest_ind(a, b)) # no change, outcome is likely random
|
flexible
|
{
"blob_id": "ba85f3c8a9e40f30076c13487a97567f7bc646dc",
"index": 8041,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(stats.ttest_ind(a, b))\n<mask token>\nprint(stats.ttest_ind(a, b))\n",
"step-3": "<mask token>\na = np.random.normal(25.0, 5.0, 10000)\nb = np.random.normal(26.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\nb = np.random.normal(25.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\n",
"step-4": "import numpy as np\nfrom scipy import stats\na = np.random.normal(25.0, 5.0, 10000)\nb = np.random.normal(26.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\nb = np.random.normal(25.0, 5.0, 10000)\nprint(stats.ttest_ind(a, b))\n",
"step-5": "import numpy as np\r\nfrom scipy import stats\r\n\r\na = np.random.normal(25.0, 5.0, 10000)\r\nb = np.random.normal(26.0, 5.0, 10000)\r\n\r\nprint(stats.ttest_ind(a, b)) # bad change, with a ery low chance of randomness\r\n\r\nb = np.random.normal(25.0, 5.0, 10000)\r\nprint(stats.ttest_ind(a, b)) # no change, outcome is likely random\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Ui_Admin(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_Admin(object):
def setupUi(self, Admin):
Admin.setObjectName('Admin')
Admin.resize(679, 490)
self.centralwidget = QtWidgets.QWidget(Admin)
self.centralwidget.setObjectName('centralwidget')
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName('verticalLayout')
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName('horizontalLayout')
self.menu_btn = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
self.menu_btn.setFont(font)
self.menu_btn.setObjectName('menu_btn')
self.horizontalLayout.addWidget(self.menu_btn)
self.user_btn = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
self.user_btn.setFont(font)
self.user_btn.setObjectName('user_btn')
self.horizontalLayout.addWidget(self.user_btn)
self.order_btn = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
self.order_btn.setFont(font)
self.order_btn.setObjectName('order_btn')
self.horizontalLayout.addWidget(self.order_btn)
self.back = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
self.back.setFont(font)
self.back.setObjectName('back')
self.horizontalLayout.addWidget(self.back)
self.verticalLayout.addLayout(self.horizontalLayout)
self.infoTable = DataTableWidget(self.centralwidget)
self.infoTable.setObjectName('infoTable')
self.verticalLayout.addWidget(self.infoTable)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName('horizontalLayout_2')
self.save = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
self.save.setFont(font)
self.save.setObjectName('save')
self.horizontalLayout_2.addWidget(self.save)
self.original = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
self.original.setFont(font)
self.original.setObjectName('original')
self.horizontalLayout_2.addWidget(self.original)
self.fresh = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(15)
self.fresh.setFont(font)
self.fresh.setObjectName('fresh')
self.horizontalLayout_2.addWidget(self.fresh)
self.verticalLayout.addLayout(self.horizontalLayout_2)
Admin.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Admin)
self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))
self.menubar.setObjectName('menubar')
self.menu_4 = QtWidgets.QMenu(self.menubar)
self.menu_4.setObjectName('menu_4')
Admin.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Admin)
self.statusbar.setObjectName('statusbar')
Admin.setStatusBar(self.statusbar)
self.update1 = QtWidgets.QAction(Admin)
self.update1.setObjectName('update1')
self.add = QtWidgets.QAction(Admin)
self.add.setObjectName('add')
self.update2 = QtWidgets.QAction(Admin)
self.update2.setObjectName('update2')
self.delete_2 = QtWidgets.QAction(Admin)
self.delete_2.setObjectName('delete_2')
self.delete_3 = QtWidgets.QAction(Admin)
self.delete_3.setObjectName('delete_3')
self.add_2 = QtWidgets.QAction(Admin)
self.add_2.setObjectName('add_2')
self.help = QtWidgets.QAction(Admin)
self.help.setObjectName('help')
self.actionAllEmpty = QtWidgets.QAction(Admin)
self.actionAllEmpty.setObjectName('actionAllEmpty')
self.menu_4.addAction(self.help)
self.menubar.addAction(self.menu_4.menuAction())
self.retranslateUi(Admin)
QtCore.QMetaObject.connectSlotsByName(Admin)
self.model = DataFrameModel()
self.infoTable.setModel(self.model)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_Admin(object):
    """UI scaffold for the admin window.

    Builds a top row of navigation buttons, a pandas-backed data table in
    the centre, a bottom row of action buttons, and a menu/status bar.
    ``setupUi`` must be called once with the QMainWindow instance.
    """

    # Point size shared by every push button on this window.
    _BUTTON_POINT_SIZE = 15

    def _make_button(self, name):
        """Create a QPushButton child of the central widget.

        The button gets the shared 15pt font and *name* as its
        objectName (connectSlotsByName relies on the objectName).
        """
        button = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(self._BUTTON_POINT_SIZE)
        button.setFont(font)
        button.setObjectName(name)
        return button

    def _make_action(self, admin, name):
        """Create a QAction owned by *admin* with *name* as objectName."""
        action = QtWidgets.QAction(admin)
        action.setObjectName(name)
        return action

    def setupUi(self, Admin):
        """Build the widget tree on *Admin* and attach an empty data model."""
        Admin.setObjectName('Admin')
        Admin.resize(679, 490)
        self.centralwidget = QtWidgets.QWidget(Admin)
        self.centralwidget.setObjectName('centralwidget')
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName('verticalLayout')

        # Top row: navigation buttons.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName('horizontalLayout')
        self.menu_btn = self._make_button('menu_btn')
        self.horizontalLayout.addWidget(self.menu_btn)
        self.user_btn = self._make_button('user_btn')
        self.horizontalLayout.addWidget(self.user_btn)
        self.order_btn = self._make_button('order_btn')
        self.horizontalLayout.addWidget(self.order_btn)
        self.back = self._make_button('back')
        self.horizontalLayout.addWidget(self.back)
        self.verticalLayout.addLayout(self.horizontalLayout)

        # Centre: qtpandas table widget showing a DataFrame.
        self.infoTable = DataTableWidget(self.centralwidget)
        self.infoTable.setObjectName('infoTable')
        self.verticalLayout.addWidget(self.infoTable)

        # Bottom row: action buttons.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName('horizontalLayout_2')
        self.save = self._make_button('save')
        self.horizontalLayout_2.addWidget(self.save)
        self.original = self._make_button('original')
        self.horizontalLayout_2.addWidget(self.original)
        self.fresh = self._make_button('fresh')
        self.horizontalLayout_2.addWidget(self.fresh)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        Admin.setCentralWidget(self.centralwidget)

        # Menu bar and status bar.
        self.menubar = QtWidgets.QMenuBar(Admin)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))
        self.menubar.setObjectName('menubar')
        self.menu_4 = QtWidgets.QMenu(self.menubar)
        self.menu_4.setObjectName('menu_4')
        Admin.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(Admin)
        self.statusbar.setObjectName('statusbar')
        Admin.setStatusBar(self.statusbar)

        # Actions; only `help` is attached to the menu bar below.
        self.update1 = self._make_action(Admin, 'update1')
        self.add = self._make_action(Admin, 'add')
        self.update2 = self._make_action(Admin, 'update2')
        self.delete_2 = self._make_action(Admin, 'delete_2')
        self.delete_3 = self._make_action(Admin, 'delete_3')
        self.add_2 = self._make_action(Admin, 'add_2')
        self.help = self._make_action(Admin, 'help')
        self.actionAllEmpty = self._make_action(Admin, 'actionAllEmpty')
        self.menu_4.addAction(self.help)
        self.menubar.addAction(self.menu_4.menuAction())

        self.retranslateUi(Admin)
        QtCore.QMetaObject.connectSlotsByName(Admin)

        # Empty pandas model backing the table until real data is loaded.
        self.model = DataFrameModel()
        self.infoTable.setModel(self.model)

    def retranslateUi(self, Admin):
        """Install user-visible (translatable) texts on all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Admin.setWindowTitle(_translate('Admin', '后台管理界面'))
        self.menu_btn.setText(_translate('Admin', '菜单管理'))
        self.user_btn.setText(_translate('Admin', '用户管理'))
        self.order_btn.setText(_translate('Admin', '订单信息'))
        self.back.setText(_translate('Admin', '返回登录'))
        self.save.setText(_translate('Admin', '保存数据'))
        self.original.setText(_translate('Admin', '初始化'))
        self.fresh.setText(_translate('Admin', '刷新'))
        self.menu_4.setTitle(_translate('Admin', '帮助'))
        self.update1.setText(_translate('Admin', 'update'))
        self.add.setText(_translate('Admin', 'add'))
        self.update2.setText(_translate('Admin', 'update'))
        self.delete_2.setText(_translate('Admin', 'delete'))
        self.delete_3.setText(_translate('Admin', 'delete'))
        self.add_2.setText(_translate('Admin', 'add'))
        self.help.setText(_translate('Admin', 'help'))
        self.actionAllEmpty.setText(_translate('Admin', 'AllEmpty'))
<|reserved_special_token_1|>
from PyQt5 import QtCore, QtGui, QtWidgets
from qtpandas.views.DataTableView import DataTableWidget
from qtpandas.models.DataFrameModel import DataFrameModel
import pandas as pd
class Ui_Admin(object):
    """Widget definitions for the admin window: navigation buttons on top,
    a pandas data table in the middle, and action buttons at the bottom."""

    def setupUi(self, Admin):
        """Create every widget of the admin window on *Admin*."""
        Admin.setObjectName('Admin')
        Admin.resize(679, 490)
        self.centralwidget = QtWidgets.QWidget(Admin)
        self.centralwidget.setObjectName('centralwidget')
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName('verticalLayout')

        # Top navigation row: every button uses a 15pt font and is stored
        # as an attribute under its objectName.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName('horizontalLayout')
        for attr in ('menu_btn', 'user_btn', 'order_btn', 'back'):
            button = QtWidgets.QPushButton(self.centralwidget)
            font = QtGui.QFont()
            font.setPointSize(15)
            button.setFont(font)
            button.setObjectName(attr)
            setattr(self, attr, button)
            self.horizontalLayout.addWidget(button)
        self.verticalLayout.addLayout(self.horizontalLayout)

        # Centre: editable DataFrame view from qtpandas.
        self.infoTable = DataTableWidget(self.centralwidget)
        self.infoTable.setObjectName('infoTable')
        self.verticalLayout.addWidget(self.infoTable)

        # Bottom action row, built the same way as the top row.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName('horizontalLayout_2')
        for attr in ('save', 'original', 'fresh'):
            button = QtWidgets.QPushButton(self.centralwidget)
            font = QtGui.QFont()
            font.setPointSize(15)
            button.setFont(font)
            button.setObjectName(attr)
            setattr(self, attr, button)
            self.horizontalLayout_2.addWidget(button)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        Admin.setCentralWidget(self.centralwidget)

        # Menu bar with a single menu, plus a status bar.
        self.menubar = QtWidgets.QMenuBar(Admin)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))
        self.menubar.setObjectName('menubar')
        self.menu_4 = QtWidgets.QMenu(self.menubar)
        self.menu_4.setObjectName('menu_4')
        Admin.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(Admin)
        self.statusbar.setObjectName('statusbar')
        Admin.setStatusBar(self.statusbar)

        # QAction objects; only `help` ends up attached to the menu bar.
        for attr in ('update1', 'add', 'update2', 'delete_2',
                     'delete_3', 'add_2', 'help', 'actionAllEmpty'):
            action = QtWidgets.QAction(Admin)
            action.setObjectName(attr)
            setattr(self, attr, action)
        self.menu_4.addAction(self.help)
        self.menubar.addAction(self.menu_4.menuAction())

        self.retranslateUi(Admin)
        QtCore.QMetaObject.connectSlotsByName(Admin)

        # Back the table with an empty DataFrame model.
        self.model = DataFrameModel()
        self.infoTable.setModel(self.model)

    def retranslateUi(self, Admin):
        """Apply translated captions to every widget and action."""
        _translate = QtCore.QCoreApplication.translate
        Admin.setWindowTitle(_translate('Admin', '后台管理界面'))
        button_texts = (
            ('menu_btn', '菜单管理'), ('user_btn', '用户管理'),
            ('order_btn', '订单信息'), ('back', '返回登录'),
            ('save', '保存数据'), ('original', '初始化'), ('fresh', '刷新'),
        )
        for attr, text in button_texts:
            getattr(self, attr).setText(_translate('Admin', text))
        self.menu_4.setTitle(_translate('Admin', '帮助'))
        action_texts = (
            ('update1', 'update'), ('add', 'add'), ('update2', 'update'),
            ('delete_2', 'delete'), ('delete_3', 'delete'), ('add_2', 'add'),
            ('help', 'help'), ('actionAllEmpty', 'AllEmpty'),
        )
        for attr, text in action_texts:
            getattr(self, attr).setText(_translate('Admin', text))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Admin.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from qtpandas.views.DataTableView import DataTableWidget
from qtpandas.models.DataFrameModel import DataFrameModel
import pandas as pd
class Ui_Admin(object):
    """pyuic5-generated UI definition for the admin main window.

    Regenerated from ``Admin.ui``; manual edits to this class are lost on
    the next generation run (see the file header warning).
    """

    def setupUi(self, Admin):
        """Build the widget tree on the *Admin* QMainWindow instance."""
        Admin.setObjectName("Admin")
        Admin.resize(679, 490)
        self.centralwidget = QtWidgets.QWidget(Admin)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: navigation buttons, each with a 15pt font.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.menu_btn = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.menu_btn.setFont(font)
        self.menu_btn.setObjectName("menu_btn")
        self.horizontalLayout.addWidget(self.menu_btn)
        self.user_btn = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.user_btn.setFont(font)
        self.user_btn.setObjectName("user_btn")
        self.horizontalLayout.addWidget(self.user_btn)
        self.order_btn = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.order_btn.setFont(font)
        self.order_btn.setObjectName("order_btn")
        self.horizontalLayout.addWidget(self.order_btn)
        self.back = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.back.setFont(font)
        self.back.setObjectName("back")
        self.horizontalLayout.addWidget(self.back)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Centre: qtpandas table widget displaying a DataFrame.
        self.infoTable = DataTableWidget(self.centralwidget)
        self.infoTable.setObjectName("infoTable")
        self.verticalLayout.addWidget(self.infoTable)
        # Bottom row: action buttons (save / reset / refresh).
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.save = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.save.setFont(font)
        self.save.setObjectName("save")
        self.horizontalLayout_2.addWidget(self.save)
        self.original = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.original.setFont(font)
        self.original.setObjectName("original")
        self.horizontalLayout_2.addWidget(self.original)
        self.fresh = QtWidgets.QPushButton(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(15)
        self.fresh.setFont(font)
        self.fresh.setObjectName("fresh")
        self.horizontalLayout_2.addWidget(self.fresh)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        Admin.setCentralWidget(self.centralwidget)
        # Menu bar with a single menu, plus a status bar.
        self.menubar = QtWidgets.QMenuBar(Admin)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))
        self.menubar.setObjectName("menubar")
        self.menu_4 = QtWidgets.QMenu(self.menubar)
        self.menu_4.setObjectName("menu_4")
        Admin.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(Admin)
        self.statusbar.setObjectName("statusbar")
        Admin.setStatusBar(self.statusbar)
        # QAction objects; only `help` is attached to the menu bar below.
        self.update1 = QtWidgets.QAction(Admin)
        self.update1.setObjectName("update1")
        self.add = QtWidgets.QAction(Admin)
        self.add.setObjectName("add")
        self.update2 = QtWidgets.QAction(Admin)
        self.update2.setObjectName("update2")
        self.delete_2 = QtWidgets.QAction(Admin)
        self.delete_2.setObjectName("delete_2")
        self.delete_3 = QtWidgets.QAction(Admin)
        self.delete_3.setObjectName("delete_3")
        self.add_2 = QtWidgets.QAction(Admin)
        self.add_2.setObjectName("add_2")
        self.help = QtWidgets.QAction(Admin)
        self.help.setObjectName("help")
        self.actionAllEmpty = QtWidgets.QAction(Admin)
        self.actionAllEmpty.setObjectName("actionAllEmpty")
        self.menu_4.addAction(self.help)
        self.menubar.addAction(self.menu_4.menuAction())
        self.retranslateUi(Admin)
        # connectSlotsByName matches on the objectName strings set above.
        QtCore.QMetaObject.connectSlotsByName(Admin)
        # Empty DataFrame model backing the table until data is loaded.
        self.model=DataFrameModel()
        self.infoTable.setModel(self.model)

    def retranslateUi(self, Admin):
        """Set all user-visible texts via Qt's translation machinery."""
        _translate = QtCore.QCoreApplication.translate
        Admin.setWindowTitle(_translate("Admin", "后台管理界面"))
        self.menu_btn.setText(_translate("Admin", "菜单管理"))
        self.user_btn.setText(_translate("Admin", "用户管理"))
        self.order_btn.setText(_translate("Admin", "订单信息"))
        self.back.setText(_translate("Admin", "返回登录"))
        self.save.setText(_translate("Admin", "保存数据"))
        self.original.setText(_translate("Admin", "初始化"))
        self.fresh.setText(_translate("Admin", "刷新"))
        self.menu_4.setTitle(_translate("Admin", "帮助"))
        self.update1.setText(_translate("Admin", "update"))
        self.add.setText(_translate("Admin", "add"))
        self.update2.setText(_translate("Admin", "update"))
        self.delete_2.setText(_translate("Admin", "delete"))
        self.delete_3.setText(_translate("Admin", "delete"))
        self.add_2.setText(_translate("Admin", "add"))
        self.help.setText(_translate("Admin", "help"))
        self.actionAllEmpty.setText(_translate("Admin", "AllEmpty"))
|
flexible
|
{
"blob_id": "5e2a8e95af88a582b6e760a53dfd41f880d66963",
"index": 2670,
"step-1": "<mask token>\n\n\nclass Ui_Admin(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_Admin(object):\n\n def setupUi(self, Admin):\n Admin.setObjectName('Admin')\n Admin.resize(679, 490)\n self.centralwidget = QtWidgets.QWidget(Admin)\n self.centralwidget.setObjectName('centralwidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName('verticalLayout')\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName('horizontalLayout')\n self.menu_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.menu_btn.setFont(font)\n self.menu_btn.setObjectName('menu_btn')\n self.horizontalLayout.addWidget(self.menu_btn)\n self.user_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.user_btn.setFont(font)\n self.user_btn.setObjectName('user_btn')\n self.horizontalLayout.addWidget(self.user_btn)\n self.order_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.order_btn.setFont(font)\n self.order_btn.setObjectName('order_btn')\n self.horizontalLayout.addWidget(self.order_btn)\n self.back = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.back.setFont(font)\n self.back.setObjectName('back')\n self.horizontalLayout.addWidget(self.back)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.infoTable = DataTableWidget(self.centralwidget)\n self.infoTable.setObjectName('infoTable')\n self.verticalLayout.addWidget(self.infoTable)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName('horizontalLayout_2')\n self.save = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.save.setFont(font)\n self.save.setObjectName('save')\n self.horizontalLayout_2.addWidget(self.save)\n self.original = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n 
self.original.setFont(font)\n self.original.setObjectName('original')\n self.horizontalLayout_2.addWidget(self.original)\n self.fresh = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.fresh.setFont(font)\n self.fresh.setObjectName('fresh')\n self.horizontalLayout_2.addWidget(self.fresh)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n Admin.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Admin)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))\n self.menubar.setObjectName('menubar')\n self.menu_4 = QtWidgets.QMenu(self.menubar)\n self.menu_4.setObjectName('menu_4')\n Admin.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Admin)\n self.statusbar.setObjectName('statusbar')\n Admin.setStatusBar(self.statusbar)\n self.update1 = QtWidgets.QAction(Admin)\n self.update1.setObjectName('update1')\n self.add = QtWidgets.QAction(Admin)\n self.add.setObjectName('add')\n self.update2 = QtWidgets.QAction(Admin)\n self.update2.setObjectName('update2')\n self.delete_2 = QtWidgets.QAction(Admin)\n self.delete_2.setObjectName('delete_2')\n self.delete_3 = QtWidgets.QAction(Admin)\n self.delete_3.setObjectName('delete_3')\n self.add_2 = QtWidgets.QAction(Admin)\n self.add_2.setObjectName('add_2')\n self.help = QtWidgets.QAction(Admin)\n self.help.setObjectName('help')\n self.actionAllEmpty = QtWidgets.QAction(Admin)\n self.actionAllEmpty.setObjectName('actionAllEmpty')\n self.menu_4.addAction(self.help)\n self.menubar.addAction(self.menu_4.menuAction())\n self.retranslateUi(Admin)\n QtCore.QMetaObject.connectSlotsByName(Admin)\n self.model = DataFrameModel()\n self.infoTable.setModel(self.model)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_Admin(object):\n\n def setupUi(self, Admin):\n Admin.setObjectName('Admin')\n Admin.resize(679, 490)\n self.centralwidget = QtWidgets.QWidget(Admin)\n self.centralwidget.setObjectName('centralwidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName('verticalLayout')\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName('horizontalLayout')\n self.menu_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.menu_btn.setFont(font)\n self.menu_btn.setObjectName('menu_btn')\n self.horizontalLayout.addWidget(self.menu_btn)\n self.user_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.user_btn.setFont(font)\n self.user_btn.setObjectName('user_btn')\n self.horizontalLayout.addWidget(self.user_btn)\n self.order_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.order_btn.setFont(font)\n self.order_btn.setObjectName('order_btn')\n self.horizontalLayout.addWidget(self.order_btn)\n self.back = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.back.setFont(font)\n self.back.setObjectName('back')\n self.horizontalLayout.addWidget(self.back)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.infoTable = DataTableWidget(self.centralwidget)\n self.infoTable.setObjectName('infoTable')\n self.verticalLayout.addWidget(self.infoTable)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName('horizontalLayout_2')\n self.save = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.save.setFont(font)\n self.save.setObjectName('save')\n self.horizontalLayout_2.addWidget(self.save)\n self.original = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n 
self.original.setFont(font)\n self.original.setObjectName('original')\n self.horizontalLayout_2.addWidget(self.original)\n self.fresh = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.fresh.setFont(font)\n self.fresh.setObjectName('fresh')\n self.horizontalLayout_2.addWidget(self.fresh)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n Admin.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Admin)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))\n self.menubar.setObjectName('menubar')\n self.menu_4 = QtWidgets.QMenu(self.menubar)\n self.menu_4.setObjectName('menu_4')\n Admin.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Admin)\n self.statusbar.setObjectName('statusbar')\n Admin.setStatusBar(self.statusbar)\n self.update1 = QtWidgets.QAction(Admin)\n self.update1.setObjectName('update1')\n self.add = QtWidgets.QAction(Admin)\n self.add.setObjectName('add')\n self.update2 = QtWidgets.QAction(Admin)\n self.update2.setObjectName('update2')\n self.delete_2 = QtWidgets.QAction(Admin)\n self.delete_2.setObjectName('delete_2')\n self.delete_3 = QtWidgets.QAction(Admin)\n self.delete_3.setObjectName('delete_3')\n self.add_2 = QtWidgets.QAction(Admin)\n self.add_2.setObjectName('add_2')\n self.help = QtWidgets.QAction(Admin)\n self.help.setObjectName('help')\n self.actionAllEmpty = QtWidgets.QAction(Admin)\n self.actionAllEmpty.setObjectName('actionAllEmpty')\n self.menu_4.addAction(self.help)\n self.menubar.addAction(self.menu_4.menuAction())\n self.retranslateUi(Admin)\n QtCore.QMetaObject.connectSlotsByName(Admin)\n self.model = DataFrameModel()\n self.infoTable.setModel(self.model)\n\n def retranslateUi(self, Admin):\n _translate = QtCore.QCoreApplication.translate\n Admin.setWindowTitle(_translate('Admin', '后台管理界面'))\n self.menu_btn.setText(_translate('Admin', '菜单管理'))\n self.user_btn.setText(_translate('Admin', '用户管理'))\n self.order_btn.setText(_translate('Admin', 
'订单信息'))\n self.back.setText(_translate('Admin', '返回登录'))\n self.save.setText(_translate('Admin', '保存数据'))\n self.original.setText(_translate('Admin', '初始化'))\n self.fresh.setText(_translate('Admin', '刷新'))\n self.menu_4.setTitle(_translate('Admin', '帮助'))\n self.update1.setText(_translate('Admin', 'update'))\n self.add.setText(_translate('Admin', 'add'))\n self.update2.setText(_translate('Admin', 'update'))\n self.delete_2.setText(_translate('Admin', 'delete'))\n self.delete_3.setText(_translate('Admin', 'delete'))\n self.add_2.setText(_translate('Admin', 'add'))\n self.help.setText(_translate('Admin', 'help'))\n self.actionAllEmpty.setText(_translate('Admin', 'AllEmpty'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\nfrom qtpandas.views.DataTableView import DataTableWidget\nfrom qtpandas.models.DataFrameModel import DataFrameModel\nimport pandas as pd\n\n\nclass Ui_Admin(object):\n\n def setupUi(self, Admin):\n Admin.setObjectName('Admin')\n Admin.resize(679, 490)\n self.centralwidget = QtWidgets.QWidget(Admin)\n self.centralwidget.setObjectName('centralwidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName('verticalLayout')\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName('horizontalLayout')\n self.menu_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.menu_btn.setFont(font)\n self.menu_btn.setObjectName('menu_btn')\n self.horizontalLayout.addWidget(self.menu_btn)\n self.user_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.user_btn.setFont(font)\n self.user_btn.setObjectName('user_btn')\n self.horizontalLayout.addWidget(self.user_btn)\n self.order_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.order_btn.setFont(font)\n self.order_btn.setObjectName('order_btn')\n self.horizontalLayout.addWidget(self.order_btn)\n self.back = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.back.setFont(font)\n self.back.setObjectName('back')\n self.horizontalLayout.addWidget(self.back)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.infoTable = DataTableWidget(self.centralwidget)\n self.infoTable.setObjectName('infoTable')\n self.verticalLayout.addWidget(self.infoTable)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName('horizontalLayout_2')\n self.save = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.save.setFont(font)\n 
self.save.setObjectName('save')\n self.horizontalLayout_2.addWidget(self.save)\n self.original = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.original.setFont(font)\n self.original.setObjectName('original')\n self.horizontalLayout_2.addWidget(self.original)\n self.fresh = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.fresh.setFont(font)\n self.fresh.setObjectName('fresh')\n self.horizontalLayout_2.addWidget(self.fresh)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n Admin.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Admin)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))\n self.menubar.setObjectName('menubar')\n self.menu_4 = QtWidgets.QMenu(self.menubar)\n self.menu_4.setObjectName('menu_4')\n Admin.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Admin)\n self.statusbar.setObjectName('statusbar')\n Admin.setStatusBar(self.statusbar)\n self.update1 = QtWidgets.QAction(Admin)\n self.update1.setObjectName('update1')\n self.add = QtWidgets.QAction(Admin)\n self.add.setObjectName('add')\n self.update2 = QtWidgets.QAction(Admin)\n self.update2.setObjectName('update2')\n self.delete_2 = QtWidgets.QAction(Admin)\n self.delete_2.setObjectName('delete_2')\n self.delete_3 = QtWidgets.QAction(Admin)\n self.delete_3.setObjectName('delete_3')\n self.add_2 = QtWidgets.QAction(Admin)\n self.add_2.setObjectName('add_2')\n self.help = QtWidgets.QAction(Admin)\n self.help.setObjectName('help')\n self.actionAllEmpty = QtWidgets.QAction(Admin)\n self.actionAllEmpty.setObjectName('actionAllEmpty')\n self.menu_4.addAction(self.help)\n self.menubar.addAction(self.menu_4.menuAction())\n self.retranslateUi(Admin)\n QtCore.QMetaObject.connectSlotsByName(Admin)\n self.model = DataFrameModel()\n self.infoTable.setModel(self.model)\n\n def retranslateUi(self, Admin):\n _translate = QtCore.QCoreApplication.translate\n 
Admin.setWindowTitle(_translate('Admin', '后台管理界面'))\n self.menu_btn.setText(_translate('Admin', '菜单管理'))\n self.user_btn.setText(_translate('Admin', '用户管理'))\n self.order_btn.setText(_translate('Admin', '订单信息'))\n self.back.setText(_translate('Admin', '返回登录'))\n self.save.setText(_translate('Admin', '保存数据'))\n self.original.setText(_translate('Admin', '初始化'))\n self.fresh.setText(_translate('Admin', '刷新'))\n self.menu_4.setTitle(_translate('Admin', '帮助'))\n self.update1.setText(_translate('Admin', 'update'))\n self.add.setText(_translate('Admin', 'add'))\n self.update2.setText(_translate('Admin', 'update'))\n self.delete_2.setText(_translate('Admin', 'delete'))\n self.delete_3.setText(_translate('Admin', 'delete'))\n self.add_2.setText(_translate('Admin', 'add'))\n self.help.setText(_translate('Admin', 'help'))\n self.actionAllEmpty.setText(_translate('Admin', 'AllEmpty'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'Admin.ui'\n#\n# Created by: PyQt5 UI code generator 5.12\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom qtpandas.views.DataTableView import DataTableWidget\nfrom qtpandas.models.DataFrameModel import DataFrameModel\nimport pandas as pd\n\nclass Ui_Admin(object):\n def setupUi(self, Admin):\n Admin.setObjectName(\"Admin\")\n Admin.resize(679, 490)\n self.centralwidget = QtWidgets.QWidget(Admin)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.menu_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.menu_btn.setFont(font)\n self.menu_btn.setObjectName(\"menu_btn\")\n self.horizontalLayout.addWidget(self.menu_btn)\n self.user_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.user_btn.setFont(font)\n self.user_btn.setObjectName(\"user_btn\")\n self.horizontalLayout.addWidget(self.user_btn)\n self.order_btn = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.order_btn.setFont(font)\n self.order_btn.setObjectName(\"order_btn\")\n self.horizontalLayout.addWidget(self.order_btn)\n self.back = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.back.setFont(font)\n self.back.setObjectName(\"back\")\n self.horizontalLayout.addWidget(self.back)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.infoTable = DataTableWidget(self.centralwidget)\n self.infoTable.setObjectName(\"infoTable\")\n self.verticalLayout.addWidget(self.infoTable)\n self.horizontalLayout_2 = 
QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n \n self.save = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.save.setFont(font)\n self.save.setObjectName(\"save\")\n self.horizontalLayout_2.addWidget(self.save)\n \n self.original = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.original.setFont(font)\n self.original.setObjectName(\"original\")\n self.horizontalLayout_2.addWidget(self.original)\n \n self.fresh = QtWidgets.QPushButton(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(15)\n self.fresh.setFont(font)\n self.fresh.setObjectName(\"fresh\")\n self.horizontalLayout_2.addWidget(self.fresh)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n Admin.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Admin)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 679, 23))\n self.menubar.setObjectName(\"menubar\")\n self.menu_4 = QtWidgets.QMenu(self.menubar)\n self.menu_4.setObjectName(\"menu_4\")\n Admin.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Admin)\n self.statusbar.setObjectName(\"statusbar\")\n Admin.setStatusBar(self.statusbar)\n self.update1 = QtWidgets.QAction(Admin)\n self.update1.setObjectName(\"update1\")\n self.add = QtWidgets.QAction(Admin)\n self.add.setObjectName(\"add\")\n self.update2 = QtWidgets.QAction(Admin)\n self.update2.setObjectName(\"update2\")\n self.delete_2 = QtWidgets.QAction(Admin)\n self.delete_2.setObjectName(\"delete_2\")\n self.delete_3 = QtWidgets.QAction(Admin)\n self.delete_3.setObjectName(\"delete_3\")\n self.add_2 = QtWidgets.QAction(Admin)\n self.add_2.setObjectName(\"add_2\")\n self.help = QtWidgets.QAction(Admin)\n self.help.setObjectName(\"help\")\n self.actionAllEmpty = QtWidgets.QAction(Admin)\n self.actionAllEmpty.setObjectName(\"actionAllEmpty\")\n self.menu_4.addAction(self.help)\n 
self.menubar.addAction(self.menu_4.menuAction())\n\n self.retranslateUi(Admin)\n QtCore.QMetaObject.connectSlotsByName(Admin)\n \n self.model=DataFrameModel()\n self.infoTable.setModel(self.model)\n\n def retranslateUi(self, Admin):\n _translate = QtCore.QCoreApplication.translate\n Admin.setWindowTitle(_translate(\"Admin\", \"后台管理界面\"))\n self.menu_btn.setText(_translate(\"Admin\", \"菜单管理\"))\n self.user_btn.setText(_translate(\"Admin\", \"用户管理\"))\n self.order_btn.setText(_translate(\"Admin\", \"订单信息\"))\n self.back.setText(_translate(\"Admin\", \"返回登录\"))\n self.save.setText(_translate(\"Admin\", \"保存数据\"))\n self.original.setText(_translate(\"Admin\", \"初始化\"))\n self.fresh.setText(_translate(\"Admin\", \"刷新\"))\n self.menu_4.setTitle(_translate(\"Admin\", \"帮助\"))\n self.update1.setText(_translate(\"Admin\", \"update\"))\n self.add.setText(_translate(\"Admin\", \"add\"))\n self.update2.setText(_translate(\"Admin\", \"update\"))\n self.delete_2.setText(_translate(\"Admin\", \"delete\"))\n self.delete_3.setText(_translate(\"Admin\", \"delete\"))\n self.add_2.setText(_translate(\"Admin\", \"add\"))\n self.help.setText(_translate(\"Admin\", \"help\"))\n self.actionAllEmpty.setText(_translate(\"Admin\", \"AllEmpty\"))\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import serial
import time
def main():
# '/dev/tty****' is your port ID
con=serial.Serial('/dev/tty****', 9600)
print('connected.')
while 1:
str=con.readline() # byte code
print (str.strip().decode('utf-8')) # decoded string
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "108c8bbb4d3dbc6b7f32e084b13009296b3c5a80",
"index": 8016,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n con = serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str = con.readline()\n print(str.strip().decode('utf-8'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n con = serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str = con.readline()\n print(str.strip().decode('utf-8'))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import serial\nimport time\n\n\ndef main():\n con = serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str = con.readline()\n print(str.strip().decode('utf-8'))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import serial\nimport time\n\ndef main():\n # '/dev/tty****' is your port ID\n con=serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str=con.readline() # byte code\n print (str.strip().decode('utf-8')) # decoded string\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
This is a post login API and hence would have APIDetails and SessionDetails in the request object
-------------------------------------------------------------------------------------------------
Step 1: find if user's ip address is provided in the request object, if yes then got to step 2 else goto step 4
Step 2: call third party api to find the country of the IP address and its ISO2 and ISO3 codes
Step 3: using the ISO2 and/or ISO3 codes get the user's geo and associated currency. Return output
Step 4: from UserProfiles table get city_id and using this get the user's geo and associated currency. Return output
"""
"""
INPUT:
{
"APIDetails":{
"token_type":1,
"token_vendor_id":1,
"token_string":"sdxfcgvbhjnmklasdfghjk",
"dev_key":"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234"
},
"SessionDetails":{
"profile_id":159,
"session_id":787,
"session_key":"xxbJt0nUwyMbsDdOfVFYISRjoD1DC0jO"
},
"APIParams":{
"user_ip" : "192.168.0.1"
}
}
"""
"""
OUTPUT:
{
"AuthenticationDetails": {
"Status": "Success",
"Message": "ApiDetails fine to process"
},
"SessionDetails": {
"Status": "Success",
"Message": "session is active. session details updated",
"Payload": {
"profile_id": 159,
"session_id": 787,
"session_key": "LcTyf2Ypx6YRQOz3AYOyaE2uedblWnZB"
}
},
"Payload": {
"Status": "Success",
"Message": "ticket types and respective questions Fetched successfully",
"Payload": {
"geo_id": 2,
"geo_name": "Indian Subcontinent",
"geo_currency": "INR"
}
}
}
"""
|
flexible
|
{
"blob_id": "d7daf9b26f0b9f66b15b8533df032d17719e548b",
"index": 3343,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\nThis is a post login API and hence would have APIDetails and SessionDetails in the request object\n-------------------------------------------------------------------------------------------------\nStep 1: find if user's ip address is provided in the request object, if yes then got to step 2 else goto step 4\nStep 2: call third party api to find the country of the IP address and its ISO2 and ISO3 codes\nStep 3: using the ISO2 and/or ISO3 codes get the user's geo and associated currency. Return output\nStep 4: from UserProfiles table get city_id and using this get the user's geo and associated currency. Return output\n\"\"\"\n\n\"\"\"\nINPUT:\n{\n \"APIDetails\":{\n \t\"token_type\":1,\n \t\"token_vendor_id\":1,\n \t\"token_string\":\"sdxfcgvbhjnmklasdfghjk\",\n \t\"dev_key\":\"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234\"\n },\n \"SessionDetails\":{\n \"profile_id\":159,\n \"session_id\":787,\n \"session_key\":\"xxbJt0nUwyMbsDdOfVFYISRjoD1DC0jO\"\n },\n \"APIParams\":{\n \"user_ip\" : \"192.168.0.1\"\n }\n}\n\"\"\"\n\n\"\"\"\nOUTPUT:\n{\n \"AuthenticationDetails\": {\n \"Status\": \"Success\",\n \"Message\": \"ApiDetails fine to process\"\n },\n \"SessionDetails\": {\n \"Status\": \"Success\",\n \"Message\": \"session is active. session details updated\",\n \"Payload\": {\n \"profile_id\": 159,\n \"session_id\": 787,\n \"session_key\": \"LcTyf2Ypx6YRQOz3AYOyaE2uedblWnZB\"\n }\n },\n \"Payload\": {\n \"Status\": \"Success\",\n \"Message\": \"ticket types and respective questions Fetched successfully\",\n \"Payload\": {\n \"geo_id\": 2,\n \"geo_name\": \"Indian Subcontinent\",\n \"geo_currency\": \"INR\"\n }\n }\n}\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
class Rectangulo():
def __init__(self, base, altura):
self.base = base
self.altura = altura
def calcular_area(self):
return self.base * self.altura
base = float(input("Ingrese la base del rectangulo: \n"))
altura = float(input("Ingrese la altura del rectangulo: \n"))
#Primera instancia de rectangulo
rectangulo_1 = Rectangulo(base, altura)
area_rectangulo = rectangulo_1.calcular_area()
print(f"El area del rectangulo de {base} * {altura} = {area_rectangulo}")
|
normal
|
{
"blob_id": "2e60781da004fb86d3a33deae970c1faf2a5037d",
"index": 5793,
"step-1": "class Rectangulo:\n <mask token>\n\n def calcular_area(self):\n return self.base * self.altura\n\n\n<mask token>\n",
"step-2": "class Rectangulo:\n\n def __init__(self, base, altura):\n self.base = base\n self.altura = altura\n\n def calcular_area(self):\n return self.base * self.altura\n\n\n<mask token>\n",
"step-3": "class Rectangulo:\n\n def __init__(self, base, altura):\n self.base = base\n self.altura = altura\n\n def calcular_area(self):\n return self.base * self.altura\n\n\n<mask token>\nprint(f'El area del rectangulo de {base} * {altura} = {area_rectangulo}')\n",
"step-4": "class Rectangulo:\n\n def __init__(self, base, altura):\n self.base = base\n self.altura = altura\n\n def calcular_area(self):\n return self.base * self.altura\n\n\nbase = float(input('Ingrese la base del rectangulo: \\n'))\naltura = float(input('Ingrese la altura del rectangulo: \\n'))\nrectangulo_1 = Rectangulo(base, altura)\narea_rectangulo = rectangulo_1.calcular_area()\nprint(f'El area del rectangulo de {base} * {altura} = {area_rectangulo}')\n",
"step-5": "class Rectangulo():\n\n def __init__(self, base, altura):\n self.base = base\n self.altura = altura\n\n def calcular_area(self):\n return self.base * self.altura\n\nbase = float(input(\"Ingrese la base del rectangulo: \\n\"))\naltura = float(input(\"Ingrese la altura del rectangulo: \\n\"))\n\n#Primera instancia de rectangulo\nrectangulo_1 = Rectangulo(base, altura)\narea_rectangulo = rectangulo_1.calcular_area()\nprint(f\"El area del rectangulo de {base} * {altura} = {area_rectangulo}\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#Άσκηση 3.2: Ουρά δύο άκρων
print("Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\n ")
newNumber = input("Για να ξεκινήσεις, πάτησε Enter \n")
alist = []
check = True
while check == True :
newNumber = input("Δώσε μου τη καταχώρηση σου: ")
if newNumber != 'q' and newNumber != 'r' and newNumber != '0r' :
if newNumber[0] != '0' :
alist.append(float(newNumber))
check = True
else :
numberToList = list(newNumber)
numberToList.pop(0)
listToNumber = ''.join(numberToList)
alist.insert(0, float(listToNumber))
check = True
print(alist)
elif newNumber == 'r':
print("\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****", alist[(len(alist) - 1)])
alist.pop((len(alist))-1)
print(alist)
check = True
elif newNumber == '0r' :
print("\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****", alist[0])
alist.pop(0)
print(alist)
check = True
else:
print("\nΤέλος εφαρμογής!")
check = False
#παρατηρήσεις :
#1) Στο πρόγραμμα δεν έχει μπει κάποιος έλεγχος για την εισοδο του χρήστη κι έτσι αν πληκτρολογήσει κάτι εκτος από αριθμό ή 'q' / 'r' / '0r' το πρόγραμμα σκάει
#2) Ο έλεγχος με το 'r', '0r' έγινε εκτός της πρώτης εισόδου για να συμπεριλάβουμε τη περίπτωση που η λίστα ειναι κενή. Αντίστοιχα η εκτέλεση του προγραμματος
#θα βγάλει σφάλμα αν παω να αφαιρέσω και το τελευταιο στοιχειο της λίστας και πατήσω 'r' ή '0r'
|
normal
|
{
"blob_id": "87bcf53d1c93645a08b10ba0d02edf0d5b0a4906",
"index": 5664,
"step-1": "<mask token>\n",
"step-2": "print(\n \"\"\"Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\n \"\"\"\n )\n<mask token>\nwhile check == True:\n newNumber = input('Δώσε μου τη καταχώρηση σου: ')\n if newNumber != 'q' and newNumber != 'r' and newNumber != '0r':\n if newNumber[0] != '0':\n alist.append(float(newNumber))\n check = True\n else:\n numberToList = list(newNumber)\n numberToList.pop(0)\n listToNumber = ''.join(numberToList)\n alist.insert(0, float(listToNumber))\n check = True\n print(alist)\n elif newNumber == 'r':\n print('\\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****', alist\n [len(alist) - 1])\n alist.pop(len(alist) - 1)\n print(alist)\n check = True\n elif newNumber == '0r':\n print('\\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****', alist[0])\n alist.pop(0)\n print(alist)\n check = True\n else:\n print('\\nΤέλος εφαρμογής!')\n check = False\n",
"step-3": "print(\n \"\"\"Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\n \"\"\"\n )\nnewNumber = input('Για να ξεκινήσεις, πάτησε Enter \\n')\nalist = []\ncheck = True\nwhile check == True:\n newNumber = input('Δώσε μου τη καταχώρηση σου: ')\n if newNumber != 'q' and newNumber != 'r' and newNumber != '0r':\n if newNumber[0] != '0':\n alist.append(float(newNumber))\n check = True\n else:\n numberToList = list(newNumber)\n numberToList.pop(0)\n listToNumber = ''.join(numberToList)\n alist.insert(0, float(listToNumber))\n check = True\n print(alist)\n elif newNumber == 'r':\n print('\\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****', alist\n [len(alist) - 1])\n alist.pop(len(alist) - 1)\n print(alist)\n check = True\n elif newNumber == '0r':\n print('\\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****', alist[0])\n alist.pop(0)\n print(alist)\n check = True\n else:\n print('\\nΤέλος εφαρμογής!')\n check = False\n",
"step-4": "#Άσκηση 3.2: Ουρά δύο άκρων\r\n\r\nprint(\"Οδηγίες: Το πρόγραμμα καταχωρει αριθμους σε μια λίστα! Τρέχει σε άπειρο βρόχο, έως ότου πληκτρολογήσεις 'q'. \\nΑν θελήσεις να βγάλεις το πρώτο στοιχείο της λίστας, πληκτρολόγησε '0r' ενώ,\\nαν θέλεις να βγάλεις το τελευταιο, πληκτρολόγησε 'r'\\n \")\r\n\r\nnewNumber = input(\"Για να ξεκινήσεις, πάτησε Enter \\n\")\r\nalist = []\r\ncheck = True\r\n\r\nwhile check == True :\r\n \r\n newNumber = input(\"Δώσε μου τη καταχώρηση σου: \")\r\n if newNumber != 'q' and newNumber != 'r' and newNumber != '0r' :\r\n if newNumber[0] != '0' :\r\n alist.append(float(newNumber))\r\n check = True \r\n else :\r\n numberToList = list(newNumber)\r\n numberToList.pop(0)\r\n listToNumber = ''.join(numberToList)\r\n alist.insert(0, float(listToNumber))\r\n check = True\r\n print(alist)\r\n\r\n \r\n elif newNumber == 'r':\r\n print(\"\\n*****Από τη λίστα βγήκε το τελευταίο στοιχειο*****\", alist[(len(alist) - 1)])\r\n alist.pop((len(alist))-1)\r\n print(alist)\r\n check = True\r\n elif newNumber == '0r' :\r\n print(\"\\n*****Από τη λίστα βγήκε το πρώτο στοιχειο*****\", alist[0])\r\n alist.pop(0)\r\n print(alist)\r\n check = True\r\n \r\n else:\r\n print(\"\\nΤέλος εφαρμογής!\")\r\n check = False\r\n\r\n \r\n#παρατηρήσεις :\r\n#1) Στο πρόγραμμα δεν έχει μπει κάποιος έλεγχος για την εισοδο του χρήστη κι έτσι αν πληκτρολογήσει κάτι εκτος από αριθμό ή 'q' / 'r' / '0r' το πρόγραμμα σκάει\r\n#2) Ο έλεγχος με το 'r', '0r' έγινε εκτός της πρώτης εισόδου για να συμπεριλάβουμε τη περίπτωση που η λίστα ειναι κενή. Αντίστοιχα η εκτέλεση του προγραμματος\r\n #θα βγάλει σφάλμα αν παω να αφαιρέσω και το τελευταιο στοιχειο της λίστας και πατήσω 'r' ή '0r'\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from disaggregation import DisaggregationManager
import numpy as np
from more_itertools import windowed
x = np.random.random_sample(10 * 32 * 1024)
w = windowed(x, n=1024, step=128)
z = DisaggregationManager._overlap_average(np.array(list(w)), stride=128)
print(z.shape)
print(x.shape)
assert z.shape == x.shape
|
normal
|
{
"blob_id": "6d4950ca61cd1e2ee7ef8b409577e9df2d65addd",
"index": 4462,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(z.shape)\nprint(x.shape)\nassert z.shape == x.shape\n",
"step-3": "<mask token>\nx = np.random.random_sample(10 * 32 * 1024)\nw = windowed(x, n=1024, step=128)\nz = DisaggregationManager._overlap_average(np.array(list(w)), stride=128)\nprint(z.shape)\nprint(x.shape)\nassert z.shape == x.shape\n",
"step-4": "from disaggregation import DisaggregationManager\nimport numpy as np\nfrom more_itertools import windowed\nx = np.random.random_sample(10 * 32 * 1024)\nw = windowed(x, n=1024, step=128)\nz = DisaggregationManager._overlap_average(np.array(list(w)), stride=128)\nprint(z.shape)\nprint(x.shape)\nassert z.shape == x.shape\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
import numpy as np
import matplotlib.pyplot as plt
class stochasticGradient :
def __init__( self , kwargs ) :
self.inputVectors = kwargs["inputVectors"]
self.expectedOutput = kwargs["expectedOutput"]
self.noOfEpochs = kwargs["noOfEpochs"]
self.activationFnsForAllLayers = kwargs["activationFnsForAllLayers"]
self.noOfUnitsInEachLayer = kwargs["noOfUnitsInEachLayer"]
self.loss = kwargs["lossFn"]
self.learningRate = kwargs["learningRate"]
self.batchSize = kwargs["batchSize"]
self.noOfHiddenLayers = len(self.noOfUnitsInEachLayer) - 2
def start(self) :
self.setInitialWeights()
self.startAlgo()
self.plotLoss()
self.plotDecisionBoundary() #Can only be used in case of 2-D data
def plotDecisionBoundary(self) :
x_min = np.floor(min( self.inputVectors[:,0] ))
x_max = np.ceil(max( self.inputVectors[:,0] ))
y_min = np.floor(min( self.inputVectors[:,1] ))
y_max = np.ceil(max( self.inputVectors[:,1] ))
input = [(x, y) for x in np.arange(x_min, x_max, .05) for y in np.arange(y_min, y_max, .05)]
inputT = np.array( input )
output = self.forwardPass( inputT )
for i in range(len(output)):
if output[i] == 0:
plt.plot(input[i][0], input[i][1], 'co')
elif output[i] < 0:
plt.plot(input[i][0], input[i][1], 'r.')
elif output[i] > 0:
plt.plot(input[i][0], input[i][1], 'b.')
self.plotData()
plt.show()
def plotData(self) :
expectedOutputAsList = list(self.expectedOutput[:])
positiveIndices = [i for i, x in enumerate(expectedOutputAsList) if x == 1]
negativeIndices = [i for i, x in enumerate(expectedOutputAsList) if x == -1]
positiveX = [self.inputVectors[j][0] for j in positiveIndices]
positiveY = [self.inputVectors[j][1] for j in positiveIndices]
negativeX = [self.inputVectors[j][0] for j in negativeIndices]
negativeY = [self.inputVectors[j][1] for j in negativeIndices]
plt.scatter(positiveX , positiveY , color = "blue" , marker = "X" )
plt.scatter(negativeX , negativeY , color = "red" , marker = "X" )
def plotLoss(self) :
plt.plot(range(len(self.loss_list)) , self.loss_list , "--")
plt.show()
def setInitialWeights(self) :
self.setOfWeights = {}
self.setOfWeightsForBiasTerm = {}
for i in range(self.noOfHiddenLayers + 1) :
noOfUnitsInNextLayer = self.noOfUnitsInEachLayer[i+1]
noOfUnitsInCurrentLayer = self.noOfUnitsInEachLayer[i]
self.setOfWeightsForBiasTerm[i, i+1] = np.zeros(shape = (noOfUnitsInNextLayer, 1))
self.setOfWeights[i, i+1] = np.random.normal(size = (noOfUnitsInNextLayer, noOfUnitsInCurrentLayer))
def startAlgo(self) :
self.loss_list = []
j = 0
avg_loss = 100
noOfIterations = self.inputVectors.shape[0]//self.batchSize
while j < self.noOfEpochs and avg_loss >= 0.01 :
k = 0
avg_loss = 0
while k < noOfIterations :
self.predictedOutput = self.forwardPass( self.inputVectors )
loss = self.getLoss()
self.loss_list.append( loss )
batchIndexRange = range( self.batchSize*k , (self.batchSize*(k+1)))
self.backpropagation( batchIndexRange )
avg_loss += loss
k += 1
avg_loss = avg_loss/noOfIterations
j += 1
# print("list(zip(self.predictedOutput , self.expectedOutput)) : " , list(zip(self.predictedOutput , self.expectedOutput)))
global start_time
print("--- %s seconds ---" %(time.time()-start_time))
def backpropagation(self , batchIndexRange) :
self.calculateActivationFnDerivative()
self.getWeightUpdationForOutputLayer( batchIndexRange )
self.getWeightUpdationForHiddenLayers( batchIndexRange )
self.updateWeights()
def updateWeights(self) :
for h in range(self.noOfHiddenLayers + 1) :
self.setOfWeights[h,h+1] -= self.learningRate * self.weightsDelta[h,h+1]
self.setOfWeightsForBiasTerm[h,h+1] -= self.learningRate * self.biasWeightsDelta[h,h+1]
def getWeightUpdationForHiddenLayers(self , batchIndexRange) :
self.deltaContribution = self.deltaContribution.transpose((0,2,1))
for h in range(self.noOfHiddenLayers, 0, -1) :
weights = self.setOfWeights[h, h+1]
activationDerivative = self.activationDerivative[h][batchIndexRange].transpose((0,2,1))
self.deltaContribution = np.matmul(self.deltaContribution , weights * activationDerivative)
activationPrevLayer = self.activation[h-1][batchIndexRange]
self.weightsDelta[h-1,h] = np.mean(np.matmul(activationPrevLayer , self.deltaContribution) , axis=0).T
self.biasWeightsDelta[h-1,h] = np.mean(self.deltaContribution , axis=0).T
def getWeightUpdationForOutputLayer(self , batchIndexRange) :
self.weightsDelta = {}
self.biasWeightsDelta = {}
outputLayerIndex = self.noOfHiddenLayers+1
prevLayerToOutputLayerIndex = outputLayerIndex-1
predictedOutput = self.predictedOutput[batchIndexRange]
expectedOutput = np.expand_dims(self.expectedOutput , axis=2)[batchIndexRange]
lossDerivativeFn = self.loss + "Derivative"
lossDerivative = globals()[lossDerivativeFn](predictedOutput, expectedOutput)
self.deltaContribution = lossDerivative * self.activationDerivative[outputLayerIndex][batchIndexRange]
activationAtPrevLayer = self.activation[prevLayerToOutputLayerIndex][batchIndexRange]
self.weightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex] = np.mean(np.matmul( self.deltaContribution , activationAtPrevLayer.transpose((0, 2, 1))) , axis=0)
self.biasWeightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex] = np.mean(self.deltaContribution , axis=0)
def calculateActivationFnDerivative(self) :
self.activationDerivative = {}
for h in range( self.noOfHiddenLayers+1 ) :
activationDerivativeFn = self.activationFnsForAllLayers[h] + "Derivative"
self.activationDerivative[h+1] = globals()[activationDerivativeFn]( self.weightedSums[h+1] )
def getLoss(self) :
lossFn = globals()[ self.loss ]
expectedOutput = np.expand_dims(self.expectedOutput , axis=2)
return lossFn( self.predictedOutput , expectedOutput )
def forwardPass(self , data) :
self.activation = {}
self.weightedSums = {}
self.activation[0] = np.expand_dims( data , axis = 2 )
for h in range( self.noOfHiddenLayers+1 ) :
self.weightedSums[h+1] = np.matmul(self.setOfWeights[h,h+1] , self.activation[h]) + self.setOfWeightsForBiasTerm[h, h+1]
activationFnForGivenLayer = self.activationFnsForAllLayers[h]
self.activation[h+1] = globals()[activationFnForGivenLayer]( self.weightedSums[h+1] )
outputLayerIndex = self.noOfHiddenLayers + 1
return self.activation[outputLayerIndex]
start_time = time.time()
def sigmoid(x) :
return 1/(1+np.exp(-x))
def tanh(x) :
return np.tanh(x)
def l2_norm_squared(x, y) :
return np.mean((x-y)**2)/2
def l2_norm_squaredDerivative(x, y) :
noOfDataPts = x.shape[0]
return (x-y)/noOfDataPts
def sigmoidDerivative(x) :
return sigmoid(x)*(1-sigmoid(x))
def tanhDerivative(x) :
return (1-tanh(x) ** 2)
def ellipseFn(x , a , b) :
return (b/a)*((a**2-x**2)**0.5)
# CREATING LINEARLY SEPARABLE DATA
def runForLinearlySeparableData() :
args = {}
noOfDataPts = 80
shuffledIndices = np.random.permutation( noOfDataPts )
args["inputVectors"] = (np.concatenate((np.random.normal(loc=10, size=[40, 2]), np.random.normal(loc=20, size=[40, 2]))) / 20)[shuffledIndices]
args["expectedOutput"] = (np.concatenate((np.ones(shape=(40, 1)), -np.ones(shape=(40, 1)))))[shuffledIndices]
args["noOfEpochs"] = 100000
args["activationFnsForAllLayers"] = ["tanh"]*3
args["noOfUnitsInEachLayer"] = [ 2, 6, 6, 1 ]
args["lossFn"] = "l2_norm_squared"
args["learningRate"] = 0.1
args["batchSize"] = 1
stochasticGradientObj = stochasticGradient( args )
stochasticGradientObj.start()
# CREATING TWO CONCENTRIC ELLIPSES
def runForEllipseData() :
inputs = {}
r = [ 2 , 5 ]
h = 0.2
inputVectorsList = []
expectedOutput = []
for i in r :
t = (i-(-i))/h
x = np.linspace(-i , i , t)
vectorizedEllipseFn = np.vectorize( ellipseFn )
y = vectorizedEllipseFn( x , i , i )
for j in range(len(x)):
inputVectorsList += [(x[j], -y[j]), (x[j], y[j])]
if i == 2 :
expectedOutput.append([1])
expectedOutput.append([1])
else:
expectedOutput.append([-1])
expectedOutput.append([-1])
perm = np.random.permutation(140)
inputs["inputVectors"] = np.array(inputVectorsList)[perm]/5
inputs["expectedOutput"] = np.array(expectedOutput)[perm]
inputs["noOfEpochs"] = 200000
inputs["activationFnsForAllLayers"] = ["tanh" , "tanh" ]
inputs["noOfUnitsInEachLayer"] = [ 2 , 3 , 1 ]
inputs["lossFn"] = "l2_norm_squared"
inputs["learningRate"] = 0.5
inputs["batchSize"] = 140
stochasticGradientObj = stochasticGradient( inputs )
stochasticGradientObj.start()
# CREATING XOR DATA
def runForXORdata():
    """Train the network on the four XOR points, labelled in {-1, +1}.

    Fits a 2-3-1 tanh network with per-sample (batch size 1) stochastic
    gradient descent.
    """
    config = {
        "inputVectors": np.array([[0, 0], [0, 1], [1, 1], [1, 0]]),
        "expectedOutput": np.array([[-1], [1], [-1], [1]]),
        "noOfEpochs": 200000,
        "activationFnsForAllLayers": ["tanh", "tanh"],
        "noOfUnitsInEachLayer": [2, 3, 1],
        "lossFn": "l2_norm_squared",
        "learningRate": 0.05,
        "batchSize": 1,
    }
    trainer = stochasticGradient(config)
    trainer.start()
# Run the three experiments back to back; each one trains a fresh network
# and shows matplotlib loss / decision-boundary plots (blocking windows).
runForLinearlySeparableData()
runForEllipseData()
runForXORdata()
|
normal
|
{
"blob_id": "775900d4c059c89bfb10f5c3c2a924a41a049438",
"index": 8205,
"step-1": "<mask token>\n\n\nclass stochasticGradient:\n\n def __init__(self, kwargs):\n self.inputVectors = kwargs['inputVectors']\n self.expectedOutput = kwargs['expectedOutput']\n self.noOfEpochs = kwargs['noOfEpochs']\n self.activationFnsForAllLayers = kwargs['activationFnsForAllLayers']\n self.noOfUnitsInEachLayer = kwargs['noOfUnitsInEachLayer']\n self.loss = kwargs['lossFn']\n self.learningRate = kwargs['learningRate']\n self.batchSize = kwargs['batchSize']\n self.noOfHiddenLayers = len(self.noOfUnitsInEachLayer) - 2\n\n def start(self):\n self.setInitialWeights()\n self.startAlgo()\n self.plotLoss()\n self.plotDecisionBoundary()\n\n def plotDecisionBoundary(self):\n x_min = np.floor(min(self.inputVectors[:, 0]))\n x_max = np.ceil(max(self.inputVectors[:, 0]))\n y_min = np.floor(min(self.inputVectors[:, 1]))\n y_max = np.ceil(max(self.inputVectors[:, 1]))\n input = [(x, y) for x in np.arange(x_min, x_max, 0.05) for y in np.\n arange(y_min, y_max, 0.05)]\n inputT = np.array(input)\n output = self.forwardPass(inputT)\n for i in range(len(output)):\n if output[i] == 0:\n plt.plot(input[i][0], input[i][1], 'co')\n elif output[i] < 0:\n plt.plot(input[i][0], input[i][1], 'r.')\n elif output[i] > 0:\n plt.plot(input[i][0], input[i][1], 'b.')\n self.plotData()\n plt.show()\n\n def plotData(self):\n expectedOutputAsList = list(self.expectedOutput[:])\n positiveIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == 1]\n negativeIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == -1]\n positiveX = [self.inputVectors[j][0] for j in positiveIndices]\n positiveY = [self.inputVectors[j][1] for j in positiveIndices]\n negativeX = [self.inputVectors[j][0] for j in negativeIndices]\n negativeY = [self.inputVectors[j][1] for j in negativeIndices]\n plt.scatter(positiveX, positiveY, color='blue', marker='X')\n plt.scatter(negativeX, negativeY, color='red', marker='X')\n\n def plotLoss(self):\n plt.plot(range(len(self.loss_list)), self.loss_list, 
'--')\n plt.show()\n\n def setInitialWeights(self):\n self.setOfWeights = {}\n self.setOfWeightsForBiasTerm = {}\n for i in range(self.noOfHiddenLayers + 1):\n noOfUnitsInNextLayer = self.noOfUnitsInEachLayer[i + 1]\n noOfUnitsInCurrentLayer = self.noOfUnitsInEachLayer[i]\n self.setOfWeightsForBiasTerm[i, i + 1] = np.zeros(shape=(\n noOfUnitsInNextLayer, 1))\n self.setOfWeights[i, i + 1] = np.random.normal(size=(\n noOfUnitsInNextLayer, noOfUnitsInCurrentLayer))\n\n def startAlgo(self):\n self.loss_list = []\n j = 0\n avg_loss = 100\n noOfIterations = self.inputVectors.shape[0] // self.batchSize\n while j < self.noOfEpochs and avg_loss >= 0.01:\n k = 0\n avg_loss = 0\n while k < noOfIterations:\n self.predictedOutput = self.forwardPass(self.inputVectors)\n loss = self.getLoss()\n self.loss_list.append(loss)\n batchIndexRange = range(self.batchSize * k, self.batchSize *\n (k + 1))\n self.backpropagation(batchIndexRange)\n avg_loss += loss\n k += 1\n avg_loss = avg_loss / noOfIterations\n j += 1\n global start_time\n print('--- %s seconds ---' % (time.time() - start_time))\n\n def backpropagation(self, batchIndexRange):\n self.calculateActivationFnDerivative()\n self.getWeightUpdationForOutputLayer(batchIndexRange)\n self.getWeightUpdationForHiddenLayers(batchIndexRange)\n self.updateWeights()\n\n def updateWeights(self):\n for h in range(self.noOfHiddenLayers + 1):\n self.setOfWeights[h, h + 1\n ] -= self.learningRate * self.weightsDelta[h, h + 1]\n self.setOfWeightsForBiasTerm[h, h + 1\n ] -= self.learningRate * self.biasWeightsDelta[h, h + 1]\n\n def getWeightUpdationForHiddenLayers(self, batchIndexRange):\n self.deltaContribution = self.deltaContribution.transpose((0, 2, 1))\n for h in range(self.noOfHiddenLayers, 0, -1):\n weights = self.setOfWeights[h, h + 1]\n activationDerivative = self.activationDerivative[h][batchIndexRange\n ].transpose((0, 2, 1))\n self.deltaContribution = np.matmul(self.deltaContribution, \n weights * activationDerivative)\n 
activationPrevLayer = self.activation[h - 1][batchIndexRange]\n self.weightsDelta[h - 1, h] = np.mean(np.matmul(\n activationPrevLayer, self.deltaContribution), axis=0).T\n self.biasWeightsDelta[h - 1, h] = np.mean(self.\n deltaContribution, axis=0).T\n\n def getWeightUpdationForOutputLayer(self, batchIndexRange):\n self.weightsDelta = {}\n self.biasWeightsDelta = {}\n outputLayerIndex = self.noOfHiddenLayers + 1\n prevLayerToOutputLayerIndex = outputLayerIndex - 1\n predictedOutput = self.predictedOutput[batchIndexRange]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)[\n batchIndexRange]\n lossDerivativeFn = self.loss + 'Derivative'\n lossDerivative = globals()[lossDerivativeFn](predictedOutput,\n expectedOutput)\n self.deltaContribution = lossDerivative * self.activationDerivative[\n outputLayerIndex][batchIndexRange]\n activationAtPrevLayer = self.activation[prevLayerToOutputLayerIndex][\n batchIndexRange]\n self.weightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(np.matmul(self.deltaContribution,\n activationAtPrevLayer.transpose((0, 2, 1))), axis=0)\n self.biasWeightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(self.deltaContribution, axis=0)\n\n def calculateActivationFnDerivative(self):\n self.activationDerivative = {}\n for h in range(self.noOfHiddenLayers + 1):\n activationDerivativeFn = self.activationFnsForAllLayers[h\n ] + 'Derivative'\n self.activationDerivative[h + 1] = globals()[activationDerivativeFn\n ](self.weightedSums[h + 1])\n\n def getLoss(self):\n lossFn = globals()[self.loss]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)\n return lossFn(self.predictedOutput, expectedOutput)\n\n def forwardPass(self, data):\n self.activation = {}\n self.weightedSums = {}\n self.activation[0] = np.expand_dims(data, axis=2)\n for h in range(self.noOfHiddenLayers + 1):\n self.weightedSums[h + 1] = np.matmul(self.setOfWeights[h, h + 1\n ], self.activation[h]) + 
self.setOfWeightsForBiasTerm[h, h + 1]\n activationFnForGivenLayer = self.activationFnsForAllLayers[h]\n self.activation[h + 1] = globals()[activationFnForGivenLayer](self\n .weightedSums[h + 1])\n outputLayerIndex = self.noOfHiddenLayers + 1\n return self.activation[outputLayerIndex]\n\n\n<mask token>\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass stochasticGradient:\n\n def __init__(self, kwargs):\n self.inputVectors = kwargs['inputVectors']\n self.expectedOutput = kwargs['expectedOutput']\n self.noOfEpochs = kwargs['noOfEpochs']\n self.activationFnsForAllLayers = kwargs['activationFnsForAllLayers']\n self.noOfUnitsInEachLayer = kwargs['noOfUnitsInEachLayer']\n self.loss = kwargs['lossFn']\n self.learningRate = kwargs['learningRate']\n self.batchSize = kwargs['batchSize']\n self.noOfHiddenLayers = len(self.noOfUnitsInEachLayer) - 2\n\n def start(self):\n self.setInitialWeights()\n self.startAlgo()\n self.plotLoss()\n self.plotDecisionBoundary()\n\n def plotDecisionBoundary(self):\n x_min = np.floor(min(self.inputVectors[:, 0]))\n x_max = np.ceil(max(self.inputVectors[:, 0]))\n y_min = np.floor(min(self.inputVectors[:, 1]))\n y_max = np.ceil(max(self.inputVectors[:, 1]))\n input = [(x, y) for x in np.arange(x_min, x_max, 0.05) for y in np.\n arange(y_min, y_max, 0.05)]\n inputT = np.array(input)\n output = self.forwardPass(inputT)\n for i in range(len(output)):\n if output[i] == 0:\n plt.plot(input[i][0], input[i][1], 'co')\n elif output[i] < 0:\n plt.plot(input[i][0], input[i][1], 'r.')\n elif output[i] > 0:\n plt.plot(input[i][0], input[i][1], 'b.')\n self.plotData()\n plt.show()\n\n def plotData(self):\n expectedOutputAsList = list(self.expectedOutput[:])\n positiveIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == 1]\n negativeIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == -1]\n positiveX = [self.inputVectors[j][0] for j in positiveIndices]\n positiveY = [self.inputVectors[j][1] for j in positiveIndices]\n negativeX = [self.inputVectors[j][0] for j in negativeIndices]\n negativeY = [self.inputVectors[j][1] for j in negativeIndices]\n plt.scatter(positiveX, positiveY, color='blue', marker='X')\n plt.scatter(negativeX, negativeY, color='red', marker='X')\n\n def plotLoss(self):\n plt.plot(range(len(self.loss_list)), self.loss_list, 
'--')\n plt.show()\n\n def setInitialWeights(self):\n self.setOfWeights = {}\n self.setOfWeightsForBiasTerm = {}\n for i in range(self.noOfHiddenLayers + 1):\n noOfUnitsInNextLayer = self.noOfUnitsInEachLayer[i + 1]\n noOfUnitsInCurrentLayer = self.noOfUnitsInEachLayer[i]\n self.setOfWeightsForBiasTerm[i, i + 1] = np.zeros(shape=(\n noOfUnitsInNextLayer, 1))\n self.setOfWeights[i, i + 1] = np.random.normal(size=(\n noOfUnitsInNextLayer, noOfUnitsInCurrentLayer))\n\n def startAlgo(self):\n self.loss_list = []\n j = 0\n avg_loss = 100\n noOfIterations = self.inputVectors.shape[0] // self.batchSize\n while j < self.noOfEpochs and avg_loss >= 0.01:\n k = 0\n avg_loss = 0\n while k < noOfIterations:\n self.predictedOutput = self.forwardPass(self.inputVectors)\n loss = self.getLoss()\n self.loss_list.append(loss)\n batchIndexRange = range(self.batchSize * k, self.batchSize *\n (k + 1))\n self.backpropagation(batchIndexRange)\n avg_loss += loss\n k += 1\n avg_loss = avg_loss / noOfIterations\n j += 1\n global start_time\n print('--- %s seconds ---' % (time.time() - start_time))\n\n def backpropagation(self, batchIndexRange):\n self.calculateActivationFnDerivative()\n self.getWeightUpdationForOutputLayer(batchIndexRange)\n self.getWeightUpdationForHiddenLayers(batchIndexRange)\n self.updateWeights()\n\n def updateWeights(self):\n for h in range(self.noOfHiddenLayers + 1):\n self.setOfWeights[h, h + 1\n ] -= self.learningRate * self.weightsDelta[h, h + 1]\n self.setOfWeightsForBiasTerm[h, h + 1\n ] -= self.learningRate * self.biasWeightsDelta[h, h + 1]\n\n def getWeightUpdationForHiddenLayers(self, batchIndexRange):\n self.deltaContribution = self.deltaContribution.transpose((0, 2, 1))\n for h in range(self.noOfHiddenLayers, 0, -1):\n weights = self.setOfWeights[h, h + 1]\n activationDerivative = self.activationDerivative[h][batchIndexRange\n ].transpose((0, 2, 1))\n self.deltaContribution = np.matmul(self.deltaContribution, \n weights * activationDerivative)\n 
activationPrevLayer = self.activation[h - 1][batchIndexRange]\n self.weightsDelta[h - 1, h] = np.mean(np.matmul(\n activationPrevLayer, self.deltaContribution), axis=0).T\n self.biasWeightsDelta[h - 1, h] = np.mean(self.\n deltaContribution, axis=0).T\n\n def getWeightUpdationForOutputLayer(self, batchIndexRange):\n self.weightsDelta = {}\n self.biasWeightsDelta = {}\n outputLayerIndex = self.noOfHiddenLayers + 1\n prevLayerToOutputLayerIndex = outputLayerIndex - 1\n predictedOutput = self.predictedOutput[batchIndexRange]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)[\n batchIndexRange]\n lossDerivativeFn = self.loss + 'Derivative'\n lossDerivative = globals()[lossDerivativeFn](predictedOutput,\n expectedOutput)\n self.deltaContribution = lossDerivative * self.activationDerivative[\n outputLayerIndex][batchIndexRange]\n activationAtPrevLayer = self.activation[prevLayerToOutputLayerIndex][\n batchIndexRange]\n self.weightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(np.matmul(self.deltaContribution,\n activationAtPrevLayer.transpose((0, 2, 1))), axis=0)\n self.biasWeightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(self.deltaContribution, axis=0)\n\n def calculateActivationFnDerivative(self):\n self.activationDerivative = {}\n for h in range(self.noOfHiddenLayers + 1):\n activationDerivativeFn = self.activationFnsForAllLayers[h\n ] + 'Derivative'\n self.activationDerivative[h + 1] = globals()[activationDerivativeFn\n ](self.weightedSums[h + 1])\n\n def getLoss(self):\n lossFn = globals()[self.loss]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)\n return lossFn(self.predictedOutput, expectedOutput)\n\n def forwardPass(self, data):\n self.activation = {}\n self.weightedSums = {}\n self.activation[0] = np.expand_dims(data, axis=2)\n for h in range(self.noOfHiddenLayers + 1):\n self.weightedSums[h + 1] = np.matmul(self.setOfWeights[h, h + 1\n ], self.activation[h]) + 
self.setOfWeightsForBiasTerm[h, h + 1]\n activationFnForGivenLayer = self.activationFnsForAllLayers[h]\n self.activation[h + 1] = globals()[activationFnForGivenLayer](self\n .weightedSums[h + 1])\n outputLayerIndex = self.noOfHiddenLayers + 1\n return self.activation[outputLayerIndex]\n\n\n<mask token>\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\n<mask token>\n\n\ndef runForEllipseData():\n inputs = {}\n r = [2, 5]\n h = 0.2\n inputVectorsList = []\n expectedOutput = []\n for i in r:\n t = (i - -i) / h\n x = np.linspace(-i, i, t)\n vectorizedEllipseFn = np.vectorize(ellipseFn)\n y = vectorizedEllipseFn(x, i, i)\n for j in range(len(x)):\n inputVectorsList += [(x[j], -y[j]), (x[j], y[j])]\n if i == 2:\n expectedOutput.append([1])\n expectedOutput.append([1])\n else:\n expectedOutput.append([-1])\n expectedOutput.append([-1])\n perm = np.random.permutation(140)\n inputs['inputVectors'] = np.array(inputVectorsList)[perm] / 5\n inputs['expectedOutput'] = np.array(expectedOutput)[perm]\n inputs['noOfEpochs'] = 200000\n inputs['activationFnsForAllLayers'] = ['tanh', 'tanh']\n inputs['noOfUnitsInEachLayer'] = [2, 3, 1]\n inputs['lossFn'] = 'l2_norm_squared'\n inputs['learningRate'] = 0.5\n inputs['batchSize'] = 140\n stochasticGradientObj = stochasticGradient(inputs)\n stochasticGradientObj.start()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass stochasticGradient:\n\n def __init__(self, kwargs):\n self.inputVectors = kwargs['inputVectors']\n self.expectedOutput = kwargs['expectedOutput']\n self.noOfEpochs = kwargs['noOfEpochs']\n self.activationFnsForAllLayers = kwargs['activationFnsForAllLayers']\n self.noOfUnitsInEachLayer = kwargs['noOfUnitsInEachLayer']\n self.loss = kwargs['lossFn']\n self.learningRate = kwargs['learningRate']\n self.batchSize = kwargs['batchSize']\n self.noOfHiddenLayers = len(self.noOfUnitsInEachLayer) - 2\n\n def start(self):\n self.setInitialWeights()\n self.startAlgo()\n self.plotLoss()\n self.plotDecisionBoundary()\n\n def plotDecisionBoundary(self):\n x_min = np.floor(min(self.inputVectors[:, 0]))\n x_max = np.ceil(max(self.inputVectors[:, 0]))\n y_min = np.floor(min(self.inputVectors[:, 1]))\n y_max = np.ceil(max(self.inputVectors[:, 1]))\n input = [(x, y) for x in np.arange(x_min, x_max, 0.05) for y in np.\n arange(y_min, y_max, 0.05)]\n inputT = np.array(input)\n output = self.forwardPass(inputT)\n for i in range(len(output)):\n if output[i] == 0:\n plt.plot(input[i][0], input[i][1], 'co')\n elif output[i] < 0:\n plt.plot(input[i][0], input[i][1], 'r.')\n elif output[i] > 0:\n plt.plot(input[i][0], input[i][1], 'b.')\n self.plotData()\n plt.show()\n\n def plotData(self):\n expectedOutputAsList = list(self.expectedOutput[:])\n positiveIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == 1]\n negativeIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == -1]\n positiveX = [self.inputVectors[j][0] for j in positiveIndices]\n positiveY = [self.inputVectors[j][1] for j in positiveIndices]\n negativeX = [self.inputVectors[j][0] for j in negativeIndices]\n negativeY = [self.inputVectors[j][1] for j in negativeIndices]\n plt.scatter(positiveX, positiveY, color='blue', marker='X')\n plt.scatter(negativeX, negativeY, color='red', marker='X')\n\n def plotLoss(self):\n plt.plot(range(len(self.loss_list)), self.loss_list, 
'--')\n plt.show()\n\n def setInitialWeights(self):\n self.setOfWeights = {}\n self.setOfWeightsForBiasTerm = {}\n for i in range(self.noOfHiddenLayers + 1):\n noOfUnitsInNextLayer = self.noOfUnitsInEachLayer[i + 1]\n noOfUnitsInCurrentLayer = self.noOfUnitsInEachLayer[i]\n self.setOfWeightsForBiasTerm[i, i + 1] = np.zeros(shape=(\n noOfUnitsInNextLayer, 1))\n self.setOfWeights[i, i + 1] = np.random.normal(size=(\n noOfUnitsInNextLayer, noOfUnitsInCurrentLayer))\n\n def startAlgo(self):\n self.loss_list = []\n j = 0\n avg_loss = 100\n noOfIterations = self.inputVectors.shape[0] // self.batchSize\n while j < self.noOfEpochs and avg_loss >= 0.01:\n k = 0\n avg_loss = 0\n while k < noOfIterations:\n self.predictedOutput = self.forwardPass(self.inputVectors)\n loss = self.getLoss()\n self.loss_list.append(loss)\n batchIndexRange = range(self.batchSize * k, self.batchSize *\n (k + 1))\n self.backpropagation(batchIndexRange)\n avg_loss += loss\n k += 1\n avg_loss = avg_loss / noOfIterations\n j += 1\n global start_time\n print('--- %s seconds ---' % (time.time() - start_time))\n\n def backpropagation(self, batchIndexRange):\n self.calculateActivationFnDerivative()\n self.getWeightUpdationForOutputLayer(batchIndexRange)\n self.getWeightUpdationForHiddenLayers(batchIndexRange)\n self.updateWeights()\n\n def updateWeights(self):\n for h in range(self.noOfHiddenLayers + 1):\n self.setOfWeights[h, h + 1\n ] -= self.learningRate * self.weightsDelta[h, h + 1]\n self.setOfWeightsForBiasTerm[h, h + 1\n ] -= self.learningRate * self.biasWeightsDelta[h, h + 1]\n\n def getWeightUpdationForHiddenLayers(self, batchIndexRange):\n self.deltaContribution = self.deltaContribution.transpose((0, 2, 1))\n for h in range(self.noOfHiddenLayers, 0, -1):\n weights = self.setOfWeights[h, h + 1]\n activationDerivative = self.activationDerivative[h][batchIndexRange\n ].transpose((0, 2, 1))\n self.deltaContribution = np.matmul(self.deltaContribution, \n weights * activationDerivative)\n 
activationPrevLayer = self.activation[h - 1][batchIndexRange]\n self.weightsDelta[h - 1, h] = np.mean(np.matmul(\n activationPrevLayer, self.deltaContribution), axis=0).T\n self.biasWeightsDelta[h - 1, h] = np.mean(self.\n deltaContribution, axis=0).T\n\n def getWeightUpdationForOutputLayer(self, batchIndexRange):\n self.weightsDelta = {}\n self.biasWeightsDelta = {}\n outputLayerIndex = self.noOfHiddenLayers + 1\n prevLayerToOutputLayerIndex = outputLayerIndex - 1\n predictedOutput = self.predictedOutput[batchIndexRange]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)[\n batchIndexRange]\n lossDerivativeFn = self.loss + 'Derivative'\n lossDerivative = globals()[lossDerivativeFn](predictedOutput,\n expectedOutput)\n self.deltaContribution = lossDerivative * self.activationDerivative[\n outputLayerIndex][batchIndexRange]\n activationAtPrevLayer = self.activation[prevLayerToOutputLayerIndex][\n batchIndexRange]\n self.weightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(np.matmul(self.deltaContribution,\n activationAtPrevLayer.transpose((0, 2, 1))), axis=0)\n self.biasWeightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(self.deltaContribution, axis=0)\n\n def calculateActivationFnDerivative(self):\n self.activationDerivative = {}\n for h in range(self.noOfHiddenLayers + 1):\n activationDerivativeFn = self.activationFnsForAllLayers[h\n ] + 'Derivative'\n self.activationDerivative[h + 1] = globals()[activationDerivativeFn\n ](self.weightedSums[h + 1])\n\n def getLoss(self):\n lossFn = globals()[self.loss]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)\n return lossFn(self.predictedOutput, expectedOutput)\n\n def forwardPass(self, data):\n self.activation = {}\n self.weightedSums = {}\n self.activation[0] = np.expand_dims(data, axis=2)\n for h in range(self.noOfHiddenLayers + 1):\n self.weightedSums[h + 1] = np.matmul(self.setOfWeights[h, h + 1\n ], self.activation[h]) + 
self.setOfWeightsForBiasTerm[h, h + 1]\n activationFnForGivenLayer = self.activationFnsForAllLayers[h]\n self.activation[h + 1] = globals()[activationFnForGivenLayer](self\n .weightedSums[h + 1])\n outputLayerIndex = self.noOfHiddenLayers + 1\n return self.activation[outputLayerIndex]\n\n\n<mask token>\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef l2_norm_squared(x, y):\n return np.mean((x - y) ** 2) / 2\n\n\ndef l2_norm_squaredDerivative(x, y):\n noOfDataPts = x.shape[0]\n return (x - y) / noOfDataPts\n\n\ndef sigmoidDerivative(x):\n return sigmoid(x) * (1 - sigmoid(x))\n\n\ndef tanhDerivative(x):\n return 1 - tanh(x) ** 2\n\n\ndef ellipseFn(x, a, b):\n return b / a * (a ** 2 - x ** 2) ** 0.5\n\n\n<mask token>\n\n\ndef runForEllipseData():\n inputs = {}\n r = [2, 5]\n h = 0.2\n inputVectorsList = []\n expectedOutput = []\n for i in r:\n t = (i - -i) / h\n x = np.linspace(-i, i, t)\n vectorizedEllipseFn = np.vectorize(ellipseFn)\n y = vectorizedEllipseFn(x, i, i)\n for j in range(len(x)):\n inputVectorsList += [(x[j], -y[j]), (x[j], y[j])]\n if i == 2:\n expectedOutput.append([1])\n expectedOutput.append([1])\n else:\n expectedOutput.append([-1])\n expectedOutput.append([-1])\n perm = np.random.permutation(140)\n inputs['inputVectors'] = np.array(inputVectorsList)[perm] / 5\n inputs['expectedOutput'] = np.array(expectedOutput)[perm]\n inputs['noOfEpochs'] = 200000\n inputs['activationFnsForAllLayers'] = ['tanh', 'tanh']\n inputs['noOfUnitsInEachLayer'] = [2, 3, 1]\n inputs['lossFn'] = 'l2_norm_squared'\n inputs['learningRate'] = 0.5\n inputs['batchSize'] = 140\n stochasticGradientObj = stochasticGradient(inputs)\n stochasticGradientObj.start()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass stochasticGradient:\n\n def __init__(self, kwargs):\n self.inputVectors = kwargs['inputVectors']\n self.expectedOutput = kwargs['expectedOutput']\n self.noOfEpochs = kwargs['noOfEpochs']\n self.activationFnsForAllLayers = kwargs['activationFnsForAllLayers']\n self.noOfUnitsInEachLayer = kwargs['noOfUnitsInEachLayer']\n self.loss = kwargs['lossFn']\n self.learningRate = kwargs['learningRate']\n self.batchSize = kwargs['batchSize']\n self.noOfHiddenLayers = len(self.noOfUnitsInEachLayer) - 2\n\n def start(self):\n self.setInitialWeights()\n self.startAlgo()\n self.plotLoss()\n self.plotDecisionBoundary()\n\n def plotDecisionBoundary(self):\n x_min = np.floor(min(self.inputVectors[:, 0]))\n x_max = np.ceil(max(self.inputVectors[:, 0]))\n y_min = np.floor(min(self.inputVectors[:, 1]))\n y_max = np.ceil(max(self.inputVectors[:, 1]))\n input = [(x, y) for x in np.arange(x_min, x_max, 0.05) for y in np.\n arange(y_min, y_max, 0.05)]\n inputT = np.array(input)\n output = self.forwardPass(inputT)\n for i in range(len(output)):\n if output[i] == 0:\n plt.plot(input[i][0], input[i][1], 'co')\n elif output[i] < 0:\n plt.plot(input[i][0], input[i][1], 'r.')\n elif output[i] > 0:\n plt.plot(input[i][0], input[i][1], 'b.')\n self.plotData()\n plt.show()\n\n def plotData(self):\n expectedOutputAsList = list(self.expectedOutput[:])\n positiveIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == 1]\n negativeIndices = [i for i, x in enumerate(expectedOutputAsList) if\n x == -1]\n positiveX = [self.inputVectors[j][0] for j in positiveIndices]\n positiveY = [self.inputVectors[j][1] for j in positiveIndices]\n negativeX = [self.inputVectors[j][0] for j in negativeIndices]\n negativeY = [self.inputVectors[j][1] for j in negativeIndices]\n plt.scatter(positiveX, positiveY, color='blue', marker='X')\n plt.scatter(negativeX, negativeY, color='red', marker='X')\n\n def plotLoss(self):\n plt.plot(range(len(self.loss_list)), self.loss_list, 
'--')\n plt.show()\n\n def setInitialWeights(self):\n self.setOfWeights = {}\n self.setOfWeightsForBiasTerm = {}\n for i in range(self.noOfHiddenLayers + 1):\n noOfUnitsInNextLayer = self.noOfUnitsInEachLayer[i + 1]\n noOfUnitsInCurrentLayer = self.noOfUnitsInEachLayer[i]\n self.setOfWeightsForBiasTerm[i, i + 1] = np.zeros(shape=(\n noOfUnitsInNextLayer, 1))\n self.setOfWeights[i, i + 1] = np.random.normal(size=(\n noOfUnitsInNextLayer, noOfUnitsInCurrentLayer))\n\n def startAlgo(self):\n self.loss_list = []\n j = 0\n avg_loss = 100\n noOfIterations = self.inputVectors.shape[0] // self.batchSize\n while j < self.noOfEpochs and avg_loss >= 0.01:\n k = 0\n avg_loss = 0\n while k < noOfIterations:\n self.predictedOutput = self.forwardPass(self.inputVectors)\n loss = self.getLoss()\n self.loss_list.append(loss)\n batchIndexRange = range(self.batchSize * k, self.batchSize *\n (k + 1))\n self.backpropagation(batchIndexRange)\n avg_loss += loss\n k += 1\n avg_loss = avg_loss / noOfIterations\n j += 1\n global start_time\n print('--- %s seconds ---' % (time.time() - start_time))\n\n def backpropagation(self, batchIndexRange):\n self.calculateActivationFnDerivative()\n self.getWeightUpdationForOutputLayer(batchIndexRange)\n self.getWeightUpdationForHiddenLayers(batchIndexRange)\n self.updateWeights()\n\n def updateWeights(self):\n for h in range(self.noOfHiddenLayers + 1):\n self.setOfWeights[h, h + 1\n ] -= self.learningRate * self.weightsDelta[h, h + 1]\n self.setOfWeightsForBiasTerm[h, h + 1\n ] -= self.learningRate * self.biasWeightsDelta[h, h + 1]\n\n def getWeightUpdationForHiddenLayers(self, batchIndexRange):\n self.deltaContribution = self.deltaContribution.transpose((0, 2, 1))\n for h in range(self.noOfHiddenLayers, 0, -1):\n weights = self.setOfWeights[h, h + 1]\n activationDerivative = self.activationDerivative[h][batchIndexRange\n ].transpose((0, 2, 1))\n self.deltaContribution = np.matmul(self.deltaContribution, \n weights * activationDerivative)\n 
activationPrevLayer = self.activation[h - 1][batchIndexRange]\n self.weightsDelta[h - 1, h] = np.mean(np.matmul(\n activationPrevLayer, self.deltaContribution), axis=0).T\n self.biasWeightsDelta[h - 1, h] = np.mean(self.\n deltaContribution, axis=0).T\n\n def getWeightUpdationForOutputLayer(self, batchIndexRange):\n self.weightsDelta = {}\n self.biasWeightsDelta = {}\n outputLayerIndex = self.noOfHiddenLayers + 1\n prevLayerToOutputLayerIndex = outputLayerIndex - 1\n predictedOutput = self.predictedOutput[batchIndexRange]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)[\n batchIndexRange]\n lossDerivativeFn = self.loss + 'Derivative'\n lossDerivative = globals()[lossDerivativeFn](predictedOutput,\n expectedOutput)\n self.deltaContribution = lossDerivative * self.activationDerivative[\n outputLayerIndex][batchIndexRange]\n activationAtPrevLayer = self.activation[prevLayerToOutputLayerIndex][\n batchIndexRange]\n self.weightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(np.matmul(self.deltaContribution,\n activationAtPrevLayer.transpose((0, 2, 1))), axis=0)\n self.biasWeightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex\n ] = np.mean(self.deltaContribution, axis=0)\n\n def calculateActivationFnDerivative(self):\n self.activationDerivative = {}\n for h in range(self.noOfHiddenLayers + 1):\n activationDerivativeFn = self.activationFnsForAllLayers[h\n ] + 'Derivative'\n self.activationDerivative[h + 1] = globals()[activationDerivativeFn\n ](self.weightedSums[h + 1])\n\n def getLoss(self):\n lossFn = globals()[self.loss]\n expectedOutput = np.expand_dims(self.expectedOutput, axis=2)\n return lossFn(self.predictedOutput, expectedOutput)\n\n def forwardPass(self, data):\n self.activation = {}\n self.weightedSums = {}\n self.activation[0] = np.expand_dims(data, axis=2)\n for h in range(self.noOfHiddenLayers + 1):\n self.weightedSums[h + 1] = np.matmul(self.setOfWeights[h, h + 1\n ], self.activation[h]) + 
self.setOfWeightsForBiasTerm[h, h + 1]\n activationFnForGivenLayer = self.activationFnsForAllLayers[h]\n self.activation[h + 1] = globals()[activationFnForGivenLayer](self\n .weightedSums[h + 1])\n outputLayerIndex = self.noOfHiddenLayers + 1\n return self.activation[outputLayerIndex]\n\n\n<mask token>\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef tanh(x):\n return np.tanh(x)\n\n\ndef l2_norm_squared(x, y):\n return np.mean((x - y) ** 2) / 2\n\n\ndef l2_norm_squaredDerivative(x, y):\n noOfDataPts = x.shape[0]\n return (x - y) / noOfDataPts\n\n\ndef sigmoidDerivative(x):\n return sigmoid(x) * (1 - sigmoid(x))\n\n\ndef tanhDerivative(x):\n return 1 - tanh(x) ** 2\n\n\ndef ellipseFn(x, a, b):\n return b / a * (a ** 2 - x ** 2) ** 0.5\n\n\ndef runForLinearlySeparableData():\n args = {}\n noOfDataPts = 80\n shuffledIndices = np.random.permutation(noOfDataPts)\n args['inputVectors'] = (np.concatenate((np.random.normal(loc=10, size=[\n 40, 2]), np.random.normal(loc=20, size=[40, 2]))) / 20)[shuffledIndices\n ]\n args['expectedOutput'] = np.concatenate((np.ones(shape=(40, 1)), -np.\n ones(shape=(40, 1))))[shuffledIndices]\n args['noOfEpochs'] = 100000\n args['activationFnsForAllLayers'] = ['tanh'] * 3\n args['noOfUnitsInEachLayer'] = [2, 6, 6, 1]\n args['lossFn'] = 'l2_norm_squared'\n args['learningRate'] = 0.1\n args['batchSize'] = 1\n stochasticGradientObj = stochasticGradient(args)\n stochasticGradientObj.start()\n\n\ndef runForEllipseData():\n inputs = {}\n r = [2, 5]\n h = 0.2\n inputVectorsList = []\n expectedOutput = []\n for i in r:\n t = (i - -i) / h\n x = np.linspace(-i, i, t)\n vectorizedEllipseFn = np.vectorize(ellipseFn)\n y = vectorizedEllipseFn(x, i, i)\n for j in range(len(x)):\n inputVectorsList += [(x[j], -y[j]), (x[j], y[j])]\n if i == 2:\n expectedOutput.append([1])\n expectedOutput.append([1])\n else:\n expectedOutput.append([-1])\n expectedOutput.append([-1])\n perm = np.random.permutation(140)\n inputs['inputVectors'] = 
np.array(inputVectorsList)[perm] / 5\n inputs['expectedOutput'] = np.array(expectedOutput)[perm]\n inputs['noOfEpochs'] = 200000\n inputs['activationFnsForAllLayers'] = ['tanh', 'tanh']\n inputs['noOfUnitsInEachLayer'] = [2, 3, 1]\n inputs['lossFn'] = 'l2_norm_squared'\n inputs['learningRate'] = 0.5\n inputs['batchSize'] = 140\n stochasticGradientObj = stochasticGradient(inputs)\n stochasticGradientObj.start()\n\n\ndef runForXORdata():\n inputs = {}\n inputs['inputVectors'] = np.array([[0, 0], [0, 1], [1, 1], [1, 0]])\n inputs['expectedOutput'] = np.array([[-1], [1], [-1], [1]])\n inputs['noOfEpochs'] = 200000\n inputs['activationFnsForAllLayers'] = ['tanh', 'tanh']\n inputs['noOfUnitsInEachLayer'] = [2, 3, 1]\n inputs['lossFn'] = 'l2_norm_squared'\n inputs['learningRate'] = 0.05\n inputs['batchSize'] = 1\n stochasticGradientObj = stochasticGradient(inputs)\n stochasticGradientObj.start()\n\n\nrunForLinearlySeparableData()\nrunForEllipseData()\nrunForXORdata()\n",
"step-5": "import time\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nclass stochasticGradient :\r\n def __init__( self , kwargs ) :\r\n self.inputVectors = kwargs[\"inputVectors\"]\r\n self.expectedOutput = kwargs[\"expectedOutput\"]\r\n self.noOfEpochs = kwargs[\"noOfEpochs\"]\r\n self.activationFnsForAllLayers = kwargs[\"activationFnsForAllLayers\"]\r\n self.noOfUnitsInEachLayer = kwargs[\"noOfUnitsInEachLayer\"]\r\n self.loss = kwargs[\"lossFn\"]\r\n self.learningRate = kwargs[\"learningRate\"]\r\n self.batchSize = kwargs[\"batchSize\"]\r\n self.noOfHiddenLayers = len(self.noOfUnitsInEachLayer) - 2\r\n\r\n def start(self) :\r\n self.setInitialWeights()\r\n self.startAlgo()\r\n self.plotLoss()\r\n self.plotDecisionBoundary() #Can only be used in case of 2-D data\r\n\r\n def plotDecisionBoundary(self) :\r\n x_min = np.floor(min( self.inputVectors[:,0] ))\r\n x_max = np.ceil(max( self.inputVectors[:,0] ))\r\n y_min = np.floor(min( self.inputVectors[:,1] ))\r\n y_max = np.ceil(max( self.inputVectors[:,1] ))\r\n input = [(x, y) for x in np.arange(x_min, x_max, .05) for y in np.arange(y_min, y_max, .05)]\r\n inputT = np.array( input )\r\n output = self.forwardPass( inputT )\r\n for i in range(len(output)):\r\n if output[i] == 0:\r\n plt.plot(input[i][0], input[i][1], 'co')\r\n elif output[i] < 0:\r\n plt.plot(input[i][0], input[i][1], 'r.')\r\n elif output[i] > 0:\r\n plt.plot(input[i][0], input[i][1], 'b.')\r\n self.plotData()\r\n plt.show()\r\n\r\n def plotData(self) :\r\n expectedOutputAsList = list(self.expectedOutput[:])\r\n positiveIndices = [i for i, x in enumerate(expectedOutputAsList) if x == 1]\r\n negativeIndices = [i for i, x in enumerate(expectedOutputAsList) if x == -1]\r\n positiveX = [self.inputVectors[j][0] for j in positiveIndices]\r\n positiveY = [self.inputVectors[j][1] for j in positiveIndices]\r\n negativeX = [self.inputVectors[j][0] for j in negativeIndices]\r\n negativeY = [self.inputVectors[j][1] for j in negativeIndices]\r\n 
plt.scatter(positiveX , positiveY , color = \"blue\" , marker = \"X\" )\r\n plt.scatter(negativeX , negativeY , color = \"red\" , marker = \"X\" )\r\n\r\n def plotLoss(self) :\r\n plt.plot(range(len(self.loss_list)) , self.loss_list , \"--\")\r\n plt.show()\r\n\r\n def setInitialWeights(self) :\r\n self.setOfWeights = {}\r\n self.setOfWeightsForBiasTerm = {}\r\n for i in range(self.noOfHiddenLayers + 1) :\r\n noOfUnitsInNextLayer = self.noOfUnitsInEachLayer[i+1]\r\n noOfUnitsInCurrentLayer = self.noOfUnitsInEachLayer[i]\r\n self.setOfWeightsForBiasTerm[i, i+1] = np.zeros(shape = (noOfUnitsInNextLayer, 1))\r\n self.setOfWeights[i, i+1] = np.random.normal(size = (noOfUnitsInNextLayer, noOfUnitsInCurrentLayer))\r\n\r\n def startAlgo(self) :\r\n self.loss_list = []\r\n j = 0\r\n avg_loss = 100\r\n noOfIterations = self.inputVectors.shape[0]//self.batchSize\r\n while j < self.noOfEpochs and avg_loss >= 0.01 :\r\n k = 0\r\n avg_loss = 0\r\n while k < noOfIterations :\r\n self.predictedOutput = self.forwardPass( self.inputVectors )\r\n loss = self.getLoss()\r\n self.loss_list.append( loss )\r\n batchIndexRange = range( self.batchSize*k , (self.batchSize*(k+1)))\r\n self.backpropagation( batchIndexRange )\r\n avg_loss += loss\r\n k += 1\r\n avg_loss = avg_loss/noOfIterations\r\n j += 1\r\n# print(\"list(zip(self.predictedOutput , self.expectedOutput)) : \" , list(zip(self.predictedOutput , self.expectedOutput)))\r\n global start_time\r\n print(\"--- %s seconds ---\" %(time.time()-start_time))\r\n\r\n def backpropagation(self , batchIndexRange) :\r\n self.calculateActivationFnDerivative()\r\n self.getWeightUpdationForOutputLayer( batchIndexRange )\r\n self.getWeightUpdationForHiddenLayers( batchIndexRange )\r\n self.updateWeights()\r\n\r\n def updateWeights(self) :\r\n for h in range(self.noOfHiddenLayers + 1) :\r\n self.setOfWeights[h,h+1] -= self.learningRate * self.weightsDelta[h,h+1]\r\n self.setOfWeightsForBiasTerm[h,h+1] -= self.learningRate * 
self.biasWeightsDelta[h,h+1]\r\n\r\n def getWeightUpdationForHiddenLayers(self , batchIndexRange) :\r\n self.deltaContribution = self.deltaContribution.transpose((0,2,1))\r\n for h in range(self.noOfHiddenLayers, 0, -1) :\r\n weights = self.setOfWeights[h, h+1]\r\n activationDerivative = self.activationDerivative[h][batchIndexRange].transpose((0,2,1))\r\n self.deltaContribution = np.matmul(self.deltaContribution , weights * activationDerivative)\r\n activationPrevLayer = self.activation[h-1][batchIndexRange]\r\n self.weightsDelta[h-1,h] = np.mean(np.matmul(activationPrevLayer , self.deltaContribution) , axis=0).T\r\n self.biasWeightsDelta[h-1,h] = np.mean(self.deltaContribution , axis=0).T\r\n\r\n def getWeightUpdationForOutputLayer(self , batchIndexRange) :\r\n self.weightsDelta = {}\r\n self.biasWeightsDelta = {}\r\n outputLayerIndex = self.noOfHiddenLayers+1\r\n prevLayerToOutputLayerIndex = outputLayerIndex-1\r\n predictedOutput = self.predictedOutput[batchIndexRange]\r\n expectedOutput = np.expand_dims(self.expectedOutput , axis=2)[batchIndexRange]\r\n lossDerivativeFn = self.loss + \"Derivative\"\r\n lossDerivative = globals()[lossDerivativeFn](predictedOutput, expectedOutput)\r\n self.deltaContribution = lossDerivative * self.activationDerivative[outputLayerIndex][batchIndexRange]\r\n activationAtPrevLayer = self.activation[prevLayerToOutputLayerIndex][batchIndexRange]\r\n self.weightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex] = np.mean(np.matmul( self.deltaContribution , activationAtPrevLayer.transpose((0, 2, 1))) , axis=0)\r\n self.biasWeightsDelta[prevLayerToOutputLayerIndex, outputLayerIndex] = np.mean(self.deltaContribution , axis=0)\r\n\r\n def calculateActivationFnDerivative(self) :\r\n self.activationDerivative = {}\r\n for h in range( self.noOfHiddenLayers+1 ) :\r\n activationDerivativeFn = self.activationFnsForAllLayers[h] + \"Derivative\"\r\n self.activationDerivative[h+1] = globals()[activationDerivativeFn]( self.weightedSums[h+1] 
)\r\n\r\n def getLoss(self) :\r\n lossFn = globals()[ self.loss ]\r\n expectedOutput = np.expand_dims(self.expectedOutput , axis=2)\r\n return lossFn( self.predictedOutput , expectedOutput )\r\n\r\n def forwardPass(self , data) :\r\n self.activation = {}\r\n self.weightedSums = {}\r\n self.activation[0] = np.expand_dims( data , axis = 2 )\r\n for h in range( self.noOfHiddenLayers+1 ) :\r\n self.weightedSums[h+1] = np.matmul(self.setOfWeights[h,h+1] , self.activation[h]) + self.setOfWeightsForBiasTerm[h, h+1]\r\n activationFnForGivenLayer = self.activationFnsForAllLayers[h]\r\n self.activation[h+1] = globals()[activationFnForGivenLayer]( self.weightedSums[h+1] )\r\n outputLayerIndex = self.noOfHiddenLayers + 1\r\n return self.activation[outputLayerIndex]\r\n\r\nstart_time = time.time()\r\n\r\ndef sigmoid(x) :\r\n return 1/(1+np.exp(-x))\r\n\r\ndef tanh(x) :\r\n return np.tanh(x)\r\n\r\ndef l2_norm_squared(x, y) :\r\n return np.mean((x-y)**2)/2\r\n\r\ndef l2_norm_squaredDerivative(x, y) :\r\n noOfDataPts = x.shape[0]\r\n return (x-y)/noOfDataPts\r\n\r\ndef sigmoidDerivative(x) :\r\n return sigmoid(x)*(1-sigmoid(x))\r\n\r\ndef tanhDerivative(x) :\r\n return (1-tanh(x) ** 2)\r\n\r\ndef ellipseFn(x , a , b) :\r\n return (b/a)*((a**2-x**2)**0.5)\r\n\r\n# CREATING LINEARLY SEPARABLE DATA\r\ndef runForLinearlySeparableData() :\r\n args = {}\r\n noOfDataPts = 80\r\n shuffledIndices = np.random.permutation( noOfDataPts )\r\n args[\"inputVectors\"] = (np.concatenate((np.random.normal(loc=10, size=[40, 2]), np.random.normal(loc=20, size=[40, 2]))) / 20)[shuffledIndices]\r\n args[\"expectedOutput\"] = (np.concatenate((np.ones(shape=(40, 1)), -np.ones(shape=(40, 1)))))[shuffledIndices]\r\n args[\"noOfEpochs\"] = 100000\r\n args[\"activationFnsForAllLayers\"] = [\"tanh\"]*3\r\n args[\"noOfUnitsInEachLayer\"] = [ 2, 6, 6, 1 ]\r\n args[\"lossFn\"] = \"l2_norm_squared\"\r\n args[\"learningRate\"] = 0.1\r\n args[\"batchSize\"] = 1\r\n stochasticGradientObj = stochasticGradient( args 
)\r\n stochasticGradientObj.start()\r\n\r\n# CREATING TWO CONCENTRIC ELLIPSES\r\ndef runForEllipseData() :\r\n inputs = {}\r\n r = [ 2 , 5 ]\r\n h = 0.2\r\n inputVectorsList = []\r\n expectedOutput = []\r\n for i in r :\r\n t = (i-(-i))/h\r\n x = np.linspace(-i , i , t)\r\n vectorizedEllipseFn = np.vectorize( ellipseFn )\r\n y = vectorizedEllipseFn( x , i , i )\r\n for j in range(len(x)):\r\n inputVectorsList += [(x[j], -y[j]), (x[j], y[j])]\r\n if i == 2 :\r\n expectedOutput.append([1])\r\n expectedOutput.append([1])\r\n else:\r\n expectedOutput.append([-1])\r\n expectedOutput.append([-1])\r\n perm = np.random.permutation(140)\r\n inputs[\"inputVectors\"] = np.array(inputVectorsList)[perm]/5\r\n inputs[\"expectedOutput\"] = np.array(expectedOutput)[perm]\r\n inputs[\"noOfEpochs\"] = 200000\r\n inputs[\"activationFnsForAllLayers\"] = [\"tanh\" , \"tanh\" ]\r\n inputs[\"noOfUnitsInEachLayer\"] = [ 2 , 3 , 1 ]\r\n inputs[\"lossFn\"] = \"l2_norm_squared\"\r\n inputs[\"learningRate\"] = 0.5\r\n inputs[\"batchSize\"] = 140\r\n stochasticGradientObj = stochasticGradient( inputs )\r\n stochasticGradientObj.start()\r\n\r\n# CREATING XOR DATA\r\ndef runForXORdata() :\r\n inputs = {}\r\n inputs[\"inputVectors\"] = np.array([[0,0] , [0,1] , [1,1] , [1,0]])\r\n inputs[\"expectedOutput\"] = np.array([[-1],[1],[-1],[1]])\r\n inputs[\"noOfEpochs\"] = 200000\r\n inputs[\"activationFnsForAllLayers\"] = [\"tanh\" , \"tanh\" ]\r\n inputs[\"noOfUnitsInEachLayer\"] = [ 2 , 3 , 1 ]\r\n inputs[\"lossFn\"] = \"l2_norm_squared\"\r\n inputs[\"learningRate\"] = 0.05\r\n inputs[\"batchSize\"] = 1\r\n stochasticGradientObj = stochasticGradient( inputs )\r\n stochasticGradientObj.start()\r\n\r\nrunForLinearlySeparableData()\r\nrunForEllipseData()\r\nrunForXORdata()\r\n",
"step-ids": [
17,
18,
23,
26,
29
]
}
|
[
17,
18,
23,
26,
29
] |
<|reserved_special_token_0|>
class lexicon0(db.Model):
word = db.StringProperty(required=True)
known = db.StringListProperty(indexed=False)
<|reserved_special_token_0|>
def getjp(before, wordlist, after):
global REQUESTURL
wordli = wordlist
string = ''
for x in wordli:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i]) / querylength(wordli[i])
dictionary = dict(zip(wordli, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
def getjp1(before, wordlist, after):
global REQUESTURL
string = ''
for x in wordlist:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i])
dictionary = dict(zip(wordlist, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
class mainpage(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
if len(self.request.get('m')):
MONTH = str(self.request.get('m'))
if len(self.request.get('d')):
DATASET = str(self.request.get('d'))
if len(self.request.get('ng')):
NGRAM = str(self.request.get('ng'))
if len(self.request.get('pp')):
PROB = str(self.request.get('pp'))
REQUESTURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +
'?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
GENURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM +
'/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
query = str(self.request.get('q'))
wordlist = query.strip().split()
dictionary = dict()
try:
cquery = combination(wordlist, 0)[0]
except:
cquery = query
try:
wordlist = query.strip().split()
squery = spacesplits(wordlist)[0]
except:
squery = query
try:
dictionary.update(getdictionary(wordlist))
except:
dictionary.update({query: 0})
try:
if query != cquery:
dictionary.update(getdictionary(cquery.split()))
except:
dictionary.update({cquery: 0})
try:
if query != squery:
dictionary.update(getdictionary(squery.split()))
except:
dictionary.update({squery: 0})
finallist = dictionary.keys()
self.response.headers['Content-Type'] = 'text/plain'
try:
result = getjp('', finallist, '')
final = list()
for i in range(len(result)):
final.append(10 ** result[i][1])
printresult = normalize(final)
for i in range(len(printresult)):
self.response.out.write(str(result[i][0]) + '\t' +
printresult[i] + '\n')
except:
self.response.out.write(query + '\t' + str(1))
class maintest(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(REQUESTURL + '\n')
self.response.out.write(GENURL)
<|reserved_special_token_0|>
class splittest(webapp.RequestHandler):
def get(self):
query = self.request.get('q')
wordlist = query.split()
splitted = combination(wordlist, 0)
self.response.out.write(splitted)
<|reserved_special_token_0|>
def listtostr(wordlist):
string = ''
for word in wordlist:
string += word + ' '
string = string.strip()
return string
def normalize(problist):
tot = 0
for x in problist:
tot += x
returnlist = list()
for i in range(len(problist)):
returnlist.append(str(round(problist[i] / tot, 3)))
return returnlist
<|reserved_special_token_0|>
def main():
run_wsgi_app(application)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class lexicon0(db.Model):
word = db.StringProperty(required=True)
known = db.StringListProperty(indexed=False)
def lexicon_key(lexicon_name=None):
return db.Key.from_path('lexicon0', lexicon_name or 'default')
<|reserved_special_token_0|>
def getjp(before, wordlist, after):
global REQUESTURL
wordli = wordlist
string = ''
for x in wordli:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i]) / querylength(wordli[i])
dictionary = dict(zip(wordli, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
def getjp1(before, wordlist, after):
global REQUESTURL
string = ''
for x in wordlist:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i])
dictionary = dict(zip(wordlist, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
class mainpage(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
if len(self.request.get('m')):
MONTH = str(self.request.get('m'))
if len(self.request.get('d')):
DATASET = str(self.request.get('d'))
if len(self.request.get('ng')):
NGRAM = str(self.request.get('ng'))
if len(self.request.get('pp')):
PROB = str(self.request.get('pp'))
REQUESTURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +
'?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
GENURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM +
'/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
query = str(self.request.get('q'))
wordlist = query.strip().split()
dictionary = dict()
try:
cquery = combination(wordlist, 0)[0]
except:
cquery = query
try:
wordlist = query.strip().split()
squery = spacesplits(wordlist)[0]
except:
squery = query
try:
dictionary.update(getdictionary(wordlist))
except:
dictionary.update({query: 0})
try:
if query != cquery:
dictionary.update(getdictionary(cquery.split()))
except:
dictionary.update({cquery: 0})
try:
if query != squery:
dictionary.update(getdictionary(squery.split()))
except:
dictionary.update({squery: 0})
finallist = dictionary.keys()
self.response.headers['Content-Type'] = 'text/plain'
try:
result = getjp('', finallist, '')
final = list()
for i in range(len(result)):
final.append(10 ** result[i][1])
printresult = normalize(final)
for i in range(len(printresult)):
self.response.out.write(str(result[i][0]) + '\t' +
printresult[i] + '\n')
except:
self.response.out.write(query + '\t' + str(1))
class maintest(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(REQUESTURL + '\n')
self.response.out.write(GENURL)
<|reserved_special_token_0|>
class splittest(webapp.RequestHandler):
def get(self):
query = self.request.get('q')
wordlist = query.split()
splitted = combination(wordlist, 0)
self.response.out.write(splitted)
<|reserved_special_token_0|>
def listtostr(wordlist):
string = ''
for word in wordlist:
string += word + ' '
string = string.strip()
return string
def normalize(problist):
tot = 0
for x in problist:
tot += x
returnlist = list()
for i in range(len(problist)):
returnlist.append(str(round(problist[i] / tot, 3)))
return returnlist
<|reserved_special_token_0|>
def main():
run_wsgi_app(application)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class lexicon0(db.Model):
word = db.StringProperty(required=True)
known = db.StringListProperty(indexed=False)
def lexicon_key(lexicon_name=None):
return db.Key.from_path('lexicon0', lexicon_name or 'default')
def combination(wordlist, t):
tempc = wordlist
combinationqueryset = [listtostr(tempc[:i] + ['%s%s' % (tempc[i], tempc
[i + 1])] + tempc[i + 2:]) for i in range(0, len(tempc) - 1)]
cquery = listtostr(tempc)
combinationqueryset.append(cquery)
results = getjp1('', combinationqueryset, '')
dictionary = dict(results)
x = results.index((cquery, dictionary[cquery]))
if t == 0:
t = dictionary[cquery]
if results[0][0] == cquery:
return cquery, results[0][1], t
else:
dictionary = dict(results)
x = results.index((cquery, dictionary[cquery]))
y = list()
for i in range(x):
y.append(combinationqueryset.index(results[i][0]))
y.sort(reverse=True)
cache = wordlist
for z in y:
cache[z] += cache[z + 1]
del cache[z + 1]
return combination(cache, t)
<|reserved_special_token_0|>
def getjp(before, wordlist, after):
global REQUESTURL
wordli = wordlist
string = ''
for x in wordli:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i]) / querylength(wordli[i])
dictionary = dict(zip(wordli, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
def getjp1(before, wordlist, after):
global REQUESTURL
string = ''
for x in wordlist:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i])
dictionary = dict(zip(wordlist, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
class mainpage(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
if len(self.request.get('m')):
MONTH = str(self.request.get('m'))
if len(self.request.get('d')):
DATASET = str(self.request.get('d'))
if len(self.request.get('ng')):
NGRAM = str(self.request.get('ng'))
if len(self.request.get('pp')):
PROB = str(self.request.get('pp'))
REQUESTURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +
'?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
GENURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM +
'/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
query = str(self.request.get('q'))
wordlist = query.strip().split()
dictionary = dict()
try:
cquery = combination(wordlist, 0)[0]
except:
cquery = query
try:
wordlist = query.strip().split()
squery = spacesplits(wordlist)[0]
except:
squery = query
try:
dictionary.update(getdictionary(wordlist))
except:
dictionary.update({query: 0})
try:
if query != cquery:
dictionary.update(getdictionary(cquery.split()))
except:
dictionary.update({cquery: 0})
try:
if query != squery:
dictionary.update(getdictionary(squery.split()))
except:
dictionary.update({squery: 0})
finallist = dictionary.keys()
self.response.headers['Content-Type'] = 'text/plain'
try:
result = getjp('', finallist, '')
final = list()
for i in range(len(result)):
final.append(10 ** result[i][1])
printresult = normalize(final)
for i in range(len(printresult)):
self.response.out.write(str(result[i][0]) + '\t' +
printresult[i] + '\n')
except:
self.response.out.write(query + '\t' + str(1))
class maintest(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(REQUESTURL + '\n')
self.response.out.write(GENURL)
def getdictionary(wordelist):
global MONTH, DATASET, NGRAM, PROB
dictionaryy = dict()
rpcs = []
for i in range(len(wordelist)):
if i < 3:
t = 0
else:
t = i - 3
form_fields = {'word': wordelist[i], 'before': listtostr(wordelist[
t:i]), 'after': listtostr(wordelist[i + 1:i + 4]), 'm': MONTH,
'd': DATASET, 'ng': NGRAM, 'pp': PROB}
formdata = urllib.urlencode(form_fields)
rpc = urlfetch.create_rpc()
url = 'http://timetest.forbackend.appspot.com/wordspellcheck'
urlfetch.make_fetch_call(rpc, url, payload=formdata, method=
urlfetch.POST)
rpcs.append(rpc)
resultts = list()
for rpc in rpcs:
result = rpc.get_result()
resultts.append(result.content)
dictionaryy[listtostr(wordelist)] = 0
for i in range(len(wordelist)):
if resultts[i] == wordelist[i]:
continue
else:
for j in range(i, len(wordelist) + 1):
pp = listtostr(wordelist[:i] + resultts[i:j] + wordelist[j:])
dictionaryy[pp] = 0
return dictionaryy
class splittest(webapp.RequestHandler):
def get(self):
query = self.request.get('q')
wordlist = query.split()
splitted = combination(wordlist, 0)
self.response.out.write(splitted)
def querylength(query):
liste = query.split()
counte = 0
for x in liste:
if len(x) > 1:
counte += 1
if counte == 0:
return 1
else:
return counte
def listtostr(wordlist):
string = ''
for word in wordlist:
string += word + ' '
string = string.strip()
return string
def normalize(problist):
tot = 0
for x in problist:
tot += x
returnlist = list()
for i in range(len(problist)):
returnlist.append(str(round(problist[i] / tot, 3)))
return returnlist
<|reserved_special_token_0|>
def main():
run_wsgi_app(application)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import re
import cgi
import os
import urllib
import urllib2
from time import sleep
from google.appengine.api import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import backends
from google.appengine.api import logservice
logservice.AUTOFLUSH_EVERY_SECONDS = None
logservice.AUTOFLUSH_EVERY_BYTES = None
logservice.AUTOFLUSH_ENABLED = False
MONTH = 'jun09'
NGRAM = '3'
PROB = 'jp'
DATASET = 'bing-body'
REQUESTURL = ('http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +
'?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
GENURL = ('http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM +
'/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
class lexicon0(db.Model):
word = db.StringProperty(required=True)
known = db.StringListProperty(indexed=False)
def lexicon_key(lexicon_name=None):
return db.Key.from_path('lexicon0', lexicon_name or 'default')
def combination(wordlist, t):
tempc = wordlist
combinationqueryset = [listtostr(tempc[:i] + ['%s%s' % (tempc[i], tempc
[i + 1])] + tempc[i + 2:]) for i in range(0, len(tempc) - 1)]
cquery = listtostr(tempc)
combinationqueryset.append(cquery)
results = getjp1('', combinationqueryset, '')
dictionary = dict(results)
x = results.index((cquery, dictionary[cquery]))
if t == 0:
t = dictionary[cquery]
if results[0][0] == cquery:
return cquery, results[0][1], t
else:
dictionary = dict(results)
x = results.index((cquery, dictionary[cquery]))
y = list()
for i in range(x):
y.append(combinationqueryset.index(results[i][0]))
y.sort(reverse=True)
cache = wordlist
for z in y:
cache[z] += cache[z + 1]
del cache[z + 1]
return combination(cache, t)
def spacesplits(wordlist):
temps = wordlist
query = listtostr(temps)
strings = []
for i in range(len(temps)):
for y in range(1, len(temps[i])):
strings.append(listtostr(temps[:i] + list([temps[i][:y], temps[
i][y:]]) + temps[i + 1:]))
strings.append(query)
results = getjp1('', strings, '')
if results[0][0] == query:
return query, results[0][1]
else:
return spacesplits(results[0][0].split())
def getjp(before, wordlist, after):
global REQUESTURL
wordli = wordlist
string = ''
for x in wordli:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i]) / querylength(wordli[i])
dictionary = dict(zip(wordli, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
def getjp1(before, wordlist, after):
global REQUESTURL
string = ''
for x in wordlist:
string += before + ' ' + str(x) + ' ' + after + '\n'
string = string.strip()
jps = list()
jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(
).split()
for i in range(len(jps)):
jps[i] = float(jps[i])
dictionary = dict(zip(wordlist, jps))
return sorted(dictionary.iteritems(), key=lambda entity: entity[1],
reverse=True)
class mainpage(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
if len(self.request.get('m')):
MONTH = str(self.request.get('m'))
if len(self.request.get('d')):
DATASET = str(self.request.get('d'))
if len(self.request.get('ng')):
NGRAM = str(self.request.get('ng'))
if len(self.request.get('pp')):
PROB = str(self.request.get('pp'))
REQUESTURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +
'?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
GENURL = (
'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +
DATASET + '/' + MONTH + '/' + NGRAM +
'/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')
query = str(self.request.get('q'))
wordlist = query.strip().split()
dictionary = dict()
try:
cquery = combination(wordlist, 0)[0]
except:
cquery = query
try:
wordlist = query.strip().split()
squery = spacesplits(wordlist)[0]
except:
squery = query
try:
dictionary.update(getdictionary(wordlist))
except:
dictionary.update({query: 0})
try:
if query != cquery:
dictionary.update(getdictionary(cquery.split()))
except:
dictionary.update({cquery: 0})
try:
if query != squery:
dictionary.update(getdictionary(squery.split()))
except:
dictionary.update({squery: 0})
finallist = dictionary.keys()
self.response.headers['Content-Type'] = 'text/plain'
try:
result = getjp('', finallist, '')
final = list()
for i in range(len(result)):
final.append(10 ** result[i][1])
printresult = normalize(final)
for i in range(len(printresult)):
self.response.out.write(str(result[i][0]) + '\t' +
printresult[i] + '\n')
except:
self.response.out.write(query + '\t' + str(1))
class maintest(webapp.RequestHandler):
def get(self):
global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(REQUESTURL + '\n')
self.response.out.write(GENURL)
def getdictionary(wordelist):
global MONTH, DATASET, NGRAM, PROB
dictionaryy = dict()
rpcs = []
for i in range(len(wordelist)):
if i < 3:
t = 0
else:
t = i - 3
form_fields = {'word': wordelist[i], 'before': listtostr(wordelist[
t:i]), 'after': listtostr(wordelist[i + 1:i + 4]), 'm': MONTH,
'd': DATASET, 'ng': NGRAM, 'pp': PROB}
formdata = urllib.urlencode(form_fields)
rpc = urlfetch.create_rpc()
url = 'http://timetest.forbackend.appspot.com/wordspellcheck'
urlfetch.make_fetch_call(rpc, url, payload=formdata, method=
urlfetch.POST)
rpcs.append(rpc)
resultts = list()
for rpc in rpcs:
result = rpc.get_result()
resultts.append(result.content)
dictionaryy[listtostr(wordelist)] = 0
for i in range(len(wordelist)):
if resultts[i] == wordelist[i]:
continue
else:
for j in range(i, len(wordelist) + 1):
pp = listtostr(wordelist[:i] + resultts[i:j] + wordelist[j:])
dictionaryy[pp] = 0
return dictionaryy
class splittest(webapp.RequestHandler):
def get(self):
query = self.request.get('q')
wordlist = query.split()
splitted = combination(wordlist, 0)
self.response.out.write(splitted)
def querylength(query):
liste = query.split()
counte = 0
for x in liste:
if len(x) > 1:
counte += 1
if counte == 0:
return 1
else:
return counte
def listtostr(wordlist):
string = ''
for word in wordlist:
string += word + ' '
string = string.strip()
return string
def normalize(problist):
tot = 0
for x in problist:
tot += x
returnlist = list()
for i in range(len(problist)):
returnlist.append(str(round(problist[i] / tot, 3)))
return returnlist
application = webapp.WSGIApplication([('/mainpage', maintest), ('/maintest',
mainpage), ('/split', splittest)], debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import re
import cgi
import os
import urllib
import urllib2
from time import sleep
from google.appengine.api import taskqueue
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import backends
from google.appengine.api import logservice
logservice.AUTOFLUSH_EVERY_SECONDS = None
logservice.AUTOFLUSH_EVERY_BYTES = None
logservice.AUTOFLUSH_ENABLED = False
MONTH = "jun09"
NGRAM = "3"
PROB = "jp"
DATASET = "bing-body"
REQUESTURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/"+PROB+"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
GENURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
class lexicon0(db.Model):
word = db.StringProperty(required = True)
known = db.StringListProperty(indexed = False)
def lexicon_key(lexicon_name=None):
return db.Key.from_path('lexicon0', lexicon_name or 'default')
def combination(wordlist,t):#argument t is to notify that it is the main query while using cobination for first time
tempc = wordlist
combinationqueryset = [listtostr(tempc[:i] +
["%s%s"%(tempc[i],tempc[i+1])] +
tempc[i+2:] ) for i in range(0, len(tempc)-1)]
cquery = listtostr(tempc)
combinationqueryset.append(cquery)
results = getjp1('',combinationqueryset,'')
dictionary = dict(results)
x = results.index((cquery,dictionary[cquery]))
if (t==0): t = dictionary[cquery]
if (results[0][0] == cquery):
return (cquery,results[0][1],t)
else:
dictionary = dict(results)
x = results.index((cquery,dictionary[cquery]))
y = list()
for i in range(x):
y.append(combinationqueryset.index(results[i][0]))
y.sort(reverse = True)
cache = wordlist
for z in y:
cache[z] += cache[z+1]
del cache[z+1]
return combination(cache,t)
def spacesplits(wordlist):
temps = wordlist
query = listtostr(temps)
strings = []
for i in range(len(temps)):
for y in range(1,len(temps[i])):
strings.append(listtostr(temps[:i]+list([temps[i][:y],temps[i][y:]])+temps[i+1:]))
strings.append(query)
results = getjp1('',strings,'')
if (results[0][0] == query):
return (query,results[0][1])
else:
return spacesplits(results[0][0].split())
def getjp(before,wordlist,after):
global REQUESTURL
wordli = wordlist
string = ''
for x in wordli:
string += before+" "+str(x)+" "+after+"\n"
string = string.strip()
jps = list()
jps = urllib2.urlopen(
urllib2.Request(REQUESTURL,str(string))).read().split()
for i in range(len(jps)):
jps[i] = float(jps[i])/(querylength(wordli[i]))
dictionary = dict(zip(wordli,jps))
return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)
def getjp1(before,wordlist,after):
global REQUESTURL
string = ''
for x in wordlist:
string += before+" "+str(x)+" "+after+"\n"
string = string.strip()
jps = list()
jps = urllib2.urlopen(
urllib2.Request(REQUESTURL,str(string))).read().split()
for i in range(len(jps)):
jps[i] = float(jps[i])
dictionary = dict(zip(wordlist,jps))
return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)
class mainpage(webapp.RequestHandler):
def get(self):
global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL
if len(self.request.get('m')):
MONTH = str(self.request.get('m'))
if len(self.request.get('d')):
DATASET = str(self.request.get('d'))
if len(self.request.get('ng')):
NGRAM = str(self.request.get('ng'))
if len(self.request.get('pp')):
PROB = str(self.request.get('pp'))
REQUESTURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/"+PROB+"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
GENURL = "http://web-ngram.research.microsoft.com/rest/lookup.svc/"+DATASET+"/"+MONTH+"/"+NGRAM+"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e"
query = str(self.request.get('q'))
wordlist = query.strip().split()
dictionary = dict()
try:
cquery = combination(wordlist,0)[0]
except:
cquery = query
try:
wordlist = query.strip().split()
squery = spacesplits(wordlist)[0]
except:
squery = query
try: dictionary.update(getdictionary(wordlist))
except:
dictionary.update({query:0})
try:
if (query != cquery): dictionary.update(getdictionary(cquery.split()))
except: dictionary.update({cquery:0})
try:
if (query != squery): dictionary.update(getdictionary(squery.split()))
except: dictionary.update({squery:0})
finallist = dictionary.keys()
self.response.headers['Content-Type'] = 'text/plain'
try:
result = getjp('',finallist,'')
final = list()
for i in range(len(result)):
final.append(10**((result[i][1])))
printresult = normalize(final)
for i in range(len(printresult)):
self.response.out.write(str(result[i][0])+"\t"+printresult[i]+"\n")
except:
self.response.out.write(query+"\t"+str(1))
class maintest(webapp.RequestHandler):
    """Diagnostic handler: dumps the currently configured service URLs.

    NOTE: routed at /mainpage -- the route table at the bottom of the
    file deliberately swaps this class with ``mainpage``.
    """
    def get(self):
        global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL
        response = self.response
        response.headers['Content-Type'] = 'text/plain'
        # Lookup URL on the first line, generation URL on the second.
        response.out.write(REQUESTURL+"\n")
        response.out.write(GENURL)
def getdictionary(wordelist):
    """Build the candidate pool of per-word spelling corrections.

    For every word an asynchronous urlfetch RPC asks the backend
    spellcheck service for a replacement given up to three words of
    context on each side.  Returns a dict mapping candidate query
    strings (the original plus every variant substituting a run of
    corrected words) to a placeholder score of 0.
    """
    global MONTH,DATASET,NGRAM,PROB
    dictionaryy = dict()
    rpcs = []
    # Fire one async RPC per word so all lookups run in parallel.
    for i in range(len(wordelist)):
        # t = start index of the (up to 3-word) left context window.
        if i<3: t=0
        else: t = i-3
        form_fields = {
          "word": wordelist[i],
          "before": listtostr(wordelist[t:i]),
          "after": listtostr(wordelist[i+1:i+4]),
          "m": MONTH,
          "d": DATASET,
          "ng": NGRAM,
          "pp": PROB
          }
        formdata = urllib.urlencode(form_fields)
        rpc = urlfetch.create_rpc()
        url = "http://timetest.forbackend.appspot.com/wordspellcheck"
        #rpc.callback = create_callback(rpc)
        urlfetch.make_fetch_call(rpc,
                                url,
                                payload = formdata,
                                method = urlfetch.POST)
        rpcs.append(rpc)
    # Collect responses in order; resultts[i] is presumably the backend's
    # correction for wordelist[i] -- confirm the backend's contract.
    resultts = list()
    for rpc in rpcs:
        result = rpc.get_result()
        resultts.append(result.content)
    #self.response.out.write(results)
    #self.response.out.write(wordee)
    # The unmodified query is always a candidate.
    dictionaryy[listtostr(wordelist)] = 0
    # For each word whose correction differs, add every variant that
    # replaces the contiguous run of words [i, j) with corrections.
    for i in range(len(wordelist)):
        if resultts[i] == wordelist[i]: continue
        else:
            for j in range(i,len(wordelist)+1):
                pp = listtostr(wordelist[:i]+resultts[i:j]+wordelist[j:])
                dictionaryy[pp] = 0
    return dictionaryy
class splittest(webapp.RequestHandler):
    """Debug handler: shows the word-joining (combination) result for ?q=."""
    def get(self):
        tokens = self.request.get('q').split()
        joined = combination(tokens, 0)
        self.response.out.write(joined)
def querylength(query):
    """Count the words in ``query`` that are longer than one character.

    Returns at least 1, so the result stays safe to divide by.
    """
    significant = sum(1 for token in query.split() if len(token) > 1)
    # Floor of 1 when every token is a single character (or query is empty).
    return significant or 1
def listtostr(wordlist):
    """Join ``wordlist`` into one space-separated string.

    Matches the historical behaviour exactly: each word gets a trailing
    space and the final result is stripped, which also trims any
    whitespace at the very ends of the first/last word.
    """
    joined = "".join(word + " " for word in wordlist)
    return joined.strip()
#def create_callback(rpc):
def normalize(problist):
    """Scale ``problist`` so the values sum to 1.

    Returns each share as a string rounded to 3 decimal places,
    preserving the input order.
    """
    total = sum(problist)
    return [str(round(value / total, 3)) for value in problist]
# Route table.  NOTE: the names are deliberately crossed -- the class
# ``mainpage`` (the real speller) is served at /maintest because that was
# the officially submitted endpoint, while /mainpage serves the
# diagnostic ``maintest`` handler.
application = webapp.WSGIApplication([
    ('/mainpage',maintest),#### the main speller is in main page web handler as i submitted maintest as the official submission i changed this
    ('/maintest',mainpage),
    ('/split',splittest)],
    debug = True)
def main():
    """App Engine CGI entry point: serve the WSGI application."""
    run_wsgi_app(application)
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "c8a6a8633f863e0350157346106a747096d26939",
"index": 9912,
"step-1": "<mask token>\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\n<mask token>\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 
0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\n<mask token>\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\n<mask token>\n\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\n<mask token>\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = 
str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\n<mask token>\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\n<mask token>\n\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\ndef combination(wordlist, t):\n tempc = wordlist\n combinationqueryset = [listtostr(tempc[:i] + ['%s%s' % (tempc[i], tempc\n [i + 1])] + tempc[i + 2:]) for i in range(0, len(tempc) - 1)]\n cquery = listtostr(tempc)\n combinationqueryset.append(cquery)\n results = getjp1('', combinationqueryset, '')\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n if t == 0:\n t = dictionary[cquery]\n if results[0][0] == cquery:\n return cquery, results[0][1], t\n else:\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n y = list()\n for i in range(x):\n y.append(combinationqueryset.index(results[i][0]))\n y.sort(reverse=True)\n cache = wordlist\n for z in y:\n cache[z] += cache[z + 1]\n del cache[z + 1]\n return combination(cache, t)\n\n\n<mask token>\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: 
entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n 
self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\ndef getdictionary(wordelist):\n global MONTH, DATASET, NGRAM, PROB\n dictionaryy = dict()\n rpcs = []\n for i in range(len(wordelist)):\n if i < 3:\n t = 0\n else:\n t = i - 3\n form_fields = {'word': wordelist[i], 'before': listtostr(wordelist[\n t:i]), 'after': listtostr(wordelist[i + 1:i + 4]), 'm': MONTH,\n 'd': DATASET, 'ng': NGRAM, 'pp': PROB}\n formdata = urllib.urlencode(form_fields)\n rpc = urlfetch.create_rpc()\n url = 'http://timetest.forbackend.appspot.com/wordspellcheck'\n urlfetch.make_fetch_call(rpc, url, payload=formdata, method=\n urlfetch.POST)\n rpcs.append(rpc)\n resultts = list()\n for rpc in rpcs:\n result = rpc.get_result()\n resultts.append(result.content)\n dictionaryy[listtostr(wordelist)] = 0\n for i in range(len(wordelist)):\n if resultts[i] == wordelist[i]:\n continue\n else:\n for j in range(i, len(wordelist) + 1):\n pp = listtostr(wordelist[:i] + resultts[i:j] + wordelist[j:])\n dictionaryy[pp] = 0\n return dictionaryy\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\ndef querylength(query):\n liste = query.split()\n counte = 0\n for x in liste:\n if len(x) > 1:\n counte += 1\n if counte == 0:\n return 1\n else:\n return counte\n\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\n<mask token>\n",
"step-4": "import re\nimport cgi\nimport os\nimport urllib\nimport urllib2\nfrom time import sleep\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import backends\nfrom google.appengine.api import logservice\nlogservice.AUTOFLUSH_EVERY_SECONDS = None\nlogservice.AUTOFLUSH_EVERY_BYTES = None\nlogservice.AUTOFLUSH_ENABLED = False\nMONTH = 'jun09'\nNGRAM = '3'\nPROB = 'jp'\nDATASET = 'bing-body'\nREQUESTURL = ('http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\nGENURL = ('http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required=True)\n known = db.StringListProperty(indexed=False)\n\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\ndef combination(wordlist, t):\n tempc = wordlist\n combinationqueryset = [listtostr(tempc[:i] + ['%s%s' % (tempc[i], tempc\n [i + 1])] + tempc[i + 2:]) for i in range(0, len(tempc) - 1)]\n cquery = listtostr(tempc)\n combinationqueryset.append(cquery)\n results = getjp1('', combinationqueryset, '')\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n if t == 0:\n t = dictionary[cquery]\n if results[0][0] == cquery:\n return cquery, results[0][1], t\n else:\n dictionary = dict(results)\n x = results.index((cquery, dictionary[cquery]))\n y = list()\n for i in range(x):\n y.append(combinationqueryset.index(results[i][0]))\n y.sort(reverse=True)\n cache = wordlist\n for z in y:\n cache[z] += cache[z + 1]\n del cache[z + 1]\n return combination(cache, t)\n\n\ndef spacesplits(wordlist):\n temps = 
wordlist\n query = listtostr(temps)\n strings = []\n for i in range(len(temps)):\n for y in range(1, len(temps[i])):\n strings.append(listtostr(temps[:i] + list([temps[i][:y], temps[\n i][y:]]) + temps[i + 1:]))\n strings.append(query)\n results = getjp1('', strings, '')\n if results[0][0] == query:\n return query, results[0][1]\n else:\n return spacesplits(results[0][0].split())\n\n\ndef getjp(before, wordlist, after):\n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i]) / querylength(wordli[i])\n dictionary = dict(zip(wordli, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\ndef getjp1(before, wordlist, after):\n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before + ' ' + str(x) + ' ' + after + '\\n'\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(urllib2.Request(REQUESTURL, str(string))).read(\n ).split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist, jps))\n return sorted(dictionary.iteritems(), key=lambda entity: entity[1],\n reverse=True)\n\n\nclass mainpage(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n DATASET + '/' + MONTH + '/' + NGRAM + '/' + PROB +\n '?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n GENURL = (\n 'http://web-ngram.research.microsoft.com/rest/lookup.svc/' +\n 
DATASET + '/' + MONTH + '/' + NGRAM +\n '/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e')\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist, 0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try:\n dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query: 0})\n try:\n if query != cquery:\n dictionary.update(getdictionary(cquery.split()))\n except:\n dictionary.update({cquery: 0})\n try:\n if query != squery:\n dictionary.update(getdictionary(squery.split()))\n except:\n dictionary.update({squery: 0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('', finallist, '')\n final = list()\n for i in range(len(result)):\n final.append(10 ** result[i][1])\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0]) + '\\t' +\n printresult[i] + '\\n')\n except:\n self.response.out.write(query + '\\t' + str(1))\n\n\nclass maintest(webapp.RequestHandler):\n\n def get(self):\n global MONTH, DATASET, NGRAM, PROB, REQUESTURL, GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL + '\\n')\n self.response.out.write(GENURL)\n\n\ndef getdictionary(wordelist):\n global MONTH, DATASET, NGRAM, PROB\n dictionaryy = dict()\n rpcs = []\n for i in range(len(wordelist)):\n if i < 3:\n t = 0\n else:\n t = i - 3\n form_fields = {'word': wordelist[i], 'before': listtostr(wordelist[\n t:i]), 'after': listtostr(wordelist[i + 1:i + 4]), 'm': MONTH,\n 'd': DATASET, 'ng': NGRAM, 'pp': PROB}\n formdata = urllib.urlencode(form_fields)\n rpc = urlfetch.create_rpc()\n url = 'http://timetest.forbackend.appspot.com/wordspellcheck'\n urlfetch.make_fetch_call(rpc, url, payload=formdata, method=\n urlfetch.POST)\n rpcs.append(rpc)\n resultts = list()\n for rpc 
in rpcs:\n result = rpc.get_result()\n resultts.append(result.content)\n dictionaryy[listtostr(wordelist)] = 0\n for i in range(len(wordelist)):\n if resultts[i] == wordelist[i]:\n continue\n else:\n for j in range(i, len(wordelist) + 1):\n pp = listtostr(wordelist[:i] + resultts[i:j] + wordelist[j:])\n dictionaryy[pp] = 0\n return dictionaryy\n\n\nclass splittest(webapp.RequestHandler):\n\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist, 0)\n self.response.out.write(splitted)\n\n\ndef querylength(query):\n liste = query.split()\n counte = 0\n for x in liste:\n if len(x) > 1:\n counte += 1\n if counte == 0:\n return 1\n else:\n return counte\n\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word + ' '\n string = string.strip()\n return string\n\n\ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round(problist[i] / tot, 3)))\n return returnlist\n\n\napplication = webapp.WSGIApplication([('/mainpage', maintest), ('/maintest',\n mainpage), ('/split', splittest)], debug=True)\n\n\ndef main():\n run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\n\nimport re\nimport cgi\nimport os\nimport urllib\nimport urllib2\n\nfrom time import sleep\n\nfrom google.appengine.api import taskqueue\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\nfrom google.appengine.api import urlfetch\nfrom google.appengine.api import backends\nfrom google.appengine.api import logservice\nlogservice.AUTOFLUSH_EVERY_SECONDS = None\nlogservice.AUTOFLUSH_EVERY_BYTES = None\nlogservice.AUTOFLUSH_ENABLED = False\n\nMONTH = \"jun09\"\nNGRAM = \"3\"\nPROB = \"jp\"\nDATASET = \"bing-body\"\nREQUESTURL = \"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/\"+PROB+\"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\"\nGENURL = \"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\"\n\n\nclass lexicon0(db.Model):\n word = db.StringProperty(required = True)\n known = db.StringListProperty(indexed = False)\n\ndef lexicon_key(lexicon_name=None):\n return db.Key.from_path('lexicon0', lexicon_name or 'default')\n\n\ndef combination(wordlist,t):#argument t is to notify that it is the main query while using cobination for first time\n tempc = wordlist\n combinationqueryset = [listtostr(tempc[:i] +\n [\"%s%s\"%(tempc[i],tempc[i+1])] +\n tempc[i+2:] ) for i in range(0, len(tempc)-1)]\n cquery = listtostr(tempc)\n combinationqueryset.append(cquery)\n results = getjp1('',combinationqueryset,'')\n dictionary = dict(results)\n x = results.index((cquery,dictionary[cquery]))\n if (t==0): t = dictionary[cquery]\n if (results[0][0] == cquery):\n return (cquery,results[0][1],t)\n else:\n dictionary = dict(results)\n x = results.index((cquery,dictionary[cquery]))\n y = list()\n for i in range(x):\n y.append(combinationqueryset.index(results[i][0]))\n y.sort(reverse = True)\n cache = wordlist\n for z in y:\n cache[z] += cache[z+1]\n del cache[z+1]\n 
return combination(cache,t)\n \ndef spacesplits(wordlist):\n temps = wordlist\n query = listtostr(temps)\n strings = []\n for i in range(len(temps)):\n for y in range(1,len(temps[i])):\n strings.append(listtostr(temps[:i]+list([temps[i][:y],temps[i][y:]])+temps[i+1:]))\n strings.append(query) \n results = getjp1('',strings,'')\n if (results[0][0] == query):\n return (query,results[0][1])\n else:\n return spacesplits(results[0][0].split())\n\n\n\ndef getjp(before,wordlist,after): \n global REQUESTURL\n wordli = wordlist\n string = ''\n for x in wordli:\n string += before+\" \"+str(x)+\" \"+after+\"\\n\"\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(\n urllib2.Request(REQUESTURL,str(string))).read().split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])/(querylength(wordli[i]))\n dictionary = dict(zip(wordli,jps))\n return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)\n\ndef getjp1(before,wordlist,after): \n global REQUESTURL\n string = ''\n for x in wordlist:\n string += before+\" \"+str(x)+\" \"+after+\"\\n\"\n string = string.strip()\n jps = list()\n jps = urllib2.urlopen(\n urllib2.Request(REQUESTURL,str(string))).read().split()\n for i in range(len(jps)):\n jps[i] = float(jps[i])\n dictionary = dict(zip(wordlist,jps))\n return sorted(dictionary.iteritems(), key = lambda entity:entity[1], reverse = True)\n\nclass mainpage(webapp.RequestHandler):\n def get(self):\n global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL\n if len(self.request.get('m')):\n MONTH = str(self.request.get('m'))\n if len(self.request.get('d')):\n DATASET = str(self.request.get('d'))\n if len(self.request.get('ng')):\n NGRAM = str(self.request.get('ng'))\n if len(self.request.get('pp')):\n PROB = str(self.request.get('pp'))\n REQUESTURL = \"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/\"+PROB+\"?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\" \n GENURL = 
\"http://web-ngram.research.microsoft.com/rest/lookup.svc/\"+DATASET+\"/\"+MONTH+\"/\"+NGRAM+\"/gen?u=888b8bfe-a203-43c6-a303-ab8e8d47b38e\"\n query = str(self.request.get('q'))\n wordlist = query.strip().split()\n dictionary = dict()\n try:\n cquery = combination(wordlist,0)[0]\n except:\n cquery = query\n try:\n wordlist = query.strip().split()\n squery = spacesplits(wordlist)[0]\n except:\n squery = query\n try: dictionary.update(getdictionary(wordlist))\n except:\n dictionary.update({query:0})\n try:\n if (query != cquery): dictionary.update(getdictionary(cquery.split()))\n except: dictionary.update({cquery:0})\n try:\n if (query != squery): dictionary.update(getdictionary(squery.split()))\n except: dictionary.update({squery:0})\n finallist = dictionary.keys()\n self.response.headers['Content-Type'] = 'text/plain'\n try:\n result = getjp('',finallist,'')\n final = list()\n for i in range(len(result)):\n final.append(10**((result[i][1])))\n printresult = normalize(final)\n for i in range(len(printresult)):\n self.response.out.write(str(result[i][0])+\"\\t\"+printresult[i]+\"\\n\")\n except:\n self.response.out.write(query+\"\\t\"+str(1))\n \n\n \nclass maintest(webapp.RequestHandler):\n def get(self):\n global MONTH,DATASET,NGRAM,PROB,REQUESTURL,GENURL\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(REQUESTURL+\"\\n\")\n self.response.out.write(GENURL)\n \n\n\ndef getdictionary(wordelist):\n global MONTH,DATASET,NGRAM,PROB\n dictionaryy = dict()\n rpcs = []\n for i in range(len(wordelist)):\n if i<3: t=0\n else: t = i-3\n form_fields = {\n \"word\": wordelist[i],\n \"before\": listtostr(wordelist[t:i]),\n \"after\": listtostr(wordelist[i+1:i+4]),\n \"m\": MONTH,\n \"d\": DATASET,\n \"ng\": NGRAM,\n \"pp\": PROB\n }\n formdata = urllib.urlencode(form_fields)\n rpc = urlfetch.create_rpc()\n url = \"http://timetest.forbackend.appspot.com/wordspellcheck\"\n #rpc.callback = create_callback(rpc)\n urlfetch.make_fetch_call(rpc,\n url,\n 
payload = formdata,\n method = urlfetch.POST)\n rpcs.append(rpc)\n resultts = list()\n for rpc in rpcs:\n result = rpc.get_result()\n resultts.append(result.content)\n #self.response.out.write(results)\n #self.response.out.write(wordee)\n dictionaryy[listtostr(wordelist)] = 0\n for i in range(len(wordelist)):\n if resultts[i] == wordelist[i]: continue\n else:\n for j in range(i,len(wordelist)+1):\n pp = listtostr(wordelist[:i]+resultts[i:j]+wordelist[j:])\n dictionaryy[pp] = 0\n return dictionaryy\n\n \nclass splittest(webapp.RequestHandler):\n def get(self):\n query = self.request.get('q')\n wordlist = query.split()\n splitted = combination(wordlist,0)\n self.response.out.write(splitted)\n\ndef querylength(query):\n liste = query.split()\n counte = 0\n for x in liste:\n if len(x)>1: counte += 1\n if counte == 0: return 1\n else: return counte\n\ndef listtostr(wordlist):\n string = ''\n for word in wordlist:\n string += word+\" \"\n string = string.strip()\n return string\n#def create_callback(rpc):\n \ndef normalize(problist):\n tot = 0\n for x in problist:\n tot += x\n returnlist = list()\n for i in range(len(problist)):\n returnlist.append(str(round((problist[i]/tot),3)))\n return returnlist\n \napplication = webapp.WSGIApplication([\n ('/mainpage',maintest),#### the main speller is in main page web handler as i submitted maintest as the official submission i changed this\n ('/maintest',mainpage),\n ('/split',splittest)],\n debug = True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
13,
14,
17,
21,
22
]
}
|
[
13,
14,
17,
21,
22
] |
import re
class Markdown:
    """Tiny Markdown-to-HTML converter.

    Supports ATX headings (``# ...`` .. ``###### ...``), Setext H1 (a
    line of ``=`` under a single buffered line), paragraphs separated by
    blank lines, and the inline spans ``_em_``, ``**strong**`` and
    backtick ``code``.

    Raw input lines accumulate in ``self.__analyzing`` until a block
    boundary (heading or blank line) is seen; finished HTML lines are
    collected in ``self.__formattedFile``.
    """

    def __init__(self):
        # Per-instance buffers.  (These used to be class attributes, so
        # every Markdown instance shared -- and polluted -- the same two
        # lists across conversions.)
        self.__formattedFile = []  # completed HTML output lines
        self.__analyzing = []      # raw lines of the block being built

    def __processSingleLine(self, line):
        """Dispatch one raw input line to the right block handler."""
        if(self.__isHeading(line)):
            # A heading terminates any paragraph under construction.
            self.__process("p")
            self.__analyzing.append(re.sub(r"(#{1,6})", "", line).strip())
            # Heading level == number of leading '#' characters.
            self.__process("h" + str(len(re.split(r"\s", line)[0])))
        elif(self.__isHeading2(line)):
            # Setext underline: promote the single buffered line to <h1>.
            self.__process("h1")
        elif(self.__isBlankLine(line)):
            # A blank line closes the current paragraph.
            self.__process("p")
        else:
            self.__analyzing.append(line)

    def __isHeading(self, line):
        """True for an ATX heading: 1-6 '#' followed by whitespace."""
        return re.match(r"^(#{1,6})(\s)+", line) is not None

    def __isHeading2(self, line):
        """True for a Setext H1 underline: an all-'=' line while exactly
        one line is buffered."""
        return len(self.__analyzing) == 1 and re.match(r"^[\=]+$", line) is not None

    def __isBlankLine(self, line):
        """True for a line that starts with a newline (i.e. is empty)."""
        return re.match("^[\n]", line) is not None

    def __convertAttribute(self, markdown, tag):
        """Replace one opening/closing ``markdown`` delimiter pair in the
        buffered block with ``<tag>``/``</tag>``.

        Scans word by word for the first word starting with the delimiter
        and, from there on, the first word ending with it (optionally
        followed by punctuation).  The pair may span lines or sit in a
        single word.  Returns True if a pair was converted, so callers
        can loop until no pairs remain.
        """
        lineIndex1 = -1
        wordIndex1 = -1
        lineIndex2 = -1
        wordIndex2 = -1
        for lIndex in range(len(self.__analyzing)):
            words = re.split(r"\s", self.__analyzing[lIndex])
            for wIndex in range(len(words)):
                if(lineIndex1 == -1):
                    if(re.match("^[\\" + markdown + "][\\S]", words[wIndex])):
                        lineIndex1 = lIndex
                        wordIndex1 = wIndex
                # Not elif: the opening word may also be the closing word.
                if(lineIndex1 >= 0):
                    if(re.match("[\\S]+[\\" + markdown + "][\\.\\,\\;\\:]*$", words[wIndex])):
                        lineIndex2 = lIndex
                        wordIndex2 = wIndex
                        break
            if(lineIndex2 >= 0):
                break
        if(lineIndex2 >= 0):
            # Rewrite the opening delimiter...
            newLine1 = re.split(r"\s", self.__analyzing[lineIndex1])
            newLine1[wordIndex1] = re.sub("^\\" + markdown, "<" + tag + ">", newLine1[wordIndex1])
            self.__analyzing[lineIndex1] = " ".join(newLine1)
            # ...then the closing one (possibly in the same word).
            newLine2 = re.split(r"\s", self.__analyzing[lineIndex2])
            newLine2[wordIndex2] = re.sub("\\" + markdown, "</" + tag + ">", newLine2[wordIndex2])
            self.__analyzing[lineIndex2] = " ".join(newLine2)
            return True
        return False

    def __convertFormat(self):
        """Convert every inline em/strong/code span in the buffer."""
        while self.__convertAttribute("_", "em"): continue
        while self.__convertAttribute("*{2,2}", "strong"): continue
        while self.__convertAttribute("`", "code"): continue

    def __convertParagraph(self, tag):
        """Wrap the buffered block in <tag>...</tag>; no-op when empty."""
        if(len(self.__analyzing) > 0):
            self.__analyzing[0] = "<" + tag + ">" + self.__analyzing[0]
            # Strip the trailing newline(s) from the last line before
            # appending the closing tag.
            self.__analyzing[-1] = "".join(self.__analyzing[-1].split("\n")) + "</" + tag + ">"

    def __process(self, tag):
        """Convert the buffered block to HTML and move it to the output."""
        self.__convertFormat()
        self.__convertParagraph(tag)
        self.__formattedFile.extend(self.__analyzing)
        self.__analyzing.clear()

    def toHTML(self, filepath):
        """Convert the Markdown file at ``filepath`` and print the HTML lines."""
        # Use a context manager -- the handle was previously never closed.
        with open(filepath, "r") as f:
            lines = f.readlines()
        for line in lines:
            self.__processSingleLine(line)
        # Flush a trailing paragraph: previously any content after the
        # last blank line / heading was silently dropped at EOF.
        if self.__analyzing:
            self.__process("p")
        for li in self.__formattedFile:
            print(li)
|
normal
|
{
"blob_id": "13e3337cf9e573b8906fe914a830a8e895af20ba",
"index": 3983,
"step-1": "<mask token>\n\n\nclass Markdown:\n <mask token>\n <mask token>\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 'code'):\n 
continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n <mask token>\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-2": "<mask token>\n\n\nclass Markdown:\n <mask token>\n <mask token>\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 'code'):\n 
continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-3": "<mask token>\n\n\nclass Markdown:\n __formattedFile = []\n __analyzing = []\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 
'code'):\n continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-4": "import re\n\n\nclass Markdown:\n __formattedFile = []\n __analyzing = []\n\n def __processSingleLine(self, line):\n if self.__isHeading(line):\n self.__process('p')\n self.__analyzing.append(re.sub('(#{1,6})', '', line).strip())\n self.__process('h' + str(len(re.split('\\\\s', line)[0])))\n elif self.__isHeading2(line):\n self.__process('h1')\n elif self.__isBlankLine(line):\n self.__process('p')\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match('^(#{1,6})(\\\\s)+', line) != None\n\n def __isHeading2(self, line):\n if len(self.__analyzing) == 1 and re.match('^[\\\\=]+$', line) != None:\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match('^[\\n]', line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split('\\\\s', self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if lineIndex1 == -1:\n if re.match('^[\\\\' + markdown + '][\\\\S]', words[wIndex]):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if lineIndex1 >= 0:\n if re.match('[\\\\S]+[\\\\' + markdown +\n '][\\\\.\\\\,\\\\;\\\\:]*$', words[wIndex]):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if lineIndex2 >= 0:\n break\n if lineIndex2 >= 0:\n newLine1 = re.split('\\\\s', self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub('^\\\\' + markdown, '<' + tag + '>',\n newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = ' '.join(newLine1)\n newLine2 = re.split('\\\\s', self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub('\\\\' + markdown, '</' + tag + '>',\n newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = ' '.join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute('_', 'em'):\n continue\n while self.__convertAttribute('*{2,2}', 'strong'):\n continue\n while self.__convertAttribute('`', 
'code'):\n continue\n\n def __convertParagraph(self, tag):\n if len(self.__analyzing) > 0:\n self.__analyzing[0] = '<' + tag + '>' + self.__analyzing[0]\n self.__analyzing[-1] = ''.join(self.__analyzing[-1].split('\\n')\n ) + '</' + tag + '>'\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, 'r')\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)\n",
"step-5": "import re\n\nclass Markdown:\n\n __formattedFile = []\n __analyzing = []\n\n \n def __processSingleLine(self, line):\n if(self.__isHeading(line)):\n self.__process(\"p\")\n self.__analyzing.append(re.sub(\"(#{1,6})\", \"\", line).strip())\n self.__process(\"h\" + str(len(re.split(\"\\s\", line)[0])))\n elif(self.__isHeading2(line)):\n self.__process(\"h1\")\n elif(self.__isBlankLine(line)):\n self.__process(\"p\")\n else:\n self.__analyzing.append(line)\n\n def __isHeading(self, line):\n return re.match(\"^(#{1,6})(\\s)+\", line) != None\n\n def __isHeading2(self, line):\n if(len(self.__analyzing) == 1 and re.match(\"^[\\=]+$\", line) != None):\n return True\n return False\n\n def __isBlankLine(self, line):\n return re.match(\"^[\\n]\", line) != None\n\n def __convertAttribute(self, markdown, tag):\n lineIndex1 = -1\n wordIndex1 = -1\n lineIndex2 = -1\n wordIndex2 = -1\n for lIndex in range(len(self.__analyzing)):\n words = re.split(\"\\s\", self.__analyzing[lIndex])\n for wIndex in range(len(words)):\n if(lineIndex1 == -1):\n if(re.match(\"^[\\\\\" + markdown + \"][\\S]\", words[wIndex])):\n lineIndex1 = lIndex\n wordIndex1 = wIndex\n if(lineIndex1 >= 0):\n if(re.match(\"[\\S]+[\\\\\" + markdown + \"][\\.\\,\\;\\:]*$\", words[wIndex])):\n lineIndex2 = lIndex\n wordIndex2 = wIndex\n break\n wIndex += 1\n if(lineIndex2 >= 0):\n break\n if(lineIndex2 >= 0):\n newLine1 = re.split(\"\\s\", self.__analyzing[lineIndex1])\n newLine1[wordIndex1] = re.sub(\"^\\\\\" + markdown, \"<\" + tag + \">\", newLine1[wordIndex1])\n self.__analyzing[lineIndex1] = \" \".join(newLine1)\n newLine2 = re.split(\"\\s\", self.__analyzing[lineIndex2])\n newLine2[wordIndex2] = re.sub(\"\\\\\" + markdown, \"</\" + tag + \">\", newLine2[wordIndex2])\n self.__analyzing[lineIndex2] = \" \".join(newLine2)\n return True\n return False\n\n def __convertFormat(self):\n while self.__convertAttribute(\"_\", \"em\"): continue\n while self.__convertAttribute(\"*{2,2}\", \"strong\"): continue\n 
while self.__convertAttribute(\"`\", \"code\"): continue\n\n def __convertParagraph(self, tag):\n if(len(self.__analyzing) > 0):\n self.__analyzing[0] = \"<\" + tag + \">\" + self.__analyzing[0]\n self.__analyzing[-1] = \"\".join(self.__analyzing[-1].split(\"\\n\")) + \"</\" + tag + \">\"\n\n def __process(self, tag):\n self.__convertFormat()\n self.__convertParagraph(tag)\n self.__formattedFile.extend(self.__analyzing)\n self.__analyzing.clear()\n\n def toHTML(self, filepath):\n f = open(filepath, \"r\")\n lines = f.readlines()\n for line in lines:\n self.__processSingleLine(line)\n for li in self.__formattedFile:\n print(li)",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def palinPerm(str):
charSet = set()
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
return len(charSet) == 1 or len(charSet) == 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def palinPerm(str):
charSet = set()
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
return len(charSet) == 1 or len(charSet) == 0
<|reserved_special_token_0|>
print(response)
<|reserved_special_token_1|>
def palinPerm(str):
charSet = set()
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
return len(charSet) == 1 or len(charSet) == 0
response = 'It is a palinPerm' if palinPerm('dadadad'
) else 'No, not a palinPerm'
print(response)
<|reserved_special_token_1|>
# Write function that determines if a string a palindrome off of any permutation
def palinPerm(str):
# Create empty set
charSet = set()
# Loop through string, if character does not exist in set, add it. If it does, remove it.
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
# The final set should either have 1 element or none
return len(charSet) == 1 or len(charSet) == 0
response = "It is a palinPerm" if palinPerm("dadadad") else "No, not a palinPerm"
print(response)
# Time Complexity: O(N)
|
flexible
|
{
"blob_id": "04487dce97231a7be2bf3b164e93f0ea4d01ba05",
"index": 1160,
"step-1": "<mask token>\n",
"step-2": "def palinPerm(str):\n charSet = set()\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n return len(charSet) == 1 or len(charSet) == 0\n\n\n<mask token>\n",
"step-3": "def palinPerm(str):\n charSet = set()\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n return len(charSet) == 1 or len(charSet) == 0\n\n\n<mask token>\nprint(response)\n",
"step-4": "def palinPerm(str):\n charSet = set()\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n return len(charSet) == 1 or len(charSet) == 0\n\n\nresponse = 'It is a palinPerm' if palinPerm('dadadad'\n ) else 'No, not a palinPerm'\nprint(response)\n",
"step-5": "# Write function that determines if a string a palindrome off of any permutation\ndef palinPerm(str):\n # Create empty set\n charSet = set()\n\n # Loop through string, if character does not exist in set, add it. If it does, remove it.\n for c in str:\n if c not in charSet:\n charSet.add(c)\n else:\n charSet.remove(c)\n\n # The final set should either have 1 element or none\n return len(charSet) == 1 or len(charSet) == 0\n\n\nresponse = \"It is a palinPerm\" if palinPerm(\"dadadad\") else \"No, not a palinPerm\"\nprint(response)\n\n# Time Complexity: O(N)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
5 1
6 1x
1112#Desember@@@@@
|
normal
|
{
"blob_id": "b324c520400f04719b17121b0b4c2d23915e8841",
"index": 2666,
"step-1": "5 1\r\n6 1x\r\n1112#Desember@@@@@",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Styles(models.Model):
<|reserved_special_token_0|>
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pyeong(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class HousingTypes(models.Model):
type = models.CharField('주거 환경', max_length=20)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField('디자인 스타일', max_length=10)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Comments(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
verbose_name = '댓글'
verbose_name_plural = '댓글 목록'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
class PostLike(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
user = models.ForeignKey('members.Users', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=
self.post.pk, username=self.user.username)
class Meta:
verbose_name = '게시글 좋아요'
verbose_name_plural = f'{verbose_name} 목록'
unique_together = ('post', 'user'),
class Pyeong(models.Model):
type = models.CharField('평 수', max_length=20)
@staticmethod
def make_pyeng():
index_list = ['1-7', '8-15', '16-25', '그 이상']
for i in range(len(index_list)):
Pyeong.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
type = models.CharField('주거 환경', max_length=20)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField('디자인 스타일', max_length=10)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Posts(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def initial_setting():
Pyeong.make_pyeng()
Colors.make_color()
HousingTypes.make_housing_type()
Styles.make_style()
class Meta:
verbose_name = '게시글'
verbose_name_plural = '게시글 목록'
def __str__(self):
return '%s : %s' % (self.pk, self.title)
class Comments(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=
'포스트', related_name='comment_set', related_query_name='comments')
author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.
CASCADE)
content = models.TextField('댓글 내용', max_length=500)
created_at = models.DateTimeField('작성 날', auto_now_add=True)
updated_at = models.DateTimeField('수정 날짜', auto_now=True)
class Meta:
verbose_name = '댓글'
verbose_name_plural = '댓글 목록'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
class PostLike(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
user = models.ForeignKey('members.Users', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=
self.post.pk, username=self.user.username)
class Meta:
verbose_name = '게시글 좋아요'
verbose_name_plural = f'{verbose_name} 목록'
unique_together = ('post', 'user'),
class Pyeong(models.Model):
type = models.CharField('평 수', max_length=20)
@staticmethod
def make_pyeng():
index_list = ['1-7', '8-15', '16-25', '그 이상']
for i in range(len(index_list)):
Pyeong.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
type = models.CharField('주거 환경', max_length=20)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField('디자인 스타일', max_length=10)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',
'로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField('색상', max_length=10)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',
'회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
image = models.ImageField(upload_to=get_image_filename, verbose_name=
'다중 이미지')
image_comment = models.TextField('사진 설명', max_length=200, blank=True,
null=True)
<|reserved_special_token_1|>
from django.conf import settings
from django.db import models
def get_image_filename(instance, filename):
a = f'post_images/{instance.post.title}.svg'
return a
def get_main_image_filename(instance, filename):
a = f'post_images/{instance.title}_main.svg'
return a
# Create your models here.
class Posts(models.Model):
PYEONG_CHOICE_FIELD = (
('1-7', '1-7평'),
('8-15', '8-15평'),
('16-25', '16-25평'),
('26-', '그 이상'),
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
title = models.TextField(
'제목', max_length=50
)
content = models.TextField(
'작성 글', max_length=500
)
main_image = models.ImageField(
upload_to=get_main_image_filename,
blank=True,
null=True,
verbose_name='메인 이미지',
)
pyeong = models.ManyToManyField(
'Pyeong',
blank=True,
)
created_at = models.DateTimeField(
'생성 날짜', auto_now_add=True,
)
updated_at = models.DateTimeField(
verbose_name='수정 날짜', auto_now=True, null=True, blank=True
)
like_users = models.ManyToManyField(
'members.Users',
through='PostLike',
related_name='like_posts',
related_query_name='like_post',
blank=True,
)
colors = models.ManyToManyField(
'posts.Colors',
blank=True,
)
housingtype = models.ManyToManyField(
'HousingTypes',
blank=True,
)
style = models.ManyToManyField(
'Styles',
blank=True,
)
postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)
@staticmethod
def initial_setting():
Pyeong.make_pyeng()
Colors.make_color()
HousingTypes.make_housing_type()
Styles.make_style()
class Meta:
verbose_name = '게시글'
verbose_name_plural = '게시글 목록'
def __str__(self):
return '%s : %s' % (self.pk, self.title)
class Comments(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
verbose_name='포스트',
related_name='comment_set',
related_query_name='comments',
)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
content = models.TextField(
'댓글 내용', max_length=500
)
# 글쓴이
created_at = models.DateTimeField(
'작성 날', auto_now_add=True,
)
updated_at = models.DateTimeField(
'수정 날짜', auto_now=True,
)
class Meta:
verbose_name = '댓글'
verbose_name_plural = '댓글 목록'
def save(self, *args, **kwargs):
# 여기서 이미지 처리를 하게 될 듯
super().save(*args, **kwargs)
class PostLike(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
)
user = models.ForeignKey(
'members.Users',
on_delete=models.CASCADE,
)
created_at = models.DateTimeField(
auto_now_add=True,
)
def __str__(self):
return 'Post[{post_pk}] Like (User: {username})'.format(
post_pk=self.post.pk,
username=self.user.username,
)
class Meta:
verbose_name = '게시글 좋아요'
verbose_name_plural = f'{verbose_name} 목록'
# 특정 유저가 특정 포스트 좋아요를 누른 정보는 유니크 해야 함.
unique_together = (
('post', 'user'),
)
class Pyeong(models.Model):
type = models.CharField(
'평 수',
max_length=20,
)
@staticmethod
def make_pyeng():
index_list = ['1-7', '8-15', '16-25', '그 이상']
for i in range((len(index_list))):
Pyeong.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class HousingTypes(models.Model):
type = models.CharField(
'주거 환경',
max_length=20,
)
@staticmethod
def make_housing_type():
index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']
for i in range(len(index_list)):
HousingTypes.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Styles(models.Model):
type = models.CharField(
'디자인 스타일',
max_length=10,
)
@staticmethod
def make_style():
index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스', '로맨틱', '클래식', '엔틱']
for i in range(len(index_list)):
Styles.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class Colors(models.Model):
type = models.CharField(
'색상',
max_length=10
)
@staticmethod
def make_color():
index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색', '회색']
for i in range(len(index_list)):
Colors.objects.create(type=index_list[i])
def __str__(self):
return '%s : %s' % (self.pk, self.type)
class PostImages(models.Model):
post = models.ForeignKey(
Posts,
on_delete=models.CASCADE,
)
image = models.ImageField(
upload_to=get_image_filename,
verbose_name='다중 이미지',
)
image_comment = models.TextField(
'사진 설명', max_length=200, blank=True, null=True,
)
# 이미지 추가 스택오버플로우 정보
# https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django
|
flexible
|
{
"blob_id": "1bbadf02c4b9ca22a0099bcc09fa4c62c9901c39",
"index": 1069,
"step-1": "<mask token>\n\n\nclass Styles(models.Model):\n <mask token>\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-2": "<mask token>\n\n\nclass Pyeong(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-3": "<mask token>\n\n\nclass Comments(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n 
Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-4": "<mask token>\n\n\nclass Posts(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE, verbose_name=\n '포스트', related_name='comment_set', related_query_name='comments')\n author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.\n CASCADE)\n content = models.TextField('댓글 내용', max_length=500)\n created_at = models.DateTimeField('작성 날', auto_now_add=True)\n updated_at = models.DateTimeField('수정 날짜', auto_now=True)\n\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n user = models.ForeignKey('members.Users', on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(post_pk=\n self.post.pk, username=self.user.username)\n\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n unique_together = ('post', 'user'),\n\n\nclass Pyeong(models.Model):\n type = models.CharField('평 수', max_length=20)\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range(len(index_list)):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField('주거 환경', 
max_length=20)\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField('디자인 스타일', max_length=10)\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스',\n '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField('색상', max_length=10)\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색',\n '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(Posts, on_delete=models.CASCADE)\n image = models.ImageField(upload_to=get_image_filename, verbose_name=\n '다중 이미지')\n image_comment = models.TextField('사진 설명', max_length=200, blank=True,\n null=True)\n",
"step-5": "from django.conf import settings\nfrom django.db import models\n\n\ndef get_image_filename(instance, filename):\n a = f'post_images/{instance.post.title}.svg'\n return a\n\n\ndef get_main_image_filename(instance, filename):\n a = f'post_images/{instance.title}_main.svg'\n return a\n\n\n# Create your models here.\nclass Posts(models.Model):\n PYEONG_CHOICE_FIELD = (\n ('1-7', '1-7평'),\n ('8-15', '8-15평'),\n ('16-25', '16-25평'),\n ('26-', '그 이상'),\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n title = models.TextField(\n '제목', max_length=50\n )\n content = models.TextField(\n '작성 글', max_length=500\n )\n main_image = models.ImageField(\n upload_to=get_main_image_filename,\n blank=True,\n null=True,\n verbose_name='메인 이미지',\n )\n pyeong = models.ManyToManyField(\n 'Pyeong',\n blank=True,\n )\n created_at = models.DateTimeField(\n '생성 날짜', auto_now_add=True,\n )\n updated_at = models.DateTimeField(\n verbose_name='수정 날짜', auto_now=True, null=True, blank=True\n )\n\n like_users = models.ManyToManyField(\n 'members.Users',\n through='PostLike',\n related_name='like_posts',\n related_query_name='like_post',\n blank=True,\n )\n\n colors = models.ManyToManyField(\n 'posts.Colors',\n blank=True,\n )\n housingtype = models.ManyToManyField(\n 'HousingTypes',\n blank=True,\n )\n style = models.ManyToManyField(\n 'Styles',\n blank=True,\n )\n postPyeong = models.CharField(max_length=10, choices=PYEONG_CHOICE_FIELD)\n\n @staticmethod\n def initial_setting():\n Pyeong.make_pyeng()\n Colors.make_color()\n HousingTypes.make_housing_type()\n Styles.make_style()\n\n class Meta:\n verbose_name = '게시글'\n verbose_name_plural = '게시글 목록'\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.title)\n\n\nclass Comments(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n verbose_name='포스트',\n related_name='comment_set',\n related_query_name='comments',\n )\n author = models.ForeignKey(\n 
settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n content = models.TextField(\n '댓글 내용', max_length=500\n )\n # 글쓴이\n created_at = models.DateTimeField(\n '작성 날', auto_now_add=True,\n )\n updated_at = models.DateTimeField(\n '수정 날짜', auto_now=True,\n )\n\n class Meta:\n verbose_name = '댓글'\n verbose_name_plural = '댓글 목록'\n\n def save(self, *args, **kwargs):\n # 여기서 이미지 처리를 하게 될 듯\n super().save(*args, **kwargs)\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n )\n user = models.ForeignKey(\n 'members.Users',\n on_delete=models.CASCADE,\n )\n created_at = models.DateTimeField(\n auto_now_add=True,\n )\n\n def __str__(self):\n return 'Post[{post_pk}] Like (User: {username})'.format(\n post_pk=self.post.pk,\n username=self.user.username,\n )\n\n class Meta:\n verbose_name = '게시글 좋아요'\n verbose_name_plural = f'{verbose_name} 목록'\n # 특정 유저가 특정 포스트 좋아요를 누른 정보는 유니크 해야 함.\n unique_together = (\n ('post', 'user'),\n )\n\n\nclass Pyeong(models.Model):\n type = models.CharField(\n '평 수',\n max_length=20,\n )\n\n @staticmethod\n def make_pyeng():\n index_list = ['1-7', '8-15', '16-25', '그 이상']\n for i in range((len(index_list))):\n Pyeong.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass HousingTypes(models.Model):\n type = models.CharField(\n '주거 환경',\n max_length=20,\n )\n\n @staticmethod\n def make_housing_type():\n index_list = ['빌라', '아파트', '오피스텔', '원룸', '투쓰리룸', '복층']\n for i in range(len(index_list)):\n HousingTypes.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass Styles(models.Model):\n type = models.CharField(\n '디자인 스타일',\n max_length=10,\n )\n\n @staticmethod\n def make_style():\n index_list = ['모던', '미니멀리즘', '한국', '스칸다나비아', '인더스트리얼', '프로방스', '로맨틱', '클래식', '엔틱']\n for i in range(len(index_list)):\n Styles.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % 
(self.pk, self.type)\n\n\nclass Colors(models.Model):\n type = models.CharField(\n '색상',\n max_length=10\n )\n\n @staticmethod\n def make_color():\n index_list = ['빨강', '주황', '노랑', '초록', '파랑', '남색', '보라색', '검정', '흰색', '회색']\n for i in range(len(index_list)):\n Colors.objects.create(type=index_list[i])\n\n def __str__(self):\n return '%s : %s' % (self.pk, self.type)\n\n\nclass PostImages(models.Model):\n post = models.ForeignKey(\n Posts,\n on_delete=models.CASCADE,\n )\n image = models.ImageField(\n upload_to=get_image_filename,\n verbose_name='다중 이미지',\n )\n image_comment = models.TextField(\n '사진 설명', max_length=200, blank=True, null=True,\n )\n # 이미지 추가 스택오버플로우 정보\n # https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django\n",
"step-ids": [
9,
15,
23,
27,
32
]
}
|
[
9,
15,
23,
27,
32
] |
import sys
def saludar(saludo):
    """Print the given greeting.

    Fixed: the original used the Python 2 ``print saludo`` statement,
    which is a SyntaxError under Python 3.
    """
    print(saludo)
def iniciales(nombre, ape1, ape2):
    """Return an upper-cased initials string for a name and two surnames.

    The result keeps a trailing dot, e.g. "Tus iniciales son:J.P.G.".
    """
    letras = ".".join(parte[0] for parte in (nombre, ape1, ape2)) + "."
    return "Tus iniciales son:" + letras.upper()
def iniciales1(nombre, ape1, *apellidos):
    """Return upper-cased, dot-separated initials for a variable number
    of surnames (no trailing dot)."""
    letras = [nombre[0], ape1[0]]
    letras.extend(apellido[0] for apellido in apellidos)
    return ".".join(letras).upper()
|
normal
|
{
"blob_id": "01b615f8282d4d42c5e83181fffc2d7cb612c096",
"index": 704,
"step-1": "import sys \n\n\ndef saludar(saludo):\n\tprint saludo\n\ndef iniciales(nombre,ape1,ape2):\n\tiniciales=nombre[0]+'.'+ape1[0]+'.'+ape2[0]+'.'\n\treturn \"Tus iniciales son:\"+iniciales.upper()\n\n\ndef iniciales1(nombre,ape1,*apellidos):\n\tiniciales=nombre[0]+'.'+ape1[0]\n\tfor ape in apellidos:\n\t\tiniciales=iniciales+'.'+ape[0]\n\treturn iniciales.upper()\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def kind(n):
s = str(n)
l = len(s)
i = 0
j = i + 1
decr, bouncy, incr = False, False, False
while j < l:
a = int(s[i])
b = int(s[j])
if s[i] > s[j]:
decr = True
elif s[i] < s[j]:
incr = True
i += 1
j += 1
if decr and incr:
return True
return False
def calc(prop):
currentProp = 0
i = 100
countBouncy = 0
while currentProp < prop:
if kind(i):
countBouncy += 1
currentProp = countBouncy * 100 / i
if currentProp >= prop:
return i
i += 1
return 'Proportion was not reached.'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def kind(n):
s = str(n)
l = len(s)
i = 0
j = i + 1
decr, bouncy, incr = False, False, False
while j < l:
a = int(s[i])
b = int(s[j])
if s[i] > s[j]:
decr = True
elif s[i] < s[j]:
incr = True
i += 1
j += 1
if decr and incr:
return True
return False
def calc(prop):
currentProp = 0
i = 100
countBouncy = 0
while currentProp < prop:
if kind(i):
countBouncy += 1
currentProp = countBouncy * 100 / i
if currentProp >= prop:
return i
i += 1
return 'Proportion was not reached.'
calc(prop)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
prop = float(sys.argv[1])
def kind(n):
s = str(n)
l = len(s)
i = 0
j = i + 1
decr, bouncy, incr = False, False, False
while j < l:
a = int(s[i])
b = int(s[j])
if s[i] > s[j]:
decr = True
elif s[i] < s[j]:
incr = True
i += 1
j += 1
if decr and incr:
return True
return False
def calc(prop):
currentProp = 0
i = 100
countBouncy = 0
while currentProp < prop:
if kind(i):
countBouncy += 1
currentProp = countBouncy * 100 / i
if currentProp >= prop:
return i
i += 1
return 'Proportion was not reached.'
calc(prop)
<|reserved_special_token_1|>
import sys
prop = float(sys.argv[1])
def kind(n):
s = str(n)
l = len(s)
i = 0
j = i + 1
decr, bouncy, incr = False, False, False
while j < l:
a = int(s[i])
b = int(s[j])
if s[i] > s[j]:
decr = True
elif s[i] < s[j]:
incr = True
i += 1
j += 1
if decr and incr:
return True
return False
def calc(prop):
currentProp = 0
i = 100
countBouncy = 0
while currentProp < prop:
if kind(i):
countBouncy += 1
currentProp = countBouncy * 100 / i
if currentProp >= prop:
return i
i += 1
return 'Proportion was not reached.'
calc(prop)
<|reserved_special_token_1|>
import sys

# Target percentage of bouncy numbers, read from the first CLI argument.
prop = float(sys.argv[1])
def kind(n):
    """Return True if ``n`` is a bouncy number.

    A number is bouncy when its digit sequence is neither fully
    non-decreasing nor fully non-increasing (Project Euler 112 sense).

    Fixed: removed the dead locals ``a``/``b`` (converted to int but
    never used) and the unused ``bouncy`` flag; iterate adjacent digit
    pairs directly instead of tracking two indices.
    """
    s = str(n)
    increased = decreased = False
    # Comparing the characters directly is safe: single digits order
    # the same way as their integer values.
    for left, right in zip(s, s[1:]):
        if left > right:
            decreased = True
        elif left < right:
            increased = True
        if increased and decreased:
            return True
    return False
def calc(prop):
    """Return the least integer at which the running proportion of
    bouncy numbers (in percent) reaches ``prop``.

    Counting starts at 100 because no smaller number can be bouncy, so
    ``i`` is also the count of all integers examined so far.

    Fixed: the original's ``while currentProp < prop`` tested a stale
    value (only updated after a bouncy hit) and its fallback return was
    unreachable; the threshold is now compared with exact integer
    arithmetic instead of float division.
    """
    if prop <= 0:
        # Preserve the original degenerate behavior: the loop never ran.
        return 'Proportion was not reached.'
    bouncy_count = 0
    i = 100
    while True:
        if kind(i):
            bouncy_count += 1
            # The proportion can only cross the threshold immediately
            # after counting a bouncy number, so this check suffices.
            # bouncy_count * 100 / i >= prop, without float rounding:
            if bouncy_count * 100 >= prop * i:
                return i
        i += 1
calc(prop)
|
flexible
|
{
"blob_id": "0de27101675eb8328d9a2831ed468a969b03e7d3",
"index": 5741,
"step-1": "<mask token>\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-3": "<mask token>\nprop = float(sys.argv[1])\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-4": "import sys\nprop = float(sys.argv[1])\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-5": "import sys\n\nprop = float(sys.argv[1])\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = (countBouncy * 100) / i\n if currentProp >= prop:\n return i\n i += 1\n return \"Proportion was not reached.\"\n\ncalc(prop)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def absent(lectureid, sectionid):
connection = psycopg2.connect(database='profmate', user='python',
password='python', host='34.74.217.167', port='5432')
cursor = connection.cursor()
postgreSQL_select_Query = (
"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );"
)
cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))
print('Selecting rows from POOL table using cursor.fetchall')
current_table = cursor.fetchall()
print("Print each row and it's columns values")
longstring = str('')
for row in current_table:
longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\n'))
longstring = ''.join((longstring, 'Family Name = ', row[1], '\n'))
longstring = ''.join((longstring, 'Given Name = ', row[2], '\n'))
cursor.close()
connection.close()
print('PostgreSQL connection is closed')
return longstring
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def absent(lectureid, sectionid):
connection = psycopg2.connect(database='profmate', user='python',
password='python', host='34.74.217.167', port='5432')
cursor = connection.cursor()
postgreSQL_select_Query = (
"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );"
)
cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))
print('Selecting rows from POOL table using cursor.fetchall')
current_table = cursor.fetchall()
print("Print each row and it's columns values")
longstring = str('')
for row in current_table:
longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\n'))
longstring = ''.join((longstring, 'Family Name = ', row[1], '\n'))
longstring = ''.join((longstring, 'Given Name = ', row[2], '\n'))
cursor.close()
connection.close()
print('PostgreSQL connection is closed')
return longstring
if __name__ == '__main__':
a = '234567890'
b = 'Tester'
c = 'One'
print(absent(101, 1001))
<|reserved_special_token_1|>
import psycopg2
def absent(lectureid, sectionid):
connection = psycopg2.connect(database='profmate', user='python',
password='python', host='34.74.217.167', port='5432')
cursor = connection.cursor()
postgreSQL_select_Query = (
"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );"
)
cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))
print('Selecting rows from POOL table using cursor.fetchall')
current_table = cursor.fetchall()
print("Print each row and it's columns values")
longstring = str('')
for row in current_table:
longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\n'))
longstring = ''.join((longstring, 'Family Name = ', row[1], '\n'))
longstring = ''.join((longstring, 'Given Name = ', row[2], '\n'))
cursor.close()
connection.close()
print('PostgreSQL connection is closed')
return longstring
if __name__ == '__main__':
a = '234567890'
b = 'Tester'
c = 'One'
print(absent(101, 1001))
<|reserved_special_token_1|>
# # -*- coding: utf-8 -*-
#
# """
# Py40 PyQt5 tutorial
#
# This example shows three labels on a window
# using absolute positioning.
#
# author: Jan Bodnar
# website: py40.com
# last edited: January 2015
# """
#
# import sys
# from PyQt5.QtWidgets import QWidget, QLabel, QApplication
#
#
# class Example(QWidget):
#
# def __init__(self):
# super().__init__()
#
# self.initUI()
#
# def initUI(self):
# lbl1 = QLabel('Zetcode', self)
# lbl1.move(15, 10)
#
# lbl2 = QLabel('tutorials', self)
# lbl2.move(35, 40)
#
# lbl3 = QLabel('for programmers', self)
# lbl3.move(55, 70)
#
# self.setGeometry(300, 300, 250, 150)
# self.setWindowTitle('Absolute')
# self.show()
#
#
# if __name__ == '__main__':
# app = QApplication(sys.argv)
# ex = Example()
# sys.exit(app.exec_())
import psycopg2
def absent(lectureid, sectionid):
    """Return a human-readable report of students absent from a section.

    Selects every student in table ``lec_<lectureid>`` whose id does not
    appear among the POOL attendance rows falling inside the section's
    time window, and formats one "Student ID / Family Name / Given Name"
    triple per student.

    NOTE(review): the ``lec_%s`` table name is filled in by the driver's
    value substitution, which only yields a valid identifier for integer
    ids — identifiers cannot safely be parameterized this way; verify
    ``lectureid`` is always an int at the call sites.

    Fixed: the original leaked the connection and cursor whenever
    ``execute()``/``fetchall()`` raised; cleanup now runs in all cases.
    """
    postgreSQL_select_Query = "select * from lec_%s \
                           where student_id not in (select base.studentid\
                        from (select S.SectionID,Lectures.Lecture_Name,P.StudentID\
                        from Sections As S\
                        Join POOL as P\
                        On (P.Time > S.Time_Start)\
                        and (P.Time < S.Time_End)\
                        Join Lectures\
                        ON S.LectureID = Lectures.Lecture_ID\
                        Order By SectionID) as base\
                        join Students \
                        ON base.studentid = Students.Student_ID\
                        where sectionid = '%s' );"

    connection = psycopg2.connect(database="profmate", user="python",
                                  password="python", host="34.74.217.167",
                                  port="5432")
    try:
        with connection.cursor() as cursor:
            cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))
            print("Selecting rows from POOL table using cursor.fetchall")
            current_table = cursor.fetchall()
    finally:
        # Always release the connection, even when the query fails.
        connection.close()
        print("PostgreSQL connection is closed")

    print("Print each row and it's columns values")
    parts = []
    for row in current_table:
        parts.append("Student ID = " + str(row[0]) + "\n")
        parts.append("Family Name = " + row[1] + "\n")
        parts.append("Given Name = " + row[2] + "\n")
    return "".join(parts)
if __name__ == '__main__':
    # Ad-hoc smoke test against the live database.
    # Fixed: removed the dead locals a/b/c left over from an earlier
    # insert_students() experiment.
    print(absent(101, 1001))
print(absent(101, 1001))
|
flexible
|
{
"blob_id": "e05dac901228e6972c1cb48ce2def3d248b4c167",
"index": 3053,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef absent(lectureid, sectionid):\n connection = psycopg2.connect(database='profmate', user='python',\n password='python', host='34.74.217.167', port='5432')\n cursor = connection.cursor()\n postgreSQL_select_Query = (\n \"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );\"\n )\n cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))\n print('Selecting rows from POOL table using cursor.fetchall')\n current_table = cursor.fetchall()\n print(\"Print each row and it's columns values\")\n longstring = str('')\n for row in current_table:\n longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\\n'))\n longstring = ''.join((longstring, 'Family Name = ', row[1], '\\n'))\n longstring = ''.join((longstring, 'Given Name = ', row[2], '\\n'))\n cursor.close()\n connection.close()\n print('PostgreSQL connection is closed')\n return longstring\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef absent(lectureid, sectionid):\n connection = psycopg2.connect(database='profmate', user='python',\n password='python', host='34.74.217.167', port='5432')\n cursor = connection.cursor()\n postgreSQL_select_Query = (\n \"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );\"\n )\n cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))\n print('Selecting rows from POOL table using cursor.fetchall')\n current_table = cursor.fetchall()\n print(\"Print each row and it's columns values\")\n longstring = str('')\n for row in current_table:\n longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\\n'))\n longstring = ''.join((longstring, 'Family Name = ', row[1], '\\n'))\n longstring = ''.join((longstring, 'Given Name = ', row[2], '\\n'))\n cursor.close()\n connection.close()\n print('PostgreSQL connection is closed')\n return longstring\n\n\nif __name__ == '__main__':\n a = '234567890'\n b = 'Tester'\n c = 'One'\n print(absent(101, 1001))\n",
"step-4": "import psycopg2\n\n\ndef absent(lectureid, sectionid):\n connection = psycopg2.connect(database='profmate', user='python',\n password='python', host='34.74.217.167', port='5432')\n cursor = connection.cursor()\n postgreSQL_select_Query = (\n \"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );\"\n )\n cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))\n print('Selecting rows from POOL table using cursor.fetchall')\n current_table = cursor.fetchall()\n print(\"Print each row and it's columns values\")\n longstring = str('')\n for row in current_table:\n longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\\n'))\n longstring = ''.join((longstring, 'Family Name = ', row[1], '\\n'))\n longstring = ''.join((longstring, 'Given Name = ', row[2], '\\n'))\n cursor.close()\n connection.close()\n print('PostgreSQL connection is closed')\n return longstring\n\n\nif __name__ == '__main__':\n a = '234567890'\n b = 'Tester'\n c = 'One'\n print(absent(101, 1001))\n",
"step-5": "# # -*- coding: utf-8 -*-\n#\n# \"\"\"\n# Py40 PyQt5 tutorial\n#\n# This example shows three labels on a window\n# using absolute positioning.\n#\n# author: Jan Bodnar\n# website: py40.com\n# last edited: January 2015\n# \"\"\"\n#\n# import sys\n# from PyQt5.QtWidgets import QWidget, QLabel, QApplication\n#\n#\n# class Example(QWidget):\n#\n# def __init__(self):\n# super().__init__()\n#\n# self.initUI()\n#\n# def initUI(self):\n# lbl1 = QLabel('Zetcode', self)\n# lbl1.move(15, 10)\n#\n# lbl2 = QLabel('tutorials', self)\n# lbl2.move(35, 40)\n#\n# lbl3 = QLabel('for programmers', self)\n# lbl3.move(55, 70)\n#\n# self.setGeometry(300, 300, 250, 150)\n# self.setWindowTitle('Absolute')\n# self.show()\n#\n#\n# if __name__ == '__main__':\n# app = QApplication(sys.argv)\n# ex = Example()\n# sys.exit(app.exec_())\nimport psycopg2\n\ndef absent(lectureid,sectionid):\n connection = psycopg2.connect(database=\"profmate\", user=\"python\", password=\"python\", host=\"34.74.217.167\",\n port=\"5432\")\n cursor = connection.cursor()\n postgreSQL_select_Query =\"select * from lec_%s \\\n where student_id not in (select base.studentid\\\n from (select S.SectionID,Lectures.Lecture_Name,P.StudentID\\\n from Sections As S\\\n Join POOL as P\\\n On (P.Time > S.Time_Start)\\\n and (P.Time < S.Time_End)\\\n Join Lectures\\\n ON S.LectureID = Lectures.Lecture_ID\\\n Order By SectionID) as base\\\n join Students \\\n ON base.studentid = Students.Student_ID\\\n where sectionid = '%s' );\"\n\n cursor.execute(postgreSQL_select_Query,(lectureid,sectionid))\n print(\"Selecting rows from POOL table using cursor.fetchall\")\n current_table = cursor.fetchall()\n\n print(\"Print each row and it's columns values\")\n\n longstring = str('')\n for row in current_table:\n # print(\"Student ID = \", row[0])\n # print(\"Family Name = \", row[1])\n # print(\"Given Name = \", row[2], \"\\n\")\n longstring = \"\".join((longstring, \"Student ID = \",str(row[0]),\"\\n\"))\n longstring = 
\"\".join((longstring, \"Family Name = \", row[1], \"\\n\"))\n longstring = \"\".join((longstring, \"Given Name = \", row[2], \"\\n\"))\n\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n return longstring\n\nif __name__ == '__main__':\n a = '234567890'\n b = 'Tester'\n c = 'One'\n # insert_students(a, b, c)\n print(absent(101, 1001))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from math import pow
from math import tan
import plotly.figure_factory as ff
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
def euler():
    """Tabulate Euler's method for y' = (x + y)^2, y(0) = 0 with step
    h = 0.1 on [0, 0.5], compare it against the exact solution
    y = tan(x) - x, and render the table with plotly.

    Fixed: ``delta_y`` called ``fun(y_eval[i], x[i])`` with the
    arguments swapped relative to fun's (x, y) signature — harmless
    only because (x + y)^2 is symmetric, but corrected for clarity.
    """
    h = 0.1
    x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
    y_eval = [0.0]   # Euler approximations, starting from y(0) = 0
    delta_y = [0.0]  # per-step increments h * f(x_k, y_k)
    y_real = [0.0]   # exact solution values
    eps = [0.0]      # absolute errors |y_real - y_eval|
    for i in range(1, len(x)):
        y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))
        # Arguments now in (x, y) order to match fun's signature.
        delta_y.append(h * fun(x[i], y_eval[i]))
        y_real.append(real_fun(x[i]))
        eps.append(abs(y_real[i] - y_eval[i]))
    print(y_eval)
    print(delta_y)
    print(y_real)
    print(eps)
    # Render all columns as a plotly table, one row per step k.
    data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]
    for i in range(len(x)):
        data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])
    table = ff.create_table(data_matrix)
    plot(table)
def fun(x, y):
return pow(x + y, 2)
def real_fun(x):
    """Exact solution of y' = (x + y)^2 with y(0) = 0, i.e. tan(x) - x."""
    exact = tan(x)
    return exact - x
euler()
|
normal
|
{
"blob_id": "20f0480ee7e0782b23ec8ade150cdd8d8ad718bb",
"index": 783,
"step-1": "<mask token>\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\neuler()\n",
"step-4": "from math import pow\nfrom math import tan\nimport plotly.figure_factory as ff\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n data_matrix = [['k', 'x', 'y', 'delta_y', 'y_real', 'eps']]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\neuler()\n",
"step-5": "from math import pow\nfrom math import tan\n\nimport plotly.figure_factory as ff\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\ndef euler():\n h = 0.1\n x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]\n y_eval = [0.0]\n delta_y = [0.0]\n y_real = [0.0]\n eps = [0.0]\n\n for i in range(1, len(x)):\n y_eval.append(y_eval[i - 1] + h * fun(x[i - 1], y_eval[i - 1]))\n delta_y.append(h * fun(y_eval[i], x[i]))\n y_real.append(real_fun(x[i]))\n eps.append(abs(y_real[i] - y_eval[i]))\n\n # print in table format\n print(y_eval)\n print(delta_y)\n print(y_real)\n print(eps)\n\n data_matrix = [\n ['k', 'x', 'y', 'delta_y', 'y_real', 'eps']\n ]\n for i in range(0, len(x)):\n data_matrix.append([i, x[i], y_eval[i], delta_y[i], y_real[i], eps[i]])\n\n table = ff.create_table(data_matrix)\n plot(table)\n\n\ndef fun(x, y):\n return pow(x + y, 2)\n\n\ndef real_fun(x):\n return tan(x) - x\n\n\neuler()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
def decorate(a):
    """Decorator factory: the wrapped function receives *a* as its first
    positional argument, followed by whatever the caller passes."""
    def wrapper(func):
        def call_with_a(*args, **kwargs):
            return func(a, *args, **kwargs)
        return call_with_a
    return wrapper
@decorate(3)
def func(a, b, c):
    """Print the three arguments; `a` is injected by @decorate(3)."""
    # Fixed: `print a, b, c` is Python 2 statement syntax and a
    # SyntaxError on Python 3 — use the print() function.
    print(a, b, c)


# The decorator binds a=3, so callers supply only b and c.
func(1, 2)
|
normal
|
{
"blob_id": "d2049b20e00b45df9fb0772d9a654a58a00191c5",
"index": 9865,
"step-1": "def decorate(a):\n def inner(f):\n def decorated(*args, **kwargs):\n return f(a, *args, **kwargs)\n return decorated\n return inner\n\n\n@decorate(3)\ndef func(a, b, c):\n print a, b, c\n\n\nfunc(1, 2)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if numone > numtwo:
print('The first number is bigger')
elif numtwo > numone:
print('The second number is bigger')
else:
print('The numbers are the same')
if numone % 3 == 0 and numtwo % 3 == 0:
print('They are both divisible by 3')
elif numone % 3 == 0:
print('Only the first number is divisible by three')
elif numtwo % 3 == 0:
print('Only the second number is divisible by three')
else:
print('Neither number is divisible by 3')
<|reserved_special_token_0|>
if product == numone * numtwo:
print('correct')
else:
print('incorrect')
<|reserved_special_token_1|>
numone = int(input('Enter a number: '))
numtwo = int(input('Enter a 2nd number: '))

# Report which of the two numbers is larger (or that they are equal).
if numone == numtwo:
    print('The numbers are the same')
elif numone > numtwo:
    print('The first number is bigger')
else:
    print('The second number is bigger')

# Report divisibility by three: both, only one, or neither.
first_div3 = numone % 3 == 0
second_div3 = numtwo % 3 == 0
if first_div3 and second_div3:
    print('They are both divisible by 3')
elif first_div3:
    print('Only the first number is divisible by three')
elif second_div3:
    print('Only the second number is divisible by three')
else:
    print('Neither number is divisible by 3')

# Quiz the user on the product and grade the answer.
product = int(input('What is the product of your two numbers?: '))
print('correct' if product == numone * numtwo else 'incorrect')
<|reserved_special_token_1|>
#Max Low
#9-25-17
#quiz2.py -- numbers , bigger smaller same, divisible by 3, product and correct person
# Reads two integers, compares them, reports divisibility by 3, then
# quizzes the user on their product.
numone = int(input('Enter a number: '))
numtwo = int(input('Enter a 2nd number: '))
# Which number is larger (or are they equal)?
if numone > numtwo:
    print('The first number is bigger')
elif numtwo > numone:
    print('The second number is bigger')
else:
    print('The numbers are the same')
# Divisibility by 3: both, only one, or neither.
if numone % 3 == 0 and numtwo % 3 == 0:
    print('They are both divisible by 3')
elif numone % 3 == 0:
    print('Only the first number is divisible by three')
elif numtwo % 3 == 0:
    print('Only the second number is divisible by three')
else:
    print('Neither number is divisible by 3')
# Grade the user's answer for numone * numtwo.
product = int(input('What is the product of your two numbers?: '))
if product == numone*numtwo:
    print('correct')
else:
    print('incorrect')
|
flexible
|
{
"blob_id": "a67612e8301728d1fb366d7c8909fa830f04bf45",
"index": 9739,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif numone > numtwo:\n print('The first number is bigger')\nelif numtwo > numone:\n print('The second number is bigger')\nelse:\n print('The numbers are the same')\nif numone % 3 == 0 and numtwo % 3 == 0:\n print('They are both divisible by 3')\nelif numone % 3 == 0:\n print('Only the first number is divisible by three')\nelif numtwo % 3 == 0:\n print('Only the second number is divisible by three')\nelse:\n print('Neither number is divisible by 3')\n<mask token>\nif product == numone * numtwo:\n print('correct')\nelse:\n print('incorrect')\n",
"step-3": "numone = int(input('Enter a number: '))\nnumtwo = int(input('Enter a 2nd number: '))\nif numone > numtwo:\n print('The first number is bigger')\nelif numtwo > numone:\n print('The second number is bigger')\nelse:\n print('The numbers are the same')\nif numone % 3 == 0 and numtwo % 3 == 0:\n print('They are both divisible by 3')\nelif numone % 3 == 0:\n print('Only the first number is divisible by three')\nelif numtwo % 3 == 0:\n print('Only the second number is divisible by three')\nelse:\n print('Neither number is divisible by 3')\nproduct = int(input('What is the product of your two numbers?: '))\nif product == numone * numtwo:\n print('correct')\nelse:\n print('incorrect')\n",
"step-4": "#Max Low\n#9-25-17\n#quiz2.py -- numbers , bigger smaller same, divisible by 3, product and correct person\n\nnumone = int(input('Enter a number: '))\nnumtwo = int(input('Enter a 2nd number: '))\n\nif numone > numtwo:\n print('The first number is bigger')\nelif numtwo > numone:\n print('The second number is bigger')\nelse:\n print('The numbers are the same')\n\n \nif numone % 3 == 0 and numtwo % 3 == 0:\n print('They are both divisible by 3')\nelif numone % 3 == 0:\n print('Only the first number is divisible by three')\nelif numtwo % 3 == 0:\n print('Only the second number is divisible by three')\nelse:\n print('Neither number is divisible by 3')\n\nproduct = int(input('What is the product of your two numbers?: '))\nif product == numone*numtwo:\n print('correct')\nelse:\n print('incorrect')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('{0}\n{1}\n{0}'.format(line, header))
for i in range(-10, 31):
print('| {:^7} | {:^10.10} |'.format(i, i * 1.8 + 32))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Aspen Thompson'
# Table banner and a separator rule of matching width.
header = '| Celsius | Fahrenheit |'
line = '-' * len(header)
print('{0}\n{1}\n{0}'.format(line, header))
# One row per whole degree Celsius from -10 to 30 (F = C * 1.8 + 32).
for i in range(-10, 31):
    print('| {:^7} | {:^10.10} |'.format(i, i * 1.8 + 32))
<|reserved_special_token_1|>
"""
Generates a temperature celsius to fahrenheit conversion table
AT
11-10-2018
"""
__author__ = "Aspen Thompson"
header = "| Celsius | Fahrenheit |"
line = "-" * len(header)
print("{0}\n{1}\n{0}".format(line, header))
for i in range(-10, 31):
print("| {:^7} | {:^10.10} |".format(i, i * 1.8 + 32))
|
flexible
|
{
"blob_id": "591d0a166af5b8d0bed851c2f56ecc3da4f3a5eb",
"index": 4367,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('{0}\\n{1}\\n{0}'.format(line, header))\nfor i in range(-10, 31):\n print('| {:^7} | {:^10.10} |'.format(i, i * 1.8 + 32))\n",
"step-3": "<mask token>\n__author__ = 'Aspen Thompson'\nheader = '| Celsius | Fahrenheit |'\nline = '-' * len(header)\nprint('{0}\\n{1}\\n{0}'.format(line, header))\nfor i in range(-10, 31):\n print('| {:^7} | {:^10.10} |'.format(i, i * 1.8 + 32))\n",
"step-4": "\"\"\"\nGenerates a temperature celsius to fahrenheit conversion table\n\nAT\n11-10-2018\n\"\"\"\n\n__author__ = \"Aspen Thompson\"\n\nheader = \"| Celsius | Fahrenheit |\"\nline = \"-\" * len(header)\nprint(\"{0}\\n{1}\\n{0}\".format(line, header))\n\nfor i in range(-10, 31):\n print(\"| {:^7} | {:^10.10} |\".format(i, i * 1.8 + 32))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class liteQueue:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _get_conn(self):
id = threading.current_thread().ident
if id not in self._connection_cache:
self._connection_cache[id] = sqlite3.Connection(self.conn_url,
timeout=60)
return self._connection_cache[id]
def __iter__(self):
with self._get_conn() as conn:
for result in conn.execute(self._iterate):
yield result
def put_many(self, list_obj):
with self._get_conn() as conn:
try:
conn.cursor().executemany(self._putList, list_obj)
except Exception as e:
print(e)
def pop_many(self, amount, sleep_wait=True):
keep_pooling = True
sql_pop = self._pop_get_many + str(amount)
with self._get_conn() as conn:
result = None
while keep_pooling:
conn.execute(self._write_lock)
cursor = conn.execute(sql_pop)
result = cursor.fetchall()
if len(result) > 0:
keep_pooling = False
id_first = int(result[0][4])
id_last = int(result[-1][4])
conn.execute(self._pop_del_many, (id_first, id_last))
conn.commit()
return result
else:
conn.commit()
return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class liteQueue:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, category, parser):
self.conn_url = 'databases/' + parser + '_' + category + '.db'
self._connection_cache = {}
with self._get_conn() as conn:
conn.execute(self._create)
def _get_conn(self):
id = threading.current_thread().ident
if id not in self._connection_cache:
self._connection_cache[id] = sqlite3.Connection(self.conn_url,
timeout=60)
return self._connection_cache[id]
def __iter__(self):
with self._get_conn() as conn:
for result in conn.execute(self._iterate):
yield result
def put_many(self, list_obj):
with self._get_conn() as conn:
try:
conn.cursor().executemany(self._putList, list_obj)
except Exception as e:
print(e)
def pop_many(self, amount, sleep_wait=True):
keep_pooling = True
sql_pop = self._pop_get_many + str(amount)
with self._get_conn() as conn:
result = None
while keep_pooling:
conn.execute(self._write_lock)
cursor = conn.execute(sql_pop)
result = cursor.fetchall()
if len(result) > 0:
keep_pooling = False
id_first = int(result[0][4])
id_last = int(result[-1][4])
conn.execute(self._pop_del_many, (id_first, id_last))
conn.commit()
return result
else:
conn.commit()
return None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class liteQueue:
    """Thread-aware SQLite queue of links to fetch.

    Rows are (url, category, origin, thumb, fetched, fetched_imgs) in
    table ``link``; ``fetched`` marks rows that were already popped, so
    claims survive restarts. One connection is cached per thread because
    sqlite3 handles must not be shared across threads.
    """

    _create = (
        "CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\tTEXT,"
        "'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,"
        "'fetched_imgs'\tINTEGER,PRIMARY KEY(url));"
    )
    _putList = 'INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)'
    _iterate = 'SELECT * FROM LINK WHERE FETCHED = 0'
    _write_lock = 'BEGIN IMMEDIATE'
    _pop_get_many = (
        'SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK'
        ' WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT '
    )
    _pop_del_many = (
        'UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0'
        ' AND (ROWID >= ? AND ROWID <=?)'
    )

    def __init__(self, category, parser):
        """Open/create ``databases/<parser>_<category>.db`` and its table."""
        self.conn_url = 'databases/' + parser + '_' + category + '.db'
        self._connection_cache = {}
        with self._get_conn() as conn:
            conn.execute(self._create)

    def _get_conn(self):
        """Return this thread's cached connection, opening one on first use."""
        thread_id = threading.current_thread().ident
        conn = self._connection_cache.get(thread_id)
        if conn is None:
            conn = sqlite3.Connection(self.conn_url, timeout=60)
            self._connection_cache[thread_id] = conn
        return conn

    def __iter__(self):
        """Yield every row whose FETCHED flag is still 0."""
        with self._get_conn() as conn:
            yield from conn.execute(self._iterate)

    def put_many(self, list_obj):
        """Insert many link tuples; URLs already queued are skipped by
        the database (INSERT OR IGNORE). Errors are reported and swallowed."""
        with self._get_conn() as conn:
            try:
                conn.cursor().executemany(self._putList, list_obj)
            except Exception as e:
                print(e)

    def pop_many(self, amount, sleep_wait=True):
        """Atomically claim up to *amount* pending rows.

        Returns a list of (url, category, origin, thumb, rowid) tuples,
        or None when nothing is pending. ``sleep_wait`` is accepted for
        interface compatibility but unused.
        """
        query = self._pop_get_many + str(amount)
        with self._get_conn() as conn:
            conn.execute(self._write_lock)  # take the write lock up front
            rows = conn.execute(query).fetchall()
            if not rows:
                conn.commit()  # release the lock
                return None
            first_rowid = int(rows[0][4])
            last_rowid = int(rows[-1][4])
            conn.execute(self._pop_del_many, (first_rowid, last_rowid))
            conn.commit()
            return rows
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_queue(category, parser):
    """Return a link queue for (category, parser).

    NOTE(review): ``sq`` is read here but never assigned anywhere visible,
    so it presumably stays ``None`` and a fresh liteQueue is created on
    every call — confirm whether singleton caching was intended.
    """
    if sq == None:
        return liteQueue(category, parser)
    return sq
<|reserved_special_token_0|>
class liteQueue:
    """SQLite-backed FIFO of crawler links (one connection per thread).

    The ``link`` table stores (url, category, origin, thumb, fetched,
    fetched_imgs); rows with ``fetched = 0`` are pending work.
    """

    _create = (
        "CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\tTEXT,"
        "'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\t"
        "INTEGER,PRIMARY KEY(url));"
    )
    _putList = 'INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)'
    _iterate = 'SELECT * FROM LINK WHERE FETCHED = 0'
    _write_lock = 'BEGIN IMMEDIATE'
    _pop_get_many = (
        'SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK'
        ' WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT '
    )
    _pop_del_many = (
        'UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0'
        ' AND (ROWID >= ? AND ROWID <=?)'
    )

    def __init__(self, category, parser):
        """Bind to databases/<parser>_<category>.db and create the schema."""
        self.conn_url = 'databases/' + parser + '_' + category + '.db'
        self._connection_cache = {}
        with self._get_conn() as conn:
            conn.execute(self._create)

    def _get_conn(self):
        """Per-thread sqlite3 connection (handles are not thread-shareable)."""
        ident = threading.current_thread().ident
        if ident not in self._connection_cache:
            self._connection_cache[ident] = sqlite3.Connection(
                self.conn_url, timeout=60)
        return self._connection_cache[ident]

    def __iter__(self):
        """Iterate over all rows still marked unfetched."""
        with self._get_conn() as conn:
            for row in conn.execute(self._iterate):
                yield row

    def put_many(self, list_obj):
        """Bulk-insert link tuples; duplicate URLs are ignored by the DB."""
        with self._get_conn() as conn:
            try:
                conn.cursor().executemany(self._putList, list_obj)
            except Exception as exc:
                print(exc)  # best-effort insert: report and continue

    def pop_many(self, amount, sleep_wait=True):
        """Claim up to *amount* pending rows inside one write transaction.

        Returns the claimed (url, category, origin, thumb, rowid) tuples,
        or None when the queue is empty. *sleep_wait* is kept for API
        compatibility and currently ignored.
        """
        pending = True
        select_sql = self._pop_get_many + str(amount)
        with self._get_conn() as conn:
            while pending:
                conn.execute(self._write_lock)  # BEGIN IMMEDIATE: write lock
                batch = conn.execute(select_sql).fetchall()
                if len(batch) > 0:
                    pending = False
                    lo = int(batch[0][4])
                    hi = int(batch[-1][4])
                    conn.execute(self._pop_del_many, (lo, hi))
                    conn.commit()
                    return batch
                else:
                    conn.commit()
                    return None
<|reserved_special_token_1|>
import sqlite3
import sys
import threading
from time import sleep
sq = None
def get_queue(category, parser):
    """Return the process-wide link queue, creating it on first use.

    Bug fix: the original never assigned ``sq``, so the cached-instance
    check could never succeed and every call built a brand-new liteQueue.
    The first call now stores its queue in ``sq``; later calls reuse it
    (their *category*/*parser* arguments are ignored, as the original
    check already implied).
    """
    global sq
    if sq is None:
        sq = liteQueue(category, parser)
    return sq
"""
SqLite Job Handler class for Links
"""
class liteQueue:
    """SqLite Job Handler class for Links.

    Table ``link`` stores (url, category, origin, thumb, fetched,
    fetched_imgs); ``fetched = 0`` marks pending work, so claimed rows
    survive restarts. One connection is cached per thread id because
    sqlite3 connections cannot be shared between threads.
    """

    _create = "CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\tTEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\tINTEGER,PRIMARY KEY(url));"
    _putList = "INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)"
    _iterate = "SELECT * FROM LINK WHERE FETCHED = 0"
    _write_lock = "BEGIN IMMEDIATE"
    _pop_get_many = "SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT "
    _pop_del_many = "UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)"

    def __init__(self, category, parser):
        """Open databases/<parser>_<category>.db and ensure the table exists."""
        self.conn_url = "databases/" + parser + "_" + category + ".db"
        self._connection_cache = {}
        with self._get_conn() as conn:
            conn.execute(self._create)

    def _get_conn(self):
        """Return (creating if needed) the calling thread's connection."""
        id = threading.current_thread().ident
        if id not in self._connection_cache:
            self._connection_cache[id] = sqlite3.Connection(self.conn_url, timeout=60)
        return self._connection_cache[id]

    def __iter__(self):
        """Yield every row whose FETCHED flag is still 0."""
        with self._get_conn() as conn:
            for result in conn.execute(self._iterate):
                yield result

    def put_many(self, list_obj):
        """Insert many link tuples; URLs already queued are ignored.

        Errors are reported and swallowed on purpose (best-effort insert).
        """
        with self._get_conn() as conn:
            try:
                conn.cursor().executemany(self._putList, list_obj)
            except Exception as e:
                print(e)

    def pop_many(self, amount, sleep_wait=True):
        """Atomically claim up to *amount* pending rows.

        Returns a list of (url, category, origin, thumb, rowid) tuples,
        or None when nothing is pending.

        Fixes vs. the original:
        - the ``while keep_pooling`` loop was dead code (both branches
          returned on the first pass) and has been removed;
        - *amount* is coerced with int() before being concatenated into
          the LIMIT clause, so a malformed value cannot corrupt the SQL.
        ``sleep_wait`` is retained for interface compatibility but unused.
        """
        sql_pop = self._pop_get_many + str(int(amount))
        with self._get_conn() as conn:
            conn.execute(self._write_lock)  # lock the database
            result = conn.execute(sql_pop).fetchall()
            if result:
                id_first = int(result[0][4])
                id_last = int(result[-1][4])
                conn.execute(self._pop_del_many, (id_first, id_last))
                conn.commit()  # unlock the database
                return result
            conn.commit()  # unlock the database
            return None
|
flexible
|
{
"blob_id": "ed6eda4b6dbf3e94d8efb53004b19cd9c49e927e",
"index": 3979,
"step-1": "<mask token>\n\n\nclass liteQueue:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-2": "<mask token>\n\n\nclass liteQueue:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, category, parser):\n self.conn_url = 'databases/' + parser + '_' + category + '.db'\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-3": "<mask token>\n\n\nclass liteQueue:\n _create = (\n \"CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\\tTEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\\tINTEGER,PRIMARY KEY(url));\"\n )\n _putList = 'INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)'\n _iterate = 'SELECT * FROM LINK WHERE FETCHED = 0'\n _write_lock = 'BEGIN IMMEDIATE'\n _pop_get_many = (\n 'SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT '\n )\n _pop_del_many = (\n 'UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)'\n )\n\n def __init__(self, category, parser):\n self.conn_url = 'databases/' + parser + '_' + category + '.db'\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-4": "<mask token>\n\n\ndef get_queue(category, parser):\n if sq == None:\n return liteQueue(category, parser)\n return sq\n\n\n<mask token>\n\n\nclass liteQueue:\n _create = (\n \"CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\\tTEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\\tINTEGER,PRIMARY KEY(url));\"\n )\n _putList = 'INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)'\n _iterate = 'SELECT * FROM LINK WHERE FETCHED = 0'\n _write_lock = 'BEGIN IMMEDIATE'\n _pop_get_many = (\n 'SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT '\n )\n _pop_del_many = (\n 'UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)'\n )\n\n def __init__(self, category, parser):\n self.conn_url = 'databases/' + parser + '_' + category + '.db'\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n\n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url,\n timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n\n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n\n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock)\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n if len(result) > 0:\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit()\n return result\n else:\n conn.commit()\n return None\n",
"step-5": "import sqlite3\nimport sys\nimport threading\nfrom time import sleep\n\nsq = None\n\ndef get_queue(category, parser):\n if sq == None:\n return liteQueue(category, parser)\n return sq\n\n\"\"\"\nSqLite Job Handler class for Links\n\"\"\"\nclass liteQueue:\n _create = \"CREATE TABLE IF NOT EXISTS link ( 'url' TEXT,'category'\tTEXT,'origin' TEXT, 'thumb' TEXT, 'fetched' INTEGER,'fetched_imgs'\tINTEGER,PRIMARY KEY(url));\"\n _putList = \"INSERT OR IGNORE INTO link VALUES(?, ?, ?, ?, ?, ?)\"\n _iterate = \"SELECT * FROM LINK WHERE FETCHED = 0\"\n _write_lock = \"BEGIN IMMEDIATE\"\n _pop_get_many = \"SELECT URL, CATEGORY, ORIGIN, THUMB, ROWID FROM LINK WHERE FETCHED = 0 ORDER BY ROWID ASC LIMIT \"\n _pop_del_many = \"UPDATE LINK SET FETCHED=1 WHERE FETCHED = 0 AND (ROWID >= ? AND ROWID <=?)\"\n\n def __init__(self, category, parser):\n self.conn_url = \"databases/\" + parser + \"_\" + category + \".db\"\n self._connection_cache = {}\n with self._get_conn() as conn:\n conn.execute(self._create)\n \n def _get_conn(self):\n id = threading.current_thread().ident\n if id not in self._connection_cache:\n self._connection_cache[id] = sqlite3.Connection(self.conn_url, timeout=60)\n return self._connection_cache[id]\n\n def __iter__(self):\n with self._get_conn() as conn:\n for result in conn.execute(self._iterate):\n yield result\n \n def put_many(self, list_obj):\n with self._get_conn() as conn:\n try:\n conn.cursor().executemany(self._putList, list_obj)\n except Exception as e:\n print(e)\n \n def pop_many(self, amount, sleep_wait=True):\n keep_pooling = True\n sql_pop = self._pop_get_many + str(amount)\n with self._get_conn() as conn:\n result = None\n while keep_pooling:\n conn.execute(self._write_lock) # lock the database\n cursor = conn.execute(sql_pop)\n result = cursor.fetchall()\n\n if(len(result) > 0):\n keep_pooling = False\n id_first = int(result[0][4])\n id_last = int(result[-1][4])\n conn.execute(self._pop_del_many, (id_first, id_last))\n conn.commit() 
# unlock the database\n return result\n else:\n conn.commit() # unlock the database\n return None",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
<|reserved_special_token_0|>
class SiLU(nn.Layer):
def forward(self, x):
return x * nn.functional.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x)
<|reserved_special_token_0|>
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1D(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2D(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3D(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
<|reserved_special_token_0|>
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(axis=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SiLU(nn.Layer):
def forward(self, x):
return x * nn.functional.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1D(*args, **kwargs)
elif dims == 2:
return nn.Conv2D(*args, **kwargs)
elif dims == 3:
return nn.Conv3D(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
<|reserved_special_token_0|>
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1D(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2D(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3D(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(axis=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SiLU(nn.Layer):
    """SiLU (swish) activation: multiply the input by its sigmoid."""

    def forward(self, x):
        # Elementwise x * sigmoid(x).
        return x * nn.functional.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
    """GroupNorm subclass used as this model's normalization layer.

    NOTE(review): this override simply delegates to the parent, so it is
    currently a no-op; confirm whether a dtype cast (e.g. to float32) was
    meant to happen here.
    """

    def forward(self, x):
        return super().forward(x)
def conv_nd(dims, *args, **kwargs):
    """Build an ``nn.Conv{1,2,3}D`` layer matching *dims* spatial dims.

    Extra positional/keyword arguments are forwarded to the layer
    constructor. Raises ValueError for any other dimensionality.
    """
    if dims == 1:
        return nn.Conv1D(*args, **kwargs)
    if dims == 2:
        return nn.Conv2D(*args, **kwargs)
    if dims == 3:
        return nn.Conv3D(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')
<|reserved_special_token_0|>
def avg_pool_nd(dims, *args, **kwargs):
    """Build an ``nn.AvgPool{1,2,3}D`` layer matching *dims* spatial dims.

    All extra arguments are forwarded to the pooling-layer constructor;
    any other *dims* raises ValueError.
    """
    if dims == 1:
        return nn.AvgPool1D(*args, **kwargs)
    if dims == 2:
        return nn.AvgPool2D(*args, **kwargs)
    if dims == 3:
        return nn.AvgPool3D(*args, **kwargs)
    raise ValueError(f'unsupported dimensions: {dims}')
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(axis=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=
half, dtype=paddle.float32) / half)
args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]
embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)
if dim % 2:
embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:,
:1])], axis=-1)
return embedding
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SiLU(nn.Layer):
def forward(self, x):
return x * nn.functional.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1D(*args, **kwargs)
elif dims == 2:
return nn.Conv2D(*args, **kwargs)
elif dims == 3:
return nn.Conv3D(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1D(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2D(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3D(*args, **kwargs)
raise ValueError(f'unsupported dimensions: {dims}')
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(axis=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=
half, dtype=paddle.float32) / half)
args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]
embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)
if dim % 2:
embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:,
:1])], axis=-1)
return embedding
def checkpoint(func, inputs, params, flag):
"""
This function is disabled. And now just forward.
"""
return func(*inputs)
<|reserved_special_token_1|>
"""
Various utilities for neural networks implemented by Paddle. This code is rewritten based on:
https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/nn.py
"""
import math
import paddle
import paddle.nn as nn
class SiLU(nn.Layer):
    """Sigmoid Linear Unit activation: ``x * sigmoid(x)``."""

    def forward(self, x):
        # Explicit form kept to match the reference guided-diffusion code
        # (Paddle also ships nn.functional.silu with the same semantics).
        return nn.functional.sigmoid(x) * x
class GroupNorm32(nn.GroupNorm):
    # NOTE(review): the upstream guided-diffusion GroupNorm32 casts the input
    # to float32 before normalizing and casts back; this port is a plain
    # pass-through kept only so callers can use the same class name.
    def forward(self, x):
        return super().forward(x)
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.

    :param dims: spatial dimensionality of the convolution (1, 2, or 3).
    :return: the corresponding ``nn.ConvND`` module.
    :raises ValueError: if ``dims`` is not 1, 2, or 3.
    """
    conv_types = {1: nn.Conv1D, 2: nn.Conv2D, 3: nn.Conv3D}
    if dims not in conv_types:
        raise ValueError(f"unsupported dimensions: {dims}")
    return conv_types[dims](*args, **kwargs)
def linear(*args, **kwargs):
    """
    Create a linear module.

    Thin pass-through wrapper around ``nn.Linear``, kept so model code can
    use one factory per layer type (mirrors ``conv_nd``/``avg_pool_nd``).
    """
    return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.

    :param dims: spatial dimensionality of the pooling (1, 2, or 3).
    :return: the corresponding ``nn.AvgPoolND`` module.
    :raises ValueError: if ``dims`` is not 1, 2, or 3.
    """
    pool_types = {1: nn.AvgPool1D, 2: nn.AvgPool2D, 3: nn.AvgPool3D}
    if dims not in pool_types:
        raise ValueError(f"unsupported dimensions: {dims}")
    return pool_types[dims](*args, **kwargs)
def update_ema(target_params, source_params, rate=0.99):
    """
    Update target parameters to be closer to those of source parameters using
    an exponential moving average: ``targ = rate * targ + (1 - rate) * src``.

    :param target_params: the target parameter sequence (updated in place).
    :param source_params: the source parameter sequence.
    :param rate: the EMA rate (closer to 1 means slower).
    """
    for targ, src in zip(target_params, source_params):
        # Paddle's in-place ``Tensor.add_`` does not accept torch's
        # ``alpha=`` keyword, so fold the scale into the operand instead.
        targ.detach().mul_(rate).add_(src * (1 - rate))
def zero_module(module):
    """
    Reset every parameter of *module* to zero, in place.

    :param module: the module whose parameters are zeroed.
    :return: the same module, so the call can be chained.
    """
    for param in module.parameters():
        # Write through the detached view so autograd does not record it.
        param.detach().zero_()
    return module
def scale_module(module, scale):
    """
    Multiply every parameter of *module* by *scale*, in place.

    :param module: the module whose parameters are scaled.
    :param scale: the scalar multiplier.
    :return: the same module, so the call can be chained.
    """
    for param in module.parameters():
        # Write through the detached view so autograd does not record it.
        param.detach().mul_(scale)
    return module
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.

    :param tensor: a tensor of shape ``[N, ...]``.
    :return: a tensor of shape ``[N]`` with the per-sample mean.
    """
    non_batch_axes = list(range(1, tensor.ndim))
    return tensor.mean(axis=non_batch_axes)
def normalization(channels):
    """
    Make a standard normalization layer.

    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    # 32 groups is the fixed choice used throughout guided-diffusion.
    return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to ~1/max_period.
    exponents = paddle.arange(start=0, end=half, dtype=paddle.float32) / half
    freqs = paddle.exp(-math.log(max_period) * exponents)
    angles = paddle.cast(timesteps[:, None], 'float32') * freqs[None]
    embedding = paddle.concat([paddle.cos(angles), paddle.sin(angles)], axis=-1)
    if dim % 2:
        # Odd dim: pad with one zero column so the output is exactly [N, dim].
        zero_col = paddle.zeros_like(embedding[:, :1])
        embedding = paddle.concat([embedding, zero_col], axis=-1)
    return embedding
def checkpoint(func, inputs, params, flag):
    """
    Gradient checkpointing is disabled in this port; just run the forward.

    :param func: the callable to evaluate.
    :param inputs: argument tuple passed through to ``func``.
    :param params: unused; kept for API compatibility with the original.
    :param flag: unused; kept for API compatibility with the original.
    :return: whatever ``func(*inputs)`` returns.
    """
    return func(*inputs)
|
flexible
|
{
"blob_id": "364d70fab02291bafadebea68fee94c0210e2de9",
"index": 4365,
"step-1": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\n<mask token>\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\n<mask token>\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\n<mask token>\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module 
for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\n<mask token>\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module 
for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\ndef timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=\n half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)\n if dim % 2:\n embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:,\n :1])], axis=-1)\n return embedding\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a 
standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\ndef timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=\n half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)\n if dim % 2:\n embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:,\n :1])], axis=-1)\n return embedding\n\n\ndef checkpoint(func, inputs, params, flag):\n \"\"\"\n This function is disabled. And now just forward.\n \"\"\"\n return func(*inputs)\n",
"step-5": "\"\"\"\nVarious utilities for neural networks implemented by Paddle. This code is rewritten based on:\nhttps://github.com/openai/guided-diffusion/blob/main/guided_diffusion/nn.py\n\"\"\"\nimport math\n\nimport paddle\nimport paddle.nn as nn\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")\n\n\ndef linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n 
p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\ndef timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)\n if dim % 2:\n embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:, :1])], axis=-1)\n return embedding\n\n\ndef checkpoint(func, inputs, params, flag):\n \"\"\"\n This function is disabled. And now just forward.\n \"\"\"\n return func(*inputs)\n",
"step-ids": [
9,
11,
12,
14,
16
]
}
|
[
9,
11,
12,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
os.chdir(os.path.join(os.getcwd(), 'docs'))
print(os.getcwd())
except:
pass
<|reserved_special_token_0|>
g.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])
g.set_ylabels("""% Spermatocyte Cells
With Expression""")
g.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches
='tight')
<|reserved_special_token_0|>
print(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt
='github'))
res
<|reserved_special_token_0|>
ax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')
plt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',
bbox_inches='tight')
<|reserved_special_token_0|>
display(res)
print(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt
='github'))
<|reserved_special_token_0|>
plot_statsmodels_results(
'../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(
results.summary2()))
display(results.summary2())
np.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]
<|reserved_special_token_0|>
plot_statsmodels_results(
'../output/docs/x_escapers_and_intronless_genes_full.png', str(results.
summary2()))
display(results.summary2())
np.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
os.chdir(os.path.join(os.getcwd(), 'docs'))
print(os.getcwd())
except:
pass
fbgns_no_intron = pickle.load(open(
'../output/paper_submission/intron_less_genes.pkl', 'rb'))
background = pickle.load(open(
'../output/paper_submission/background_fbgns.pkl', 'rb'))
fbgn2chrom = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather'
, columns=['FBgn', 'FB_chrom']).set_index('FBgn').squeeze()
chrx_fbgns = fbgn2chrom[fbgn2chrom == 'X'].index
bias = pd.read_feather(
'../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather').assign(
gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0),
True, False)).assign(pct_gonia=lambda x: x['pct.1']).assign(cyte_bias=
lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False)
).assign(pct_cyte=lambda x: x['pct.2']).set_index('FBgn').loc[:, [
'gonia_bias', 'cyte_bias', 'pct_gonia', 'pct_cyte']].reindex(background
).dropna()
df = bias.copy().join(fbgn2chrom)
df['intronless'] = np.where(df.index.isin(fbgns_no_intron), True, False)
df['X'] = np.where(df.index.isin(chrx_fbgns), True, False)
df['bias'] = 'NS'
df.loc[df.gonia_bias, 'bias'] = 'gonia'
df.loc[df.cyte_bias, 'bias'] = 'cyte'
g = sns.FacetGrid(df, row='bias', row_order=['cyte', 'gonia', 'NS'], col=
'FB_chrom', col_order=['X', '2L', '2R', '3L', '3R'], sharex=True,
sharey=True, margin_titles=True)
g.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])
g.set_ylabels("""% Spermatocyte Cells
With Expression""")
g.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches
='tight')
ct = pd.crosstab(df.intronless, df.bias)
res = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',
'flag_sig']), :]
print(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt
='github'))
res
zscores_intronless = feather_to_cluster_rep_matrix(
'../output/paper_submission/zscore_by_cluster_rep.feather').reindex(
fbgns_no_intron).dropna()
ax = sns.clustermap(zscores_intronless, col_cluster=False, xticklabels=True,
yticklabels=False, cmap='viridis', vmin=-3, vmax=3, rasterized=True)
ax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')
plt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',
bbox_inches='tight')
intronless2chrom = fbgn2chrom.to_frame().query(
"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']")
intronless2chrom['intronless'] = np.where(intronless2chrom.index.isin(
fbgns_no_intron), True, False)
ct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)
res = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',
'flag_sig']), :]
display(res)
print(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt
='github'))
model = smf.logit('intronless ~ cyte_bias + X', data=df.replace({(True): 1,
(False): 0}))
results = model.fit()
plot_statsmodels_results(
'../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(
results.summary2()))
display(results.summary2())
np.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]
model = smf.logit('intronless ~ cyte_bias * X', data=df.replace({(True): 1,
(False): 0}))
results = model.fit()
plot_statsmodels_results(
'../output/docs/x_escapers_and_intronless_genes_full.png', str(results.
summary2()))
display(results.summary2())
np.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]
<|reserved_special_token_1|>
import os
import pickle
import numpy as np
import pandas as pd
from scipy.stats import fisher_exact, contingency
from IPython.display import display, Markdown
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.api import formula as smf
from tabulate import tabulate
from larval_gonad.io import feather_to_cluster_rep_matrix
from larval_gonad.stats import run_chisq
from larval_gonad.plotting import plot_statsmodels_results
try:
os.chdir(os.path.join(os.getcwd(), 'docs'))
print(os.getcwd())
except:
pass
fbgns_no_intron = pickle.load(open(
'../output/paper_submission/intron_less_genes.pkl', 'rb'))
background = pickle.load(open(
'../output/paper_submission/background_fbgns.pkl', 'rb'))
fbgn2chrom = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather'
, columns=['FBgn', 'FB_chrom']).set_index('FBgn').squeeze()
chrx_fbgns = fbgn2chrom[fbgn2chrom == 'X'].index
bias = pd.read_feather(
'../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather').assign(
gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0),
True, False)).assign(pct_gonia=lambda x: x['pct.1']).assign(cyte_bias=
lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False)
).assign(pct_cyte=lambda x: x['pct.2']).set_index('FBgn').loc[:, [
'gonia_bias', 'cyte_bias', 'pct_gonia', 'pct_cyte']].reindex(background
).dropna()
df = bias.copy().join(fbgn2chrom)
df['intronless'] = np.where(df.index.isin(fbgns_no_intron), True, False)
df['X'] = np.where(df.index.isin(chrx_fbgns), True, False)
df['bias'] = 'NS'
df.loc[df.gonia_bias, 'bias'] = 'gonia'
df.loc[df.cyte_bias, 'bias'] = 'cyte'
g = sns.FacetGrid(df, row='bias', row_order=['cyte', 'gonia', 'NS'], col=
'FB_chrom', col_order=['X', '2L', '2R', '3L', '3R'], sharex=True,
sharey=True, margin_titles=True)
g.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])
g.set_ylabels("""% Spermatocyte Cells
With Expression""")
g.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches
='tight')
ct = pd.crosstab(df.intronless, df.bias)
res = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',
'flag_sig']), :]
print(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt
='github'))
res
zscores_intronless = feather_to_cluster_rep_matrix(
'../output/paper_submission/zscore_by_cluster_rep.feather').reindex(
fbgns_no_intron).dropna()
ax = sns.clustermap(zscores_intronless, col_cluster=False, xticklabels=True,
yticklabels=False, cmap='viridis', vmin=-3, vmax=3, rasterized=True)
ax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')
plt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',
bbox_inches='tight')
intronless2chrom = fbgn2chrom.to_frame().query(
"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']")
intronless2chrom['intronless'] = np.where(intronless2chrom.index.isin(
fbgns_no_intron), True, False)
ct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)
res = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',
'flag_sig']), :]
display(res)
print(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt
='github'))
model = smf.logit('intronless ~ cyte_bias + X', data=df.replace({(True): 1,
(False): 0}))
results = model.fit()
plot_statsmodels_results(
'../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(
results.summary2()))
display(results.summary2())
np.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]
model = smf.logit('intronless ~ cyte_bias * X', data=df.replace({(True): 1,
(False): 0}))
results = model.fit()
plot_statsmodels_results(
'../output/docs/x_escapers_and_intronless_genes_full.png', str(results.
summary2()))
display(results.summary2())
np.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]
<|reserved_special_token_1|>
#%% [markdown]
# # Look at intron-less gene enrichment in Cyte biased expressed genes.
# This is a quick look at if parimary spermatocyte biased genes are enriched in intronless genes.
# Yes this is what we see.
#%%
import os
import pickle
import numpy as np
import pandas as pd
from scipy.stats import fisher_exact, contingency
from IPython.display import display, Markdown
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.api import formula as smf
from tabulate import tabulate
from larval_gonad.io import feather_to_cluster_rep_matrix
from larval_gonad.stats import run_chisq
from larval_gonad.plotting import plot_statsmodels_results
# Notebook-export convenience: when run from the repo root, move into docs/.
try:
    os.chdir(os.path.join(os.getcwd(), "docs"))
    print(os.getcwd())
except OSError:
    # docs/ not present relative to cwd (e.g. already inside docs/): stay put.
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    pass
#%%
# Get list of intronless FBgns and the expressed-gene background set.
# Context managers close the pickle files (the originals leaked the handles).
with open("../output/paper_submission/intron_less_genes.pkl", "rb") as fh:
    fbgns_no_intron = pickle.load(fh)
with open("../output/paper_submission/background_fbgns.pkl", "rb") as fh:
    background = pickle.load(fh)
#%%
# Get list of X chromosome genes
# FBgn -> chromosome arm mapping from the FlyBase r6.26 gene annotation;
# squeeze() collapses the single remaining column to a Series indexed by FBgn.
fbgn2chrom = (
    pd.read_feather(
        "../references/gene_annotation_dmel_r6-26.feather", columns=["FBgn", "FB_chrom"]
    )
    .set_index("FBgn")
    .squeeze()
)
# Index of FBgns annotated to the X chromosome.
chrx_fbgns = fbgn2chrom[fbgn2chrom == "X"].index
#%%
# Get gonia biased and cyte biased genes
# Biased-expression calls from the gonia-vs-cytes differential expression table:
#   gonia_bias: adjusted p <= 0.01 and avg_logFC > 0
#   cyte_bias:  adjusted p <= 0.01 and avg_logFC < 0
# pct.1 / pct.2 are renamed pct_gonia / pct_cyte (assumes group 1 = gonia,
# group 2 = cytes -- TODO confirm against the Seurat comparison setup).
# Restricted to the expressed-gene background; genes missing there are dropped.
bias = (
    pd.read_feather("../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather")
    .assign(gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0), True, False))
    .assign(pct_gonia=lambda x: x["pct.1"])
    .assign(cyte_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False))
    .assign(pct_cyte=lambda x: x["pct.2"])
    .set_index("FBgn")
    .loc[:, ["gonia_bias", "cyte_bias", "pct_gonia", "pct_cyte"]]
    .reindex(background)
    .dropna()
)
#%%
# Munge all into a dataframe
df = bias.copy().join(fbgn2chrom)
# Boolean flags: gene has no introns; gene is X-linked.
df["intronless"] = np.where(df.index.isin(fbgns_no_intron), True, False)
df["X"] = np.where(df.index.isin(chrx_fbgns), True, False)
# Single categorical label for plotting; gonia/cyte calls override the NS default.
df["bias"] = "NS"
df.loc[df.gonia_bias, "bias"] = "gonia"
df.loc[df.cyte_bias, "bias"] = "cyte"
#%% [markdown]
# ## How are intronless genes expressed in primary spermatocytes?
#%% [markdown]
# ### Intronless genes are expressed in fewer cells than genes with introns.
#%%
# Plot percent cytes with expression by bias*chrom*intronless
# One boxplot panel per bias class (rows) and major chromosome arm (columns);
# within each panel, intronless=False vs True side by side.
g = sns.FacetGrid(
    df,
    row="bias",
    row_order=["cyte", "gonia", "NS"],
    col="FB_chrom",
    col_order=["X", "2L", "2R", "3L", "3R"],
    sharex=True,
    sharey=True,
    margin_titles=True,
)
g.map(sns.boxplot, "intronless", "pct_cyte", order=[False, True])
g.set_ylabels("% Spermatocyte Cells\nWith Expression")
g.savefig("../output/docs/x_escapers_and_intronless_genes.svg", bbox_inches="tight")
#%% [markdown]
# ### However, intronless genes are enriched in genes with primary spermatocyte biased expression.
#%%
# Cross tab of intronless * bias
ct = pd.crosstab(df.intronless, df.bias)
# Keep observed counts, adjusted standardized residuals, and significance
# flags from the run_chisq contingency test.
res = run_chisq(ct).loc[(slice(None), ["observed", "adj std residual", "flag_sig"]), :]
print(tabulate(res.reset_index(), headers="keys", showindex=False, tablefmt="github"))
# Bare expression: rendered as the notebook cell's output.
res
#%%
# Cluster-by-replicate z-score matrix restricted to intronless genes.
zscores_intronless = (
    feather_to_cluster_rep_matrix("../output/paper_submission/zscore_by_cluster_rep.feather")
    .reindex(fbgns_no_intron)
    .dropna()
)
# Heatmap with columns kept in cluster/replicate order (no column clustering);
# rasterized keeps the SVG small for a large gene set.
ax = sns.clustermap(
    zscores_intronless,
    col_cluster=False,
    xticklabels=True,
    yticklabels=False,
    cmap="viridis",
    vmin=-3,
    vmax=3,
    rasterized=True,
)
ax.ax_heatmap.set(xlabel="", ylabel="Intronless Genes")
plt.savefig("../output/docs/x_escapers_and_intronless_genes_heatmap.svg", bbox_inches="tight")
#%% [markdown]
# ## Are intronless genes enriched in X chromosome escapers?
#%% [markdown]
# ### Intronless genes are depleted on the X chromosome.
#%%
# Intronless status across the major chromosome arms (scaffolds excluded).
major_arms = ["X", "2L", "2R", "3L", "3R", "4", "Y"]
intronless2chrom = fbgn2chrom.to_frame()
intronless2chrom = intronless2chrom[intronless2chrom.FB_chrom.isin(major_arms)]
intronless2chrom["intronless"] = intronless2chrom.index.isin(fbgns_no_intron)

# Contingency test of intronless status vs chromosome arm; keep observed
# counts, adjusted standardized residuals, and significance flags.
ct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)
res = run_chisq(ct).loc[(slice(None), ["observed", "adj std residual", "flag_sig"]), :]
display(res)

print(tabulate(res.reset_index(), headers="keys", showindex=False, tablefmt="github"))
#%% [markdown]
# ### X chromosome escapers are not enriched for intronless genes.
#%% [markdown]
# #### Main Effects Model Logit(intronless = cyte_biased + X chromosome)
#%%
# Main effects model
# Logistic regression of intronless status on cyte bias and X linkage;
# booleans are recoded to 0/1 for the formula-based fit.
model = smf.logit("intronless ~ cyte_bias + X", data=df.replace({True: 1, False: 0}))
results = model.fit()
# Save the text summary as an image for the docs, then show it inline.
plot_statsmodels_results(
    "../output/docs/x_escapers_and_intronless_genes_main_effects.png", str(results.summary2())
)
display(results.summary2())
# Odds ratios (exp of coefficients) for terms significant at alpha = 0.05.
np.exp(results.params).rename("Odds Ratio").to_frame()[results.pvalues <= 0.05]
#%% [markdown]
# #### Full Model Logit(intronless = cyte_biased + X chromosome + cyte_biased * X chromosome)
#%%
# Full model
# Adds the cyte_bias:X interaction term; booleans recoded to 0/1 for the fit.
model = smf.logit("intronless ~ cyte_bias * X", data=df.replace({True: 1, False: 0}))
results = model.fit()
plot_statsmodels_results(
    "../output/docs/x_escapers_and_intronless_genes_full.png", str(results.summary2())
)
display(results.summary2())
# Odds ratios for terms significant at alpha = 0.05.
np.exp(results.params).rename("Odds Ratio").to_frame()[results.pvalues <= 0.05]
#%%
|
flexible
|
{
"blob_id": "5f4d83aa2b530417ecb1598510fb4778b111700b",
"index": 6489,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n os.chdir(os.path.join(os.getcwd(), 'docs'))\n print(os.getcwd())\nexcept:\n pass\n<mask token>\ng.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])\ng.set_ylabels(\"\"\"% Spermatocyte Cells\nWith Expression\"\"\")\ng.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches\n ='tight')\n<mask token>\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nres\n<mask token>\nax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')\nplt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',\n bbox_inches='tight')\n<mask token>\ndisplay(res)\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\n<mask token>\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(\n results.summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n<mask token>\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_full.png', str(results.\n summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n",
"step-3": "<mask token>\ntry:\n os.chdir(os.path.join(os.getcwd(), 'docs'))\n print(os.getcwd())\nexcept:\n pass\nfbgns_no_intron = pickle.load(open(\n '../output/paper_submission/intron_less_genes.pkl', 'rb'))\nbackground = pickle.load(open(\n '../output/paper_submission/background_fbgns.pkl', 'rb'))\nfbgn2chrom = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather'\n , columns=['FBgn', 'FB_chrom']).set_index('FBgn').squeeze()\nchrx_fbgns = fbgn2chrom[fbgn2chrom == 'X'].index\nbias = pd.read_feather(\n '../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather').assign(\n gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0),\n True, False)).assign(pct_gonia=lambda x: x['pct.1']).assign(cyte_bias=\n lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False)\n ).assign(pct_cyte=lambda x: x['pct.2']).set_index('FBgn').loc[:, [\n 'gonia_bias', 'cyte_bias', 'pct_gonia', 'pct_cyte']].reindex(background\n ).dropna()\ndf = bias.copy().join(fbgn2chrom)\ndf['intronless'] = np.where(df.index.isin(fbgns_no_intron), True, False)\ndf['X'] = np.where(df.index.isin(chrx_fbgns), True, False)\ndf['bias'] = 'NS'\ndf.loc[df.gonia_bias, 'bias'] = 'gonia'\ndf.loc[df.cyte_bias, 'bias'] = 'cyte'\ng = sns.FacetGrid(df, row='bias', row_order=['cyte', 'gonia', 'NS'], col=\n 'FB_chrom', col_order=['X', '2L', '2R', '3L', '3R'], sharex=True,\n sharey=True, margin_titles=True)\ng.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])\ng.set_ylabels(\"\"\"% Spermatocyte Cells\nWith Expression\"\"\")\ng.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches\n ='tight')\nct = pd.crosstab(df.intronless, df.bias)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nres\nzscores_intronless = feather_to_cluster_rep_matrix(\n 
'../output/paper_submission/zscore_by_cluster_rep.feather').reindex(\n fbgns_no_intron).dropna()\nax = sns.clustermap(zscores_intronless, col_cluster=False, xticklabels=True,\n yticklabels=False, cmap='viridis', vmin=-3, vmax=3, rasterized=True)\nax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')\nplt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',\n bbox_inches='tight')\nintronless2chrom = fbgn2chrom.to_frame().query(\n \"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']\")\nintronless2chrom['intronless'] = np.where(intronless2chrom.index.isin(\n fbgns_no_intron), True, False)\nct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\ndisplay(res)\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nmodel = smf.logit('intronless ~ cyte_bias + X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(\n results.summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\nmodel = smf.logit('intronless ~ cyte_bias * X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_full.png', str(results.\n summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n",
"step-4": "import os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import fisher_exact, contingency\nfrom IPython.display import display, Markdown\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom statsmodels.api import formula as smf\nfrom tabulate import tabulate\nfrom larval_gonad.io import feather_to_cluster_rep_matrix\nfrom larval_gonad.stats import run_chisq\nfrom larval_gonad.plotting import plot_statsmodels_results\ntry:\n os.chdir(os.path.join(os.getcwd(), 'docs'))\n print(os.getcwd())\nexcept:\n pass\nfbgns_no_intron = pickle.load(open(\n '../output/paper_submission/intron_less_genes.pkl', 'rb'))\nbackground = pickle.load(open(\n '../output/paper_submission/background_fbgns.pkl', 'rb'))\nfbgn2chrom = pd.read_feather('../references/gene_annotation_dmel_r6-26.feather'\n , columns=['FBgn', 'FB_chrom']).set_index('FBgn').squeeze()\nchrx_fbgns = fbgn2chrom[fbgn2chrom == 'X'].index\nbias = pd.read_feather(\n '../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather').assign(\n gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0),\n True, False)).assign(pct_gonia=lambda x: x['pct.1']).assign(cyte_bias=\n lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False)\n ).assign(pct_cyte=lambda x: x['pct.2']).set_index('FBgn').loc[:, [\n 'gonia_bias', 'cyte_bias', 'pct_gonia', 'pct_cyte']].reindex(background\n ).dropna()\ndf = bias.copy().join(fbgn2chrom)\ndf['intronless'] = np.where(df.index.isin(fbgns_no_intron), True, False)\ndf['X'] = np.where(df.index.isin(chrx_fbgns), True, False)\ndf['bias'] = 'NS'\ndf.loc[df.gonia_bias, 'bias'] = 'gonia'\ndf.loc[df.cyte_bias, 'bias'] = 'cyte'\ng = sns.FacetGrid(df, row='bias', row_order=['cyte', 'gonia', 'NS'], col=\n 'FB_chrom', col_order=['X', '2L', '2R', '3L', '3R'], sharex=True,\n sharey=True, margin_titles=True)\ng.map(sns.boxplot, 'intronless', 'pct_cyte', order=[False, True])\ng.set_ylabels(\"\"\"% Spermatocyte Cells\nWith 
Expression\"\"\")\ng.savefig('../output/docs/x_escapers_and_intronless_genes.svg', bbox_inches\n ='tight')\nct = pd.crosstab(df.intronless, df.bias)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nres\nzscores_intronless = feather_to_cluster_rep_matrix(\n '../output/paper_submission/zscore_by_cluster_rep.feather').reindex(\n fbgns_no_intron).dropna()\nax = sns.clustermap(zscores_intronless, col_cluster=False, xticklabels=True,\n yticklabels=False, cmap='viridis', vmin=-3, vmax=3, rasterized=True)\nax.ax_heatmap.set(xlabel='', ylabel='Intronless Genes')\nplt.savefig('../output/docs/x_escapers_and_intronless_genes_heatmap.svg',\n bbox_inches='tight')\nintronless2chrom = fbgn2chrom.to_frame().query(\n \"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']\")\nintronless2chrom['intronless'] = np.where(intronless2chrom.index.isin(\n fbgns_no_intron), True, False)\nct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)\nres = run_chisq(ct).loc[(slice(None), ['observed', 'adj std residual',\n 'flag_sig']), :]\ndisplay(res)\nprint(tabulate(res.reset_index(), headers='keys', showindex=False, tablefmt\n ='github'))\nmodel = smf.logit('intronless ~ cyte_bias + X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_main_effects.png', str(\n results.summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\nmodel = smf.logit('intronless ~ cyte_bias * X', data=df.replace({(True): 1,\n (False): 0}))\nresults = model.fit()\nplot_statsmodels_results(\n '../output/docs/x_escapers_and_intronless_genes_full.png', str(results.\n summary2()))\ndisplay(results.summary2())\nnp.exp(results.params).rename('Odds Ratio').to_frame()[results.pvalues <= 0.05]\n",
"step-5": "#%% [markdown]\n# # Look at intron-less gene enrichment in Cyte biased expressed genes.\n\n# This is a quick look at if parimary spermatocyte biased genes are enriched in intronless genes.\n# Yes this is what we see.\n\n#%%\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import fisher_exact, contingency\nfrom IPython.display import display, Markdown\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom statsmodels.api import formula as smf\nfrom tabulate import tabulate\n\nfrom larval_gonad.io import feather_to_cluster_rep_matrix\nfrom larval_gonad.stats import run_chisq\nfrom larval_gonad.plotting import plot_statsmodels_results\n\ntry:\n os.chdir(os.path.join(os.getcwd(), \"docs\"))\n print(os.getcwd())\nexcept:\n pass\n\n\n#%%\n# Get list of intronless FBgns\nfbgns_no_intron = pickle.load(open(\"../output/paper_submission/intron_less_genes.pkl\", \"rb\"))\nbackground = pickle.load(open(\"../output/paper_submission/background_fbgns.pkl\", \"rb\"))\n\n#%%\n# Get list of X chromosome genes\nfbgn2chrom = (\n pd.read_feather(\n \"../references/gene_annotation_dmel_r6-26.feather\", columns=[\"FBgn\", \"FB_chrom\"]\n )\n .set_index(\"FBgn\")\n .squeeze()\n)\nchrx_fbgns = fbgn2chrom[fbgn2chrom == \"X\"].index\n\n#%%\n# Get gonia biased and cyte biased genes\nbias = (\n pd.read_feather(\"../output/seurat3-cluster-wf/combined_n3_gonia_vs_cytes.feather\")\n .assign(gonia_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC > 0), True, False))\n .assign(pct_gonia=lambda x: x[\"pct.1\"])\n .assign(cyte_bias=lambda x: np.where((x.p_val_adj <= 0.01) & (x.avg_logFC < 0), True, False))\n .assign(pct_cyte=lambda x: x[\"pct.2\"])\n .set_index(\"FBgn\")\n .loc[:, [\"gonia_bias\", \"cyte_bias\", \"pct_gonia\", \"pct_cyte\"]]\n .reindex(background)\n .dropna()\n)\n\n#%%\n# Munge all into a dataframe\ndf = bias.copy().join(fbgn2chrom)\ndf[\"intronless\"] = np.where(df.index.isin(fbgns_no_intron), True, False)\ndf[\"X\"] = 
np.where(df.index.isin(chrx_fbgns), True, False)\ndf[\"bias\"] = \"NS\"\ndf.loc[df.gonia_bias, \"bias\"] = \"gonia\"\ndf.loc[df.cyte_bias, \"bias\"] = \"cyte\"\n\n#%% [markdown]\n# ## How are intronless genes expressed in primary spermatocytes?\n\n#%% [markdown]\n# ### Intronless genes are expressed in fewer cells than genes with introns.\n\n#%%\n# Plot percent cytes with expression by bias*chrom*intronless\ng = sns.FacetGrid(\n df,\n row=\"bias\",\n row_order=[\"cyte\", \"gonia\", \"NS\"],\n col=\"FB_chrom\",\n col_order=[\"X\", \"2L\", \"2R\", \"3L\", \"3R\"],\n sharex=True,\n sharey=True,\n margin_titles=True,\n)\ng.map(sns.boxplot, \"intronless\", \"pct_cyte\", order=[False, True])\ng.set_ylabels(\"% Spermatocyte Cells\\nWith Expression\")\ng.savefig(\"../output/docs/x_escapers_and_intronless_genes.svg\", bbox_inches=\"tight\")\n\n#%% [markdown]\n# ### However, intronless genes are enriched in genes with primary spermatocyte biased expression.\n\n#%%\n# Cross tab of intronless * bias\nct = pd.crosstab(df.intronless, df.bias)\nres = run_chisq(ct).loc[(slice(None), [\"observed\", \"adj std residual\", \"flag_sig\"]), :]\nprint(tabulate(res.reset_index(), headers=\"keys\", showindex=False, tablefmt=\"github\"))\nres\n\n#%%\nzscores_intronless = (\n feather_to_cluster_rep_matrix(\"../output/paper_submission/zscore_by_cluster_rep.feather\")\n .reindex(fbgns_no_intron)\n .dropna()\n)\n\nax = sns.clustermap(\n zscores_intronless,\n col_cluster=False,\n xticklabels=True,\n yticklabels=False,\n cmap=\"viridis\",\n vmin=-3,\n vmax=3,\n rasterized=True,\n)\nax.ax_heatmap.set(xlabel=\"\", ylabel=\"Intronless Genes\")\nplt.savefig(\"../output/docs/x_escapers_and_intronless_genes_heatmap.svg\", bbox_inches=\"tight\")\n\n\n#%% [markdown]\n# ## Are intronless genes enriched in X chromosome escapers?\n\n#%% [markdown]\n# ### Intronless genes are depleted on the X chromosome.\n\n#%%\n# intronless genes across the genome\nintronless2chrom = fbgn2chrom.to_frame().query(\n 
\"FB_chrom == ['X', '2L', '2R', '3L', '3R', '4', 'Y']\"\n)\nintronless2chrom[\"intronless\"] = np.where(intronless2chrom.index.isin(fbgns_no_intron), True, False)\n\nct = pd.crosstab(intronless2chrom.intronless, intronless2chrom.FB_chrom)\nres = run_chisq(ct).loc[(slice(None), [\"observed\", \"adj std residual\", \"flag_sig\"]), :]\ndisplay(res)\n\nprint(tabulate(res.reset_index(), headers=\"keys\", showindex=False, tablefmt=\"github\"))\n\n#%% [markdown]\n# ### X chromosome escapers are not enriched for intronless genes.\n\n#%% [markdown]\n# #### Main Effects Model Logit(intronless = cyte_biased + X chromosome)\n\n#%%\n# Main effects model\nmodel = smf.logit(\"intronless ~ cyte_bias + X\", data=df.replace({True: 1, False: 0}))\nresults = model.fit()\nplot_statsmodels_results(\n \"../output/docs/x_escapers_and_intronless_genes_main_effects.png\", str(results.summary2())\n)\ndisplay(results.summary2())\n\nnp.exp(results.params).rename(\"Odds Ratio\").to_frame()[results.pvalues <= 0.05]\n\n#%% [markdown]\n# #### Full Model Logit(intronless = cyte_biased + X chromosome + cyte_biased * X chromosome)\n\n#%%\n# FUll Model\nmodel = smf.logit(\"intronless ~ cyte_bias * X\", data=df.replace({True: 1, False: 0}))\nresults = model.fit()\nplot_statsmodels_results(\n \"../output/docs/x_escapers_and_intronless_genes_full.png\", str(results.summary2())\n)\ndisplay(results.summary2())\n\nnp.exp(results.params).rename(\"Odds Ratio\").to_frame()[results.pvalues <= 0.05]\n\n\n#%%\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
w = int(input("Width ?"))
h= int(input("Height ?"))
for b in range(1,w+1):
print ("*", end='')
print("")
for i in range(1,h-1):
print ("*", end='')
for j in range(1,w-1):
print (" ", end='')
print ("*", end='')
print("")
for b in range(1,w+1):
print ("*", end='')
print("")
|
normal
|
{
"blob_id": "32b961f3971819fdbbe1a30fd7cf1883353c1854",
"index": 2294,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\nfor i in range(1, h - 1):\n print('*', end='')\n for j in range(1, w - 1):\n print(' ', end='')\n print('*', end='')\n print('')\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\n",
"step-3": "w = int(input('Width ?'))\nh = int(input('Height ?'))\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\nfor i in range(1, h - 1):\n print('*', end='')\n for j in range(1, w - 1):\n print(' ', end='')\n print('*', end='')\n print('')\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\n",
"step-4": "w = int(input(\"Width ?\"))\nh= int(input(\"Height ?\"))\n\n\nfor b in range(1,w+1):\n\tprint (\"*\", end='')\nprint(\"\")\n\n\nfor i in range(1,h-1):\n\tprint (\"*\", end='')\n\tfor j in range(1,w-1):\n\t\tprint (\" \", end='')\n\tprint (\"*\", end='')\n\tprint(\"\")\n\nfor b in range(1,w+1):\n\tprint (\"*\", end='')\nprint(\"\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pytest
from ethereum.tools.tester import TransactionFailed
def test_cant_ever_init_twice(ethtester, root_chain):
    """Re-initializing the already-deployed root chain must revert,
    regardless of which account sends the transaction."""
    ethtester.chain.mine()
    for sender_key in (ethtester.k0, ethtester.k1):
        with pytest.raises(TransactionFailed):
            root_chain.init(sender=sender_key)
|
normal
|
{
"blob_id": "8417b63e2b7b16d3d58175022662c5b3e59e4aaf",
"index": 4640,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_cant_ever_init_twice(ethtester, root_chain):\n ethtester.chain.mine()\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k0)\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k1)\n",
"step-3": "import pytest\nfrom ethereum.tools.tester import TransactionFailed\n\n\ndef test_cant_ever_init_twice(ethtester, root_chain):\n ethtester.chain.mine()\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k0)\n with pytest.raises(TransactionFailed):\n root_chain.init(sender=ethtester.k1)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
    """Initial migration for the request app: creates the AboutRequest
    (order header: delivery address plus approved/active/paid state) and
    Request (order line item linking user, machine, and header) tables.

    Auto-generated by Django; avoid hand-editing field definitions.
    """
    initial = True
    dependencies = [migrations.swappable_dependency(settings.
        AUTH_USER_MODEL), ('machine', '0001_initial')]
    operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',
        models.AutoField(auto_created=True, primary_key=True, serialize=
        False, verbose_name='ID')), ('created', models.DateTimeField(
        auto_now_add=True, null=True)), ('modified', models.DateTimeField(
        auto_now=True, null=True)), ('address_of_delivery', models.
        CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (
        'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=
        'Meru Town', max_length=50)), ('approved', models.BooleanField(
        default=False)), ('active', models.BooleanField(default=True)), (
        'paid', models.BooleanField(default=False))], options={'ordering':
        ('-created',)}), migrations.CreateModel(name='Request', fields=[(
        'id', models.AutoField(auto_created=True, primary_key=True,
        serialize=False, verbose_name='ID')), ('created', models.
        DateTimeField(auto_now_add=True, null=True)), ('modified', models.
        DateTimeField(auto_now=True, null=True)), ('price', models.
        DecimalField(decimal_places=2, max_digits=6, null=True)), (
        'quantity', models.PositiveIntegerField(default=1)), ('order',
        models.ForeignKey(null=True, on_delete=django.db.models.deletion.
        CASCADE, related_name='details', to='request.AboutRequest')), (
        'product', models.ForeignKey(null=True, on_delete=django.db.models.
        deletion.CASCADE, related_name='order_item', to='machine.Machine')),
        ('user', models.ForeignKey(null=True, on_delete=django.db.models.
        deletion.CASCADE, related_name='orders', to=settings.
        AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the request app: creates the AboutRequest
    (order header: delivery address plus approved/active/paid state) and
    Request (order line item linking user, machine, and header) tables.

    Auto-generated by Django; avoid hand-editing field definitions.
    """
    initial = True
    dependencies = [migrations.swappable_dependency(settings.
        AUTH_USER_MODEL), ('machine', '0001_initial')]
    operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',
        models.AutoField(auto_created=True, primary_key=True, serialize=
        False, verbose_name='ID')), ('created', models.DateTimeField(
        auto_now_add=True, null=True)), ('modified', models.DateTimeField(
        auto_now=True, null=True)), ('address_of_delivery', models.
        CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (
        'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=
        'Meru Town', max_length=50)), ('approved', models.BooleanField(
        default=False)), ('active', models.BooleanField(default=True)), (
        'paid', models.BooleanField(default=False))], options={'ordering':
        ('-created',)}), migrations.CreateModel(name='Request', fields=[(
        'id', models.AutoField(auto_created=True, primary_key=True,
        serialize=False, verbose_name='ID')), ('created', models.
        DateTimeField(auto_now_add=True, null=True)), ('modified', models.
        DateTimeField(auto_now=True, null=True)), ('price', models.
        DecimalField(decimal_places=2, max_digits=6, null=True)), (
        'quantity', models.PositiveIntegerField(default=1)), ('order',
        models.ForeignKey(null=True, on_delete=django.db.models.deletion.
        CASCADE, related_name='details', to='request.AboutRequest')), (
        'product', models.ForeignKey(null=True, on_delete=django.db.models.
        deletion.CASCADE, related_name='order_item', to='machine.Machine')),
        ('user', models.ForeignKey(null=True, on_delete=django.db.models.
        deletion.CASCADE, related_name='orders', to=settings.
        AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]
<|reserved_special_token_1|>
# Generated by Django 2.1.4 on 2019-04-23 23:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the request app.

    Creates ``AboutRequest`` (a request header holding the delivery address
    and approved/active/paid flags) and ``Request`` (a line item tying a
    ``machine.Machine`` product, a header and a user together with quantity
    and price).
    """

    # First migration of this app.
    initial = True

    # Requires the swappable user model and the machine app's initial schema.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('machine', '0001_initial'),
    ]

    operations = [
        # Header record: delivery address plus status flags; newest first.
        migrations.CreateModel(
            name='AboutRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                # NOTE(review): 'Nchiu' breaks the lowercase-key convention of
                # the other choices and its label reads 'Nchiru' -- confirm.
                ('address_of_delivery', models.CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), ('nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default='Meru Town', max_length=50)),
                ('approved', models.BooleanField(default=False)),
                ('active', models.BooleanField(default=True)),
                ('paid', models.BooleanField(default=False)),
            ],
            options={
                'ordering': ('-created',),
            },
        ),
        # Line item: quantity/price of one machine inside an AboutRequest
        # (reachable from the header via related_name='details').
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified', models.DateTimeField(auto_now=True, null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=6, null=True)),
                ('quantity', models.PositiveIntegerField(default=1)),
                ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='request.AboutRequest')),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='machine.Machine')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-quantity',),
            },
        ),
    ]
|
flexible
|
{
"blob_id": "b9608208f71f25ae05ed9bd7bdf94b8882a26e06",
"index": 3091,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('machine', '0001_initial')]\n operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, null=True)), ('modified', models.DateTimeField(\n auto_now=True, null=True)), ('address_of_delivery', models.\n CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (\n 'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=\n 'Meru Town', max_length=50)), ('approved', models.BooleanField(\n default=False)), ('active', models.BooleanField(default=True)), (\n 'paid', models.BooleanField(default=False))], options={'ordering':\n ('-created',)}), migrations.CreateModel(name='Request', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created', models.\n DateTimeField(auto_now_add=True, null=True)), ('modified', models.\n DateTimeField(auto_now=True, null=True)), ('price', models.\n DecimalField(decimal_places=2, max_digits=6, null=True)), (\n 'quantity', models.PositiveIntegerField(default=1)), ('order',\n models.ForeignKey(null=True, on_delete=django.db.models.deletion.\n CASCADE, related_name='details', to='request.AboutRequest')), (\n 'product', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='orders', to=settings.\n AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('machine', '0001_initial')]\n operations = [migrations.CreateModel(name='AboutRequest', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('created', models.DateTimeField(\n auto_now_add=True, null=True)), ('modified', models.DateTimeField(\n auto_now=True, null=True)), ('address_of_delivery', models.\n CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), (\n 'nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default=\n 'Meru Town', max_length=50)), ('approved', models.BooleanField(\n default=False)), ('active', models.BooleanField(default=True)), (\n 'paid', models.BooleanField(default=False))], options={'ordering':\n ('-created',)}), migrations.CreateModel(name='Request', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('created', models.\n DateTimeField(auto_now_add=True, null=True)), ('modified', models.\n DateTimeField(auto_now=True, null=True)), ('price', models.\n DecimalField(decimal_places=2, max_digits=6, null=True)), (\n 'quantity', models.PositiveIntegerField(default=1)), ('order',\n models.ForeignKey(null=True, on_delete=django.db.models.deletion.\n CASCADE, related_name='details', to='request.AboutRequest')), (\n 'product', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='orders', to=settings.\n AUTH_USER_MODEL))], options={'ordering': ('-quantity',)})]\n",
"step-5": "# Generated by Django 2.1.4 on 2019-04-23 23:37\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('machine', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='AboutRequest',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('modified', models.DateTimeField(auto_now=True, null=True)),\n ('address_of_delivery', models.CharField(choices=[('meru', 'Meru Town'), ('kianjai', 'Kianjai'), ('nkubu', 'Nkubu'), ('maua', 'Maua'), ('Nchiu', 'Nchiru')], default='Meru Town', max_length=50)),\n ('approved', models.BooleanField(default=False)),\n ('active', models.BooleanField(default=True)),\n ('paid', models.BooleanField(default=False)),\n ],\n options={\n 'ordering': ('-created',),\n },\n ),\n migrations.CreateModel(\n name='Request',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created', models.DateTimeField(auto_now_add=True, null=True)),\n ('modified', models.DateTimeField(auto_now=True, null=True)),\n ('price', models.DecimalField(decimal_places=2, max_digits=6, null=True)),\n ('quantity', models.PositiveIntegerField(default=1)),\n ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='details', to='request.AboutRequest')),\n ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='order_item', to='machine.Machine')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ('-quantity',),\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('输入鲈鱼的先验概率例如:70,对应70%')
<|reserved_special_token_0|>
for i in range(0, int(a) * 50):
rowa_data = sh.row_values(i)
L.append(rowa_data)
<|reserved_special_token_0|>
for j in range(5000, 5000 + (100 - int(a)) * 50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
<|reserved_special_token_0|>
plt.figure(figsize=(8, 6))
plt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)
plt.xlabel('长度', fontproperties=font_set)
plt.ylabel('宽度', fontproperties=font_set)
plt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')
plt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')
<|reserved_special_token_0|>
plt.plot(x, y, color='red')
plt.legend()
plt.show()
<|reserved_special_token_0|>
for i in L:
if i[0] + i[1] <= 9:
count = count + 1
<|reserved_special_token_0|>
print('鲈鱼准确率:%s' % (count / (int(a) * 50)))
<|reserved_special_token_0|>
for i in G:
if i[0] + i[1] >= 9:
countG = countG + 1
<|reserved_special_token_0|>
print('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))
<|reserved_special_token_0|>
print(pb)
<|reserved_special_token_0|>
print(pab)
print(pab / pb)
<|reserved_special_token_1|>
__author__ = '那位先生Beer'
<|reserved_special_token_0|>
print('输入鲈鱼的先验概率例如:70,对应70%')
a = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')
font_set = FontProperties(fname='c:\\windows\\fonts\\simsun.ttc', size=15)
data = xlrd.open_workbook('xqtest.xls')
shxrange = range(data.nsheets)
sh = data.sheet_by_name('1')
L = []
for i in range(0, int(a) * 50):
rowa_data = sh.row_values(i)
L.append(rowa_data)
L = np.array(L)
L = L[:, 0:2]
G = []
for j in range(5000, 5000 + (100 - int(a)) * 50):
rowa_data = sh.row_values(j)
G.append(rowa_data)
G = np.array(G)
G = G[:, 0:2]
plt.figure(figsize=(8, 6))
plt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)
plt.xlabel('长度', fontproperties=font_set)
plt.ylabel('宽度', fontproperties=font_set)
plt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')
plt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')
x = np.linspace(0, 8)
y = -x + 9
plt.plot(x, y, color='red')
plt.legend()
plt.show()
count = 0
for i in L:
if i[0] + i[1] <= 9:
count = count + 1
q = count / (int(a) * 50)
print('鲈鱼准确率:%s' % (count / (int(a) * 50)))
countG = 0
for i in G:
if i[0] + i[1] >= 9:
countG = countG + 1
p = countG / ((100 - int(a)) * 50)
print('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))
pb = int(a) / 100 * q + (1 - int(a) / 100) * p
print(pb)
pab = int(a) / 100 * q
print(pab)
print(pab / pb)
<|reserved_special_token_1|>
# Naive Bayes demo: classify sea bass vs. salmon samples from an Excel sheet
# with the fixed linear boundary x + y = 9, then report per-class accuracy and
# the posterior probability of the sea-bass class.
__author__ = '那位先生Beer'
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import xlrd
import numpy as np
print('输入鲈鱼的先验概率例如:70,对应70%')
# 'a' is the sea-bass prior in percent, read as a string from stdin.
a = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')
# SimSun font so the Chinese plot labels render on Windows.
font_set = FontProperties(fname='c:\\windows\\fonts\\simsun.ttc', size=15)
data = xlrd.open_workbook('xqtest.xls')
shxrange = range(data.nsheets)
sh = data.sheet_by_name('1')
L = []
# First int(a)*50 rows hold the sea-bass samples -- presumably the sheet is
# laid out with sea bass first, salmon from row 5000; TODO confirm.
for i in range(0, int(a) * 50):
    rowa_data = sh.row_values(i)
    L.append(rowa_data)
L = np.array(L)
# Keep only the first two columns (the two features that are plotted).
L = L[:, 0:2]
G = []
# Rows 5000.. hold the salmon samples: (100 - a) * 50 of them.
for j in range(5000, 5000 + (100 - int(a)) * 50):
    rowa_data = sh.row_values(j)
    G.append(rowa_data)
G = np.array(G)
G = G[:, 0:2]
plt.figure(figsize=(8, 6))
plt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)
plt.xlabel('长度', fontproperties=font_set)
plt.ylabel('宽度', fontproperties=font_set)
plt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')
plt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')
# Decision boundary: the line x + y = 9, drawn in red over the scatter plot.
x = np.linspace(0, 8)
y = -x + 9
plt.plot(x, y, color='red')
plt.legend()
plt.show()
# Accuracy for sea bass: fraction of samples on/below the boundary.
count = 0
for i in L:
    if i[0] + i[1] <= 9:
        count = count + 1
q = count / (int(a) * 50)
print('鲈鱼准确率:%s' % (count / (int(a) * 50)))
# Accuracy for salmon: fraction of samples on/above the boundary.
# NOTE(review): points with x + y == 9 are counted as correct for BOTH
# classes -- confirm the boundary handling is intentional.
countG = 0
for i in G:
    if i[0] + i[1] >= 9:
        countG = countG + 1
p = countG / ((100 - int(a)) * 50)
print('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))
# Total probability of the evidence: p(b) = p(b|a)p(a) + p(b|~a)p(~a).
pb = int(a) / 100 * q + (1 - int(a) / 100) * p
print(pb)
# Joint probability: p(ab) = p(b|a)p(a).
pab = int(a) / 100 * q
print(pab)
# Posterior p(a|b) = p(ab) / p(b).
print(pab / pb)
<|reserved_special_token_1|>
# Bayes demo: classify sea bass vs. salmon by the fixed line x + y = 9 and
# report per-class accuracy plus the sea-bass posterior probability.
__author__ = "那位先生Beer"
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import xlrd
import numpy as np
print('输入鲈鱼的先验概率例如:70,对应70%')
# 'a' is the sea-bass prior in percent, read as a string from stdin.
a=input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')
# SimSun font so the Chinese labels render on Windows.
font_set = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=15)
# Plot the generated data (x axis: length, y axis: lightness).
data=xlrd.open_workbook('xqtest.xls')
shxrange=range(data.nsheets)
sh=data.sheet_by_name("1")
L=[]
# First int(a)*50 rows: sea-bass samples; only the first two columns are kept.
for i in range(0,(int(a))*50):
    rowa_data=sh.row_values(i)
    L.append(rowa_data)
L=np.array(L)
L=L[:,0:2]
G=[]
# Rows from 5000 on: salmon samples, (100 - a) * 50 of them.
for j in range(5000,5000+(100-int(a))*50):
    rowa_data = sh.row_values(j)
    G.append(rowa_data)
G=np.array(G)
G=G[:,0:2]
plt.figure(figsize=(8,6))
plt.title("生成的鲈鱼和鲑鱼数据的散点图",fontproperties=font_set)
plt.xlabel("长度",fontproperties=font_set)
plt.ylabel("宽度",fontproperties=font_set)
plt.scatter(L[:,0],L[:,1],marker="o",label="鲈鱼")
plt.scatter(G[:,0],G[:,1],marker="s",label="鲑鱼")
# Classification model: decision boundary is the line x + y = 9.
x = np.linspace(0,8)
y = -x+9
plt.plot(x,y, color="red")
plt.legend()
plt.show()
# The simulated sea bass are smaller, so they fall below the line, i.e. x + y <= 9.
# Compute per-class accuracy.
count=0
for i in L:
    if i[0]+i[1]<=9:
        count=count+1
q=(count/((int(a))*50))
print('鲈鱼准确率:%s'%(count/((int(a))*50)))
countG=0
for i in G:
    if i[0]+i[1]>=9:
        countG=countG+1
p=(countG/((100-int(a))*50))
print('鲑鱼准确率:%s'%(countG/((100-int(a))*50)))
# Total probability: p(b) = p(b|a)*p(a) + p(b|~a)*p(~a)
pb=(int(a)/100)*q + (1-(int(a)/100))*p
print(pb)
# Joint probability: p(ab) = p(b|a)*p(a)
pab=(int(a)/100)*q
print(pab)
# Posterior p(a|b) = p(ab) / p(b).
print(pab/pb)
|
flexible
|
{
"blob_id": "077b6d3d7417bbc26e9f23af6f437ff05e3d5771",
"index": 812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('输入鲈鱼的先验概率例如:70,对应70%')\n<mask token>\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\n<mask token>\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\n<mask token>\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\n<mask token>\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\n<mask token>\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\n<mask token>\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\n<mask token>\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\n<mask token>\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\n<mask token>\nprint(pb)\n<mask token>\nprint(pab)\nprint(pab / pb)\n",
"step-3": "__author__ = '那位先生Beer'\n<mask token>\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=15)\ndata = xlrd.open_workbook('xqtest.xls')\nshxrange = range(data.nsheets)\nsh = data.sheet_by_name('1')\nL = []\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\nL = np.array(L)\nL = L[:, 0:2]\nG = []\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG = np.array(G)\nG = G[:, 0:2]\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\nx = np.linspace(0, 8)\ny = -x + 9\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\ncount = 0\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\nq = count / (int(a) * 50)\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\ncountG = 0\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\np = countG / ((100 - int(a)) * 50)\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\npb = int(a) / 100 * q + (1 - int(a) / 100) * p\nprint(pb)\npab = int(a) / 100 * q\nprint(pab)\nprint(pab / pb)\n",
"step-4": "__author__ = '那位先生Beer'\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport xlrd\nimport numpy as np\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na = input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname='c:\\\\windows\\\\fonts\\\\simsun.ttc', size=15)\ndata = xlrd.open_workbook('xqtest.xls')\nshxrange = range(data.nsheets)\nsh = data.sheet_by_name('1')\nL = []\nfor i in range(0, int(a) * 50):\n rowa_data = sh.row_values(i)\n L.append(rowa_data)\nL = np.array(L)\nL = L[:, 0:2]\nG = []\nfor j in range(5000, 5000 + (100 - int(a)) * 50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG = np.array(G)\nG = G[:, 0:2]\nplt.figure(figsize=(8, 6))\nplt.title('生成的鲈鱼和鲑鱼数据的散点图', fontproperties=font_set)\nplt.xlabel('长度', fontproperties=font_set)\nplt.ylabel('宽度', fontproperties=font_set)\nplt.scatter(L[:, 0], L[:, 1], marker='o', label='鲈鱼')\nplt.scatter(G[:, 0], G[:, 1], marker='s', label='鲑鱼')\nx = np.linspace(0, 8)\ny = -x + 9\nplt.plot(x, y, color='red')\nplt.legend()\nplt.show()\ncount = 0\nfor i in L:\n if i[0] + i[1] <= 9:\n count = count + 1\nq = count / (int(a) * 50)\nprint('鲈鱼准确率:%s' % (count / (int(a) * 50)))\ncountG = 0\nfor i in G:\n if i[0] + i[1] >= 9:\n countG = countG + 1\np = countG / ((100 - int(a)) * 50)\nprint('鲑鱼准确率:%s' % (countG / ((100 - int(a)) * 50)))\npb = int(a) / 100 * q + (1 - int(a) / 100) * p\nprint(pb)\npab = int(a) / 100 * q\nprint(pab)\nprint(pab / pb)\n",
"step-5": "__author__ = \"那位先生Beer\"\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport xlrd\nimport numpy as np\nprint('输入鲈鱼的先验概率例如:70,对应70%')\na=input('输入鲈鱼的先验概率(鲑鱼对应的1减去剩余的):')\nfont_set = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=15)\n#根据生成的数据画出图像(横坐标为长度,纵坐标为亮度)\ndata=xlrd.open_workbook('xqtest.xls')\nshxrange=range(data.nsheets)\nsh=data.sheet_by_name(\"1\")\nL=[]\nfor i in range(0,(int(a))*50):\n rowa_data=sh.row_values(i)\n L.append(rowa_data)\nL=np.array(L)\nL=L[:,0:2]\n\nG=[]\nfor j in range(5000,5000+(100-int(a))*50):\n rowa_data = sh.row_values(j)\n G.append(rowa_data)\nG=np.array(G)\nG=G[:,0:2]\nplt.figure(figsize=(8,6))\nplt.title(\"生成的鲈鱼和鲑鱼数据的散点图\",fontproperties=font_set)\nplt.xlabel(\"长度\",fontproperties=font_set)\nplt.ylabel(\"宽度\",fontproperties=font_set)\nplt.scatter(L[:,0],L[:,1],marker=\"o\",label=\"鲈鱼\")\nplt.scatter(G[:,0],G[:,1],marker=\"s\",label=\"鲑鱼\")\n# 分类模型\nx = np.linspace(0,8)\ny = -x+9\nplt.plot(x,y, color=\"red\")\nplt.legend()\nplt.show()\n\n\n#模拟的数据鲈鱼比较小,可得出其在直线下面,即y+x<=9:\n#计算准确率\ncount=0\nfor i in L:\n if i[0]+i[1]<=9:\n count=count+1\nq=(count/((int(a))*50))\nprint('鲈鱼准确率:%s'%(count/((int(a))*50)))\ncountG=0\nfor i in G:\n if i[0]+i[1]>=9:\n countG=countG+1\np=(countG/((100-int(a))*50))\nprint('鲑鱼准确率:%s'%(countG/((100-int(a))*50)))\n\n#p(b)=p(b|a)*p(a) + p(b|-a)p(-a)\npb=(int(a)/100)*q + (1-(int(a)/100))*p\nprint(pb)\n#p(ab)=p(b|a)*p(a)\npab=(int(a)/100)*q\nprint(pab)\nprint(pab/pb)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def copy_credentials_file(hostname, username, password, src_path, dst_path):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=hostname, username=username, password=password)
ftp_client = ssh_client.open_sftp()
ftp_client.get(src_path, dst_path)
ftp_client.close()
<|reserved_special_token_0|>
def check_aws_config():
config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')
if not os.path.exists(config_path):
print('\nCreating default AWS config...')
create_default_config(config_path)
print('Done.\n')
return config_path
<|reserved_special_token_0|>
def check_credentials():
login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')
login_data = load_json(login_file)
robot_name = login_data['robot_name']
username = login_data['username']
password = login_data['password']
src_path = '/var/jibo/credentials.json'
dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')
if not os.path.exists(dst_path):
print('\nGrabbing AWS credentials from robot...')
copy_credentials_file(robot_name, username, password, src_path,
dst_path)
print('Done.\n')
return dst_path
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def copy_credentials_file(hostname, username, password, src_path, dst_path):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=hostname, username=username, password=password)
ftp_client = ssh_client.open_sftp()
ftp_client.get(src_path, dst_path)
ftp_client.close()
<|reserved_special_token_0|>
def create_default_config(path):
data = {}
data['method'] = 'GET'
data['service'] = 'ec2'
data['host'] = 'ec2.amazonaws.com'
data['region'] = 'us-east-1'
data['endpoint'] = 'https://ec2.amazonaws.com'
with open(path, 'w+') as file:
json.dump(data, file)
<|reserved_special_token_0|>
def check_aws_config():
config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')
if not os.path.exists(config_path):
print('\nCreating default AWS config...')
create_default_config(config_path)
print('Done.\n')
return config_path
<|reserved_special_token_0|>
def check_credentials():
login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')
login_data = load_json(login_file)
robot_name = login_data['robot_name']
username = login_data['username']
password = login_data['password']
src_path = '/var/jibo/credentials.json'
dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')
if not os.path.exists(dst_path):
print('\nGrabbing AWS credentials from robot...')
copy_credentials_file(robot_name, username, password, src_path,
dst_path)
print('Done.\n')
return dst_path
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def copy_credentials_file(hostname, username, password, src_path, dst_path):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(hostname=hostname, username=username, password=password)
ftp_client = ssh_client.open_sftp()
ftp_client.get(src_path, dst_path)
ftp_client.close()
<|reserved_special_token_0|>
def create_default_config(path):
data = {}
data['method'] = 'GET'
data['service'] = 'ec2'
data['host'] = 'ec2.amazonaws.com'
data['region'] = 'us-east-1'
data['endpoint'] = 'https://ec2.amazonaws.com'
with open(path, 'w+') as file:
json.dump(data, file)
<|reserved_special_token_0|>
def check_aws_config():
config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')
if not os.path.exists(config_path):
print('\nCreating default AWS config...')
create_default_config(config_path)
print('Done.\n')
return config_path
<|reserved_special_token_0|>
def check_credentials():
login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')
login_data = load_json(login_file)
robot_name = login_data['robot_name']
username = login_data['username']
password = login_data['password']
src_path = '/var/jibo/credentials.json'
dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')
if not os.path.exists(dst_path):
print('\nGrabbing AWS credentials from robot...')
copy_credentials_file(robot_name, username, password, src_path,
dst_path)
print('Done.\n')
return dst_path
<|reserved_special_token_0|>
def load_json(path):
with open(path, 'r') as file:
data = json.load(file)
return data
<|reserved_special_token_1|>
import os, sys
import json
import paramiko
<|reserved_special_token_0|>
def copy_credentials_file(hostname, username, password, src_path, dst_path):
    """Fetch *src_path* from the robot at *hostname* into local *dst_path* via SFTP.

    Opens an SSH session with password authentication, downloads the file over
    SFTP, and always closes both the SFTP channel and the SSH transport.
    """
    # NOTE(review): AutoAddPolicy silently trusts unknown host keys; fine on a
    # closed lab network, but verify before using on anything untrusted.
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(hostname=hostname, username=username, password=password)
    try:
        ftp_client = ssh_client.open_sftp()
        try:
            ftp_client.get(src_path, dst_path)
        finally:
            ftp_client.close()
    finally:
        # Fix: the SSH connection was previously never closed (resource leak),
        # and the SFTP close was skipped if get() raised.
        ssh_client.close()
<|reserved_special_token_0|>
def create_default_config(path):
data = {}
data['method'] = 'GET'
data['service'] = 'ec2'
data['host'] = 'ec2.amazonaws.com'
data['region'] = 'us-east-1'
data['endpoint'] = 'https://ec2.amazonaws.com'
with open(path, 'w+') as file:
json.dump(data, file)
<|reserved_special_token_0|>
def check_aws_config():
config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')
if not os.path.exists(config_path):
print('\nCreating default AWS config...')
create_default_config(config_path)
print('Done.\n')
return config_path
<|reserved_special_token_0|>
def check_credentials():
login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')
login_data = load_json(login_file)
robot_name = login_data['robot_name']
username = login_data['username']
password = login_data['password']
src_path = '/var/jibo/credentials.json'
dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')
if not os.path.exists(dst_path):
print('\nGrabbing AWS credentials from robot...')
copy_credentials_file(robot_name, username, password, src_path,
dst_path)
print('Done.\n')
return dst_path
<|reserved_special_token_0|>
def load_json(path):
with open(path, 'r') as file:
data = json.load(file)
return data
<|reserved_special_token_1|>
import os, sys
import json
import paramiko
"""
Copies the credentials.json file locally from robot
"""
def copy_credentials_file(hostname, username, password, src_path, dst_path):
    """Fetch *src_path* from the robot at *hostname* into local *dst_path* via SFTP.

    Opens an SSH session with password authentication, downloads the file over
    SFTP, and always closes both the SFTP channel and the SSH transport.
    """
    # create ssh connection
    # NOTE(review): AutoAddPolicy silently trusts unknown host keys; fine on a
    # closed lab network, but verify before using on anything untrusted.
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(hostname=hostname, username=username, password=password)
    try:
        # ftp file from robot to local path
        ftp_client = ssh_client.open_sftp()
        try:
            ftp_client.get(src_path, dst_path)
        finally:
            ftp_client.close()
    finally:
        # Fix: the SSH connection was previously never closed (resource leak),
        # and the SFTP close was skipped if get() raised.
        ssh_client.close()
"""
Creates a default config file for AWS
(aws_config.json)
"""
def create_default_config(path):
    """Write the default AWS request configuration (EC2, us-east-1) to *path* as JSON."""
    defaults = {
        'method': 'GET',
        'service': 'ec2',
        'host': 'ec2.amazonaws.com',
        'region': 'us-east-1',
        'endpoint': 'https://ec2.amazonaws.com',
    }
    with open(path, 'w+') as file:
        json.dump(defaults, file)
"""
Checks for the aws_config.json file,
creates the file and populates with default values
if not found.
"""
def check_aws_config():
    """Return the path to aws_config.json, creating it with defaults when absent."""
    config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')
    if os.path.exists(config_path):
        return config_path
    print("\nCreating default AWS config...")
    create_default_config(config_path)
    print("Done.\n")
    return config_path
"""
Checks for the credentials.json file,
creates the file and populates with values from
robot if not found.
"""
def check_credentials():
    """Return the local credentials.json path, fetching it from the robot when absent."""
    login_path = os.path.expanduser('~/jibo/HubTest/config/login.json')
    login = load_json(login_path)
    # Unconditional key access mirrors the original contract: a malformed
    # login.json fails fast with KeyError.
    robot, user, pwd = login['robot_name'], login['username'], login['password']
    remote_src = '/var/jibo/credentials.json'
    local_dst = os.path.expanduser('~/jibo/HubTest/config/credentials.json')
    if not os.path.exists(local_dst):
        print("\nGrabbing AWS credentials from robot...")
        copy_credentials_file(robot, user, pwd, remote_src, local_dst)
        print("Done.\n")
    return local_dst
"""
Reads and returns contents of JSON file
"""
def load_json(path):
    """Parse the JSON file at *path* and return its contents."""
    with open(path, 'r') as handle:
        contents = json.load(handle)
    return contents
|
flexible
|
{
"blob_id": "27f162f2e350fdb284740bd67f4293535f0ab593",
"index": 8451,
"step-1": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n\n\ndef load_json(path):\n with open(path, 'r') as file:\n data = json.load(file)\n return data\n",
"step-4": "import os, sys\nimport json\nimport paramiko\n<mask token>\n\n\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.connect(hostname=hostname, username=username, password=password)\n ftp_client = ssh_client.open_sftp()\n ftp_client.get(src_path, dst_path)\n ftp_client.close()\n\n\n<mask token>\n\n\ndef create_default_config(path):\n data = {}\n data['method'] = 'GET'\n data['service'] = 'ec2'\n data['host'] = 'ec2.amazonaws.com'\n data['region'] = 'us-east-1'\n data['endpoint'] = 'https://ec2.amazonaws.com'\n with open(path, 'w+') as file:\n json.dump(data, file)\n\n\n<mask token>\n\n\ndef check_aws_config():\n config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n if not os.path.exists(config_path):\n print('\\nCreating default AWS config...')\n create_default_config(config_path)\n print('Done.\\n')\n return config_path\n\n\n<mask token>\n\n\ndef check_credentials():\n login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n login_data = load_json(login_file)\n robot_name = login_data['robot_name']\n username = login_data['username']\n password = login_data['password']\n src_path = '/var/jibo/credentials.json'\n dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n if not os.path.exists(dst_path):\n print('\\nGrabbing AWS credentials from robot...')\n copy_credentials_file(robot_name, username, password, src_path,\n dst_path)\n print('Done.\\n')\n return dst_path\n\n\n<mask token>\n\n\ndef load_json(path):\n with open(path, 'r') as file:\n data = json.load(file)\n return data\n",
"step-5": "import os, sys\nimport json\nimport paramiko\n\n\"\"\"\n\tCopies the credentials.json file locally from robot\n\"\"\"\ndef copy_credentials_file(hostname, username, password, src_path, dst_path):\n\t# create ssh connection\n\tssh_client = paramiko.SSHClient()\n\tssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\tssh_client.connect(hostname=hostname, username=username, password=password)\n\n\t# ftp file from robot to local path\n\tftp_client = ssh_client.open_sftp()\n\tftp_client.get(src_path, dst_path)\n\tftp_client.close()\n\n\n\"\"\"\n\tCreates a default config file for AWS\n\t(aws_config.json)\n\"\"\"\ndef create_default_config(path):\n\tdata = {}\n\tdata['method'] = 'GET'\n\tdata['service'] = 'ec2'\n\tdata['host'] = 'ec2.amazonaws.com'\n\tdata['region'] = 'us-east-1'\n\tdata['endpoint'] = 'https://ec2.amazonaws.com'\n\n\twith open(path, 'w+') as file:\n\t\tjson.dump(data, file)\n\n\n\"\"\"\n\tChecks for the aws_config.json file,\n\tcreates the file and populates with default values\n\tif not found.\n\"\"\"\ndef check_aws_config():\n\tconfig_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')\n\n\tif not os.path.exists(config_path):\n\t\tprint(\"\\nCreating default AWS config...\")\n\t\tcreate_default_config(config_path)\n\t\tprint(\"Done.\\n\")\n\n\treturn config_path\n\n\n\"\"\"\n\tChecks for the credentials.json file,\n\tcreates the file and populates with values from\n\trobot if not found.\n\"\"\"\ndef check_credentials():\n\tlogin_file = os.path.expanduser('~/jibo/HubTest/config/login.json')\n\tlogin_data = load_json(login_file)\n\n\trobot_name = login_data['robot_name']\n\tusername = login_data['username']\n\tpassword = login_data['password']\n\t\n\tsrc_path = '/var/jibo/credentials.json'\n\tdst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')\n\n\tif not os.path.exists(dst_path):\n\t\tprint(\"\\nGrabbing AWS credentials from robot...\")\n\t\tcopy_credentials_file(robot_name, username, password, 
src_path, dst_path)\n\t\tprint(\"Done.\\n\")\n\n\treturn dst_path\n\n\n\"\"\"\n\tReads and returns contents of JSON file\n\"\"\"\ndef load_json(path):\n\twith open(path, 'r') as file:\n\t\tdata = json.load(file)\n\treturn data\n\n\t",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def getMediaId(contentProviderMediaName):
conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=
'TravCatalog', password='travel', database=
'LodgingCatalogMaster_Phoenix')
cur = conn.cursor()
cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',
contentProviderMediaName)
row = cur.fetchone()
mediaid = None
while row:
mediaid = row[0]
break
return mediaid
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getMediaId(contentProviderMediaName):
conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=
'TravCatalog', password='travel', database=
'LodgingCatalogMaster_Phoenix')
cur = conn.cursor()
cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',
contentProviderMediaName)
row = cur.fetchone()
mediaid = None
while row:
mediaid = row[0]
break
return mediaid
def main(messages_file, records):
print('> Messages: %s; Records: %d' % (messages_file, records))
message_number = 0
with open(messages_file, 'r') as msgs_file:
for message in msgs_file:
if message_number >= records and records > 0:
break
if message.startswith('> '):
continue
try:
jsonMsg = json.loads(message)
mediaid = getMediaId(jsonMsg['fileName'])
if mediaid != None:
jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)
print(json.dumps(jsonMsg))
except (RuntimeError, TypeError, NameError):
print('> %s error' % message_number)
message_number += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getMediaId(contentProviderMediaName):
conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=
'TravCatalog', password='travel', database=
'LodgingCatalogMaster_Phoenix')
cur = conn.cursor()
cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',
contentProviderMediaName)
row = cur.fetchone()
mediaid = None
while row:
mediaid = row[0]
break
return mediaid
def main(messages_file, records):
print('> Messages: %s; Records: %d' % (messages_file, records))
message_number = 0
with open(messages_file, 'r') as msgs_file:
for message in msgs_file:
if message_number >= records and records > 0:
break
if message.startswith('> '):
continue
try:
jsonMsg = json.loads(message)
mediaid = getMediaId(jsonMsg['fileName'])
if mediaid != None:
jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)
print(json.dumps(jsonMsg))
except (RuntimeError, TypeError, NameError):
print('> %s error' % message_number)
message_number += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('messages_file', help=
'File with the messages to write. One message per line')
parser.add_argument('--records', default=-1, help=
'Number of messages to read')
args = parser.parse_args()
main(args.messages_file, int(args.records))
<|reserved_special_token_1|>
import argparse
import pymssql
import json
def getMediaId(contentProviderMediaName):
conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=
'TravCatalog', password='travel', database=
'LodgingCatalogMaster_Phoenix')
cur = conn.cursor()
cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',
contentProviderMediaName)
row = cur.fetchone()
mediaid = None
while row:
mediaid = row[0]
break
return mediaid
def main(messages_file, records):
print('> Messages: %s; Records: %d' % (messages_file, records))
message_number = 0
with open(messages_file, 'r') as msgs_file:
for message in msgs_file:
if message_number >= records and records > 0:
break
if message.startswith('> '):
continue
try:
jsonMsg = json.loads(message)
mediaid = getMediaId(jsonMsg['fileName'])
if mediaid != None:
jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)
print(json.dumps(jsonMsg))
except (RuntimeError, TypeError, NameError):
print('> %s error' % message_number)
message_number += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('messages_file', help=
'File with the messages to write. One message per line')
parser.add_argument('--records', default=-1, help=
'Number of messages to read')
args = parser.parse_args()
main(args.messages_file, int(args.records))
<|reserved_special_token_1|>
#!/usr/bin/env python
import argparse
import pymssql
import json
#get the lcmMediaId from DB.
def getMediaId(contentProviderMediaName):
    """Look up the LCM media id for a content-provider media name.

    Queries the (test) LodgingCatalogMaster media table and returns the
    first column of the first matching row, or None when no row matches.
    """
    # TODO(review): credentials are hard-coded here; move to config/env.
    #test db
    conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')
    #prod db
    #conn = pymssql.connect(host='LodgingCatalogMaster.ch.expeso.com', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')
    try:
        cur = conn.cursor()
        # parameterized query -- the driver handles escaping of the name
        cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s', contentProviderMediaName)
        row = cur.fetchone()
        # fix: the original `while row: ... break` was an `if` in disguise,
        # and the connection was never closed (leaked on every call)
        return row[0] if row else None
    finally:
        conn.close()
def main(messages_file, records):
    """Stream messages from *messages_file*, enrich each JSON message with
    its lcmMediaId, and print the result.

    Stops after *records* messages when records > 0; lines starting with
    '> ' are treated as log noise and skipped.
    """
    print ('> Messages: %s; Records: %d' % (messages_file, records))
    message_number = 0
    with open(messages_file, 'r') as msgs_file:
        for message in msgs_file:
            if message_number >= records and records > 0:
                break
            if message.startswith('> '):
                continue
            try:
                parsed = json.loads(message)
                media_id = getMediaId(parsed['fileName'])
                if media_id != None:
                    parsed['domainFields']['lcmMediaId'] = str(media_id)
                print (json.dumps(parsed))
            except (RuntimeError, TypeError, NameError):
                # report the failing line number but keep processing
                print ('> %s error' % message_number)
            message_number += 1
if __name__ == '__main__':
    # CLI entry point: positional input file plus optional record limit.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('messages_file',
                            help='File with the messages to write. One message per line')
    arg_parser.add_argument('--records', default=-1,
                            help='Number of messages to read')
    cli_args = arg_parser.parse_args()
    main(cli_args.messages_file, int(cli_args.records))
|
flexible
|
{
"blob_id": "a5b7f565a1797e5f326bcf26ff7c8ad2469dca70",
"index": 7442,
"step-1": "<mask token>\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\ndef main(messages_file, records):\n print('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if mediaid != None:\n jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)\n print(json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print('> %s error' % message_number)\n message_number += 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\ndef main(messages_file, records):\n print('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if mediaid != None:\n jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)\n print(json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print('> %s error' % message_number)\n message_number += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('messages_file', help=\n 'File with the messages to write. One message per line')\n parser.add_argument('--records', default=-1, help=\n 'Number of messages to read')\n args = parser.parse_args()\n main(args.messages_file, int(args.records))\n",
"step-4": "import argparse\nimport pymssql\nimport json\n\n\ndef getMediaId(contentProviderMediaName):\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user=\n 'TravCatalog', password='travel', database=\n 'LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',\n contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid = row[0]\n break\n return mediaid\n\n\ndef main(messages_file, records):\n print('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if mediaid != None:\n jsonMsg['domainFields']['lcmMediaId'] = str(mediaid)\n print(json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print('> %s error' % message_number)\n message_number += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('messages_file', help=\n 'File with the messages to write. One message per line')\n parser.add_argument('--records', default=-1, help=\n 'Number of messages to read')\n args = parser.parse_args()\n main(args.messages_file, int(args.records))\n",
"step-5": "#!/usr/bin/env python\nimport argparse\nimport pymssql\nimport json\n\n#get the lcmMediaId from DB.\ndef getMediaId(contentProviderMediaName):\n #test db\n conn = pymssql.connect(host='CHELLSSSQL23.karmalab.net', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')\n #prod db\n #conn = pymssql.connect(host='LodgingCatalogMaster.ch.expeso.com', user='TravCatalog', password='travel', database='LodgingCatalogMaster_Phoenix')\n cur = conn.cursor()\n cur.execute('SELECT * FROM media WHERE contentprovidermedianame =%s',contentProviderMediaName)\n row = cur.fetchone()\n mediaid = None\n while row:\n mediaid =row[0]\n break\n return mediaid\n\ndef main(messages_file, records):\n print ('> Messages: %s; Records: %d' % (messages_file, records))\n message_number = 0\n with open(messages_file, 'r') as msgs_file:\n for message in msgs_file:\n if message_number >= records and records > 0:\n break\n if message.startswith('> '):\n continue\n try:\n jsonMsg = json.loads(message)\n mediaid = getMediaId(jsonMsg['fileName'])\n if(mediaid != None):\n jsonMsg['domainFields']['lcmMediaId']=str(mediaid)\n print (json.dumps(jsonMsg))\n except (RuntimeError, TypeError, NameError):\n print ('> %s error' % message_number)\n message_number += 1\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'messages_file', help='File with the messages to write. One message per line'\n )\n parser.add_argument(\n '--records', default=-1, help='Number of messages to read'\n )\n args = parser.parse_args()\n main(args.messages_file, int(args.records))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import socket
import select
import time
"""=====================Head Define====================="""
UDP_RECEIVE_TIMEOUT = 1
LOOP_DELAY = 1
"""=====================Class====================="""
class UDP_packet:
    """Decoded board status packet.

    The high nibble of *board_info* is the board type, the low nibble the
    board number; *board_add* and *state* are stored as given.
    """

    def __init__(self, board_info, board_add, state):
        bits = "{0:08b}".format(board_info)
        self.board_type = int(bits[:4], 2)
        self.board_num = int(bits[4:], 2)
        self.board_add = board_add
        self.state = state

    def _describe(self):
        # single formatter shared by __str__ and __repr__
        return "Type:{}, Num:{}, Addr:{}, State:{}".format(
            self.board_type, self.board_num, self.board_add, self.state)

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
"""=====================Support functions====================="""
def init_UDP_connection(DEBUG_MODE=False):
    """Create and bind the PC-side UDP socket.

    When DEBUG_MODE is True, loopback addresses are used so the master and
    PC can run on one machine; otherwise the fixed LAN addresses apply.

    Returns:
        (sock, master_ip, master_port): the bound non-blocking socket plus
        the master's address for subsequent sendto() calls.
    """
    if DEBUG_MODE:
        UDP_MASTER_IP = "127.0.0.2"
        UDP_MASTER_PORT = 5005
        UDP_PC_IP = "127.0.0.1"
        UDP_PC_PORT = 5006
    else:
        UDP_MASTER_IP = "192.168.1.26"
        UDP_MASTER_PORT = 5005
        UDP_PC_IP = "192.168.1.25"
        UDP_PC_PORT = 5005
    # fix: the original format string reused {0}/{1}, so the "Target IP"
    # line printed the PC's own address instead of the master's
    print("My IP is: {0}, PORT: {1}\nTarget IP is: {2}, PORT: {3}".format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setblocking(0)  # non-blocking; reads are gated by select() in main()
    sock.bind((UDP_PC_IP, UDP_PC_PORT))
    return sock, UDP_MASTER_IP, UDP_MASTER_PORT
"""===================== MAIN ====================="""
def main(sock):
    """Echo loop: wait up to UDP_RECEIVE_TIMEOUT for a datagram from the
    master; echo anything received, otherwise re-send the last payload
    (initially b"HELLO") as a keep-alive.

    NOTE(review): UDP_MASTER_IP / UDP_MASTER_PORT are read as module
    globals here, but init_UDP_connection() only *returns* them -- as the
    module stands, this raises NameError unless the caller binds them at
    module level first. Confirm the intended wiring.
    """

    data = b"HELLO"
    while True:
        # select() blocks until the socket is readable or the timeout lapses
        ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)
        if ready[0]:
            data, _ = sock.recvfrom(80)  # buffer size is 80 bytes
            print("PC: I just received message: [{0}]".format(data))
            sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
            print("PC: I just Sent a [{0}]".format(data))
        else:
            # timeout: re-send the last payload as a keep-alive
            sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))
            print("PC: I just Sent a [{0}]".format(data))
if __name__ == '__main__':
    # fix: init_UDP_connection() returns (sock, master_ip, master_port);
    # the original passed the whole tuple to main() as `sock` and never
    # bound the UDP_MASTER_* globals that main() reads, causing a
    # NameError (or AttributeError on the tuple) at runtime.
    sock, UDP_MASTER_IP, UDP_MASTER_PORT = init_UDP_connection()
    main(sock)
|
normal
|
{
"blob_id": "7c2a59f698b75d0de89a16310d97a01506c99cb3",
"index": 9840,
"step-1": "<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)\n",
"step-3": "<mask token>\nUDP_RECEIVE_TIMEOUT = 1\nLOOP_DELAY = 1\n<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)\n",
"step-4": "import socket\nimport select\nimport time\n<mask token>\nUDP_RECEIVE_TIMEOUT = 1\nLOOP_DELAY = 1\n<mask token>\n\n\nclass UDP_packet:\n\n def __init__(self, board_info, board_add, state):\n self.board_type = int('{0:08b}'.format(board_info)[:4], 2)\n self.board_num = int('{0:08b}'.format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return 'Type:{}, Num:{}, Addr:{}, State:{}'.format(self.board_type,\n self.board_num, self.board_add, self.state)\n\n\n<mask token>\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = '127.0.0.2'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '127.0.0.1'\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = '192.168.1.26'\n UDP_MASTER_PORT = 5005\n UDP_PC_IP = '192.168.1.25'\n UDP_PC_PORT = 5005\n print('My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}'.\n format(UDP_PC_IP, UDP_PC_PORT, UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n<mask token>\n\n\ndef main(sock):\n data = b'HELLO'\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80)\n print('PC: I just received message: [{0}]'.format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print('PC: I just Sent a [{0}]'.format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)\n",
"step-5": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport socket\nimport select\nimport time\n\n\"\"\"=====================Head Define=====================\"\"\"\nUDP_RECEIVE_TIMEOUT = 1\nLOOP_DELAY = 1\n\n\"\"\"=====================Class=====================\"\"\"\n\n\nclass UDP_packet:\n def __init__(self,board_info, board_add, state):\n self.board_type = int(\"{0:08b}\".format(board_info)[:4], 2)\n self.board_num = int(\"{0:08b}\".format(board_info)[4:], 2)\n self.board_add = board_add\n self.state = state\n\n def __str__(self):\n return \"Type:{}, Num:{}, Addr:{}, State:{}\".format(self.board_type, self.board_num, self.board_add, self.state)\n\n def __repr__(self):\n return \"Type:{}, Num:{}, Addr:{}, State:{}\".format(self.board_type, self.board_num, self.board_add, self.state)\n\n\n\"\"\"=====================Support functions=====================\"\"\"\n\n\ndef init_UDP_connection(DEBUG_MODE=False):\n if DEBUG_MODE:\n UDP_MASTER_IP = \"127.0.0.2\"\n UDP_MASTER_PORT = 5005\n \n UDP_PC_IP = \"127.0.0.1\"\n UDP_PC_PORT = 5006\n else:\n UDP_MASTER_IP = \"192.168.1.26\"\n UDP_MASTER_PORT = 5005\n \n UDP_PC_IP = \"192.168.1.25\"\n UDP_PC_PORT = 5005 \n print(\"My IP is: {0}, PORT: {1}\\nTarget IP is: {0}, PORT: {1}\".format(UDP_PC_IP, UDP_PC_PORT,UDP_MASTER_IP, UDP_MASTER_PORT))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setblocking(0)\n sock.bind((UDP_PC_IP, UDP_PC_PORT))\n\n return sock, UDP_MASTER_IP, UDP_MASTER_PORT\n\n\n\"\"\"===================== MAIN =====================\"\"\"\n\n\ndef main(sock):\n\n data = b\"HELLO\"\n while True:\n ready = select.select([sock], [], [], UDP_RECEIVE_TIMEOUT)\n if ready[0]:\n data, _ = sock.recvfrom(80) # buffer size is 1024 bytes\n print(\"PC: I just received message: [{0}]\".format(data))\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print(\"PC: I just Sent a [{0}]\".format(data))\n else:\n sock.sendto(data, (UDP_MASTER_IP, UDP_MASTER_PORT))\n print(\"PC: I just Sent a 
[{0}]\".format(data))\n\n\nif __name__ == '__main__':\n sock = init_UDP_connection()\n main(sock)",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# Generated by Django 2.1.2 on 2018-10-25 09:36
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('grafit', '0002_article'),
]
operations = [
migrations.RunSQL("""
INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.
10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]
On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries
MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.
Indexing
Fields in a MongoDB document can be indexed with primary and secondary indices.
Replication
MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.
Load balancing[10]
MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.
MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');
INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]
Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9]
Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)
Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');
INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]
Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]
In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');
INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system."
"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services."
"We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."');
INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.
Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).
Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.
According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc."
phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.
Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.
Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.
"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post.
However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.
"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc."
Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.
');
INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.
When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.
"Who care about database design? What about mockups? What about workflows?"
Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses.
That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.
We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used.
Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.
That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.
Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.
"Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that."
Except you couldn''t enter a customer without an order.
Except you couldn''t enter an order without at least one item on it.
Except you couldn''t enter an item unless it was listed in inventory.
Except that reserved the "inventory" item and made it unavailable.
Except, except, except ...
It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.
Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.
If they had simply had a proper database design up front, they could have reused their existing system with little trouble.
That''s what bad database design costs you and why I usually start with that before writing my software.
Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');
INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.
Learn more about actions
As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');
INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '
The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.
Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.
Affected products
GitHub Desktop
GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.
Atom
Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch.
Ensure you’re on the latest Atom release by completing any of the following:
Windows: From the toolbar, click Help -> Check for Updates
MacOS: From the menu bar, click Atom -> Check for Update
Linux: Update manually by downloading the latest release from atom.io
Git on the command line and other clients
In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.
Additional notes
Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.
Details of the vulnerability
This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.
The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.
The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).
The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.
We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).
Please update your copy of Git soon, and happy cloning!
');
INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.
The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.
The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.
So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others.
To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.
What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');
INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.
rbanffy on Aug 18, 2012 [-]
I think this would be a mistake.
This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.
All that is lost is the MySQL name and brand.
PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.
Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.
Udo on Aug 18, 2012 [-]
I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project!
MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.
Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.
Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.
The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');
INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?
Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');
INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.
Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.
');
INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.
This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.
What is PostgreSQL?
PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.
A Brief History of PostgreSQL
PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.
1977-1985 − A project called INGRES was developed.
Proof-of-concept for relational databases
Established the company Ingres in 1980
Bought by Computer Associates in 1994
1986-1994 − POSTGRES
Development of the concepts in INGRES with a focus on object orientation and the query language - Quel
The code base of INGRES was not used as a basis for POSTGRES
Commercialized as Illustra (bought by Informix, bought by IBM)
1994-1995 − Postgres95
Support for SQL was added in 1994
Released as Postgres95 in 1995
Re-released as PostgreSQL 6.0 in 1996
Establishment of the PostgreSQL Global Development Team
Key Features of PostgreSQL
PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).
PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −
Complex SQL queries
SQL Sub-selects
Foreign keys
Trigger
Views
Transactions
Multiversion concurrency control (MVCC)
Streaming Replication (as of 9.0)
Hot Standby (as of 9.0)
You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −
Data types
Functions
Operators
Aggregate functions
Index methods
Procedural Languages Support
PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');
INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.
I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.
Install Directly or not?
On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.
In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.
Installing Docker
Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.
After logging back in I then got the following message about hardware-assisted virtualization not being enabled.
After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.
Open a command prompt and run the following command.
docker run hello-world
You should output that starts with the following if your installation is working.
Hello from Docker!
This message shows that your installation appears to be working correctly.
What about Postgres?
Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres
The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.
docker create -v /var/lib/postgresql/data --name PostgresData alpine
The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects.
Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres
The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.
If you run the docker ps -a command it will show you all your containers.
As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.
');
INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.
The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.)
Step 1: Install PostgreSQL
Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):
Open a terminal window.
Issue the command sudo apt-get install postgresql.
Type the sudo password necessary to give you admin rights and hit Enter.
Allow apt to pick up any necessary dependencies.
Once the installation is complete, it''s time to set this baby up.
Step 2: Change the default user password
Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.
Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:
Open a terminal window.
Issue the command sudo passwd postgres.
Type (and confirm) that password to be used for this user.
The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:
postgres=#
All other users have to gain access to the prompt like so:
psql DB_NAME
where DB_NAME is the name of an existing database.
');
INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.
This seems to be the journey:
1. Lack of migrations is awesome! We can iterate so quickly for MVP
2. Get users
3. Add features, still enjoying the speed of iteration
4. Get more users
5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)
6. Realise you desperately need joins, transactions and other SQL features
7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.
I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
My thought is definitely yes.
brandur on Aug 29, 2017 [-]
> I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.
The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.
The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.
Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.
martinald on Aug 29, 2017 [-]
I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.
I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.
Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.
I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.
I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. ');
INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.
The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.
Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.
As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.
A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.
Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2
10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');
INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.
I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”
I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.
Red Bull could sponsor it. I’d buy a T-shirt.
kbenson 8 months ago [-]
That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.
You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.
If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource.
Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...
1: https://github.com/gothinkster/realworld
2: https://www.techempower.com/benchmarks/
etxm 8 months ago [-]
Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)
It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.
It would be cool to see things like disaster recovery and chaos proofing as well. ');
INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.
Leveraging the community
There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.
On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.
For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.
Always quality focused
No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.
Learn more');
INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.
Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.
Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.
maxxxxx 8 months ago [-]
Agreed. Switching to another system is expensive and the benefit is pretty questionable.
emsy 8 months ago [-]
Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.
TremendousJudge 8 months ago [-]
expand, please?
maxxxxx 8 months ago [-]
I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.
In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.
gopalv 8 months ago [-]
> Depending on your data some databases may be better than others and that should be easy to measure.
And the performance difference could be an accidental feature of the design and completely unintentional.
Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.
Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).
When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.
And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.
Though how it came about isn''t really intentional. ');
"""),
]
|
normal
|
{
"blob_id": "8b0eed6d1f24b5dd30726ce08c97354a5d5ab69b",
"index": 7597,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grafit', '0002_article')]\n operations = [migrations.RunSQL(\n \"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. 
Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. 
Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. 
Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. 
In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. 
A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? 
So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. 
GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. 
Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most 
compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. 
Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. 
It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. 
It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. 
This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. 
It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. 
The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. 
Realise you desperately need joins, transactions and other SQL features\n\n 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. 
Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. 
If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. 
You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. 
This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"\n )]\n",
"step-4": "import django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.utils.timezone\nimport uuid\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grafit', '0002_article')]\n operations = [migrations.RunSQL(\n \"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. 
When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. 
The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. 
Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. 
In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. 
A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? 
So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. 
GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. 
Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most 
compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. 
Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. 
It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. 
It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. 
This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. 
It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. 
The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. 
Realise you desperately need joins, transactions and other SQL features\n\n 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. 
Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. 
If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. 
You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. 
This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"\n )]\n",
"step-5": "# Generated by Django 2.1.2 on 2018-10-25 09:36\n\nimport django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.utils.timezone\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('grafit', '0002_article'),\n ]\n\n operations = [\n migrations.RunSQL(\"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. 
When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. 
The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. 
Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. 
In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. 
A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? 
So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. 
GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. 
Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. 
We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most 
compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. 
Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. 
On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. 
It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. 
It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. 
This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. 
It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. 
The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. 
Realise you desperately need joins, transactions and other SQL features\n\n 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. 
Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. 
If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. 
You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. 
This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestModel(tl.LightningModule):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def training_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'tr_loss': loss}
result = tl.TrainResult(loss, self.model.trainable_variables, log=log)
return result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestDataLoader(tl.LightningDataModule):
    """Random-tensor data module used to smoke-test the training loop."""

    def __init__(self):
        # Fixed batch size for both the training and validation pipelines.
        self.batch_size = 32

    def setup(self):
        # Fabricate small float tensors so no real data files are needed.
        self.tr_dataset = tf.random.normal((256, 7))
        self.val_dataset = tf.random.normal((64, 7))

    def train_dataloader(self):
        # Wrap the training tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(self.batch_size)

    def val_dataloader(self):
        # Wrap the validation tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(self.batch_size)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestModel(tl.LightningModule):
    """Minimal two-layer dense model for exercising the tf_lightning loop."""

    def __init__(self):
        super().__init__()
        # Tiny stack of dense layers; weights are created lazily on first call.
        dense_stack = [tf.keras.layers.Dense(5), tf.keras.layers.Dense(2)]
        self.model = tf.keras.Sequential(dense_stack)

    def call(self, dataset):
        # Delegate the forward pass to the wrapped Sequential model.
        return self.model(dataset)

    def configure_optimizers(self):
        # Single Adam optimizer, returned as a one-element tuple.
        return (tf.keras.optimizers.Adam(0.1),)

    def training_step(self, batch, batch_idx, optimizer_idx):
        """One training step: forward pass, mean-reduce to a scalar loss."""
        outputs = self(batch)
        step_loss = tf.reduce_mean(outputs)
        return tl.TrainResult(
            step_loss,
            self.model.trainable_variables,
            log={'batch_idx': batch_idx, 'tr_loss': step_loss},
        )

    def validation_step(self, batch, batch_idx, optimizer_idx):
        """One validation step; mirrors training_step but logs val_loss."""
        outputs = self(batch)
        step_loss = tf.reduce_mean(outputs)
        return tl.EvalResult(
            step_loss,
            log={'batch_idx': batch_idx, 'val_loss': step_loss},
        )
<|reserved_special_token_0|>
class TestDataLoader(tl.LightningDataModule):
    """Random-tensor data module used to smoke-test the training loop."""

    def __init__(self):
        # Fixed batch size for both the training and validation pipelines.
        self.batch_size = 32

    def setup(self):
        # Fabricate small float tensors so no real data files are needed.
        self.tr_dataset = tf.random.normal((256, 7))
        self.val_dataset = tf.random.normal((64, 7))

    def train_dataloader(self):
        # Wrap the training tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(self.batch_size)

    def val_dataloader(self):
        # Wrap the validation tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(self.batch_size)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestModel(tl.LightningModule):
    """Minimal two-layer dense model for exercising the tf_lightning loop."""

    def __init__(self):
        super().__init__()
        # Tiny stack of dense layers; weights are created lazily on first call.
        dense_stack = [tf.keras.layers.Dense(5), tf.keras.layers.Dense(2)]
        self.model = tf.keras.Sequential(dense_stack)

    def call(self, dataset):
        # Delegate the forward pass to the wrapped Sequential model.
        return self.model(dataset)

    def configure_optimizers(self):
        # Single Adam optimizer, returned as a one-element tuple.
        return (tf.keras.optimizers.Adam(0.1),)

    def training_step(self, batch, batch_idx, optimizer_idx):
        """One training step: forward pass, mean-reduce to a scalar loss."""
        outputs = self(batch)
        step_loss = tf.reduce_mean(outputs)
        return tl.TrainResult(
            step_loss,
            self.model.trainable_variables,
            log={'batch_idx': batch_idx, 'tr_loss': step_loss},
        )

    def validation_step(self, batch, batch_idx, optimizer_idx):
        """One validation step; mirrors training_step but logs val_loss."""
        outputs = self(batch)
        step_loss = tf.reduce_mean(outputs)
        return tl.EvalResult(
            step_loss,
            log={'batch_idx': batch_idx, 'val_loss': step_loss},
        )

    def checkpointer(self):
        # Track the model weights plus the first optimizer.
        # NOTE(review): self.optimizer_0 is presumably populated by tf_lightning
        # from configure_optimizers() — confirm against the framework.
        return tf.train.Checkpoint(m=self.model, opt0=self.optimizer_0)
class TestDataLoader(tl.LightningDataModule):
    """Random-tensor data module used to smoke-test the training loop."""

    def __init__(self):
        # Fixed batch size for both the training and validation pipelines.
        self.batch_size = 32

    def setup(self):
        # Fabricate small float tensors so no real data files are needed.
        self.tr_dataset = tf.random.normal((256, 7))
        self.val_dataset = tf.random.normal((64, 7))

    def train_dataloader(self):
        # Wrap the training tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(self.batch_size)

    def val_dataloader(self):
        # Wrap the validation tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(self.batch_size)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import tf_lightning as tl
import tensorflow as tf
class TestModel(tl.LightningModule):
    """Minimal two-layer dense model for exercising the tf_lightning loop."""

    def __init__(self):
        super().__init__()
        # Tiny stack of dense layers; weights are created lazily on first call.
        dense_stack = [tf.keras.layers.Dense(5), tf.keras.layers.Dense(2)]
        self.model = tf.keras.Sequential(dense_stack)

    def call(self, dataset):
        # Delegate the forward pass to the wrapped Sequential model.
        return self.model(dataset)

    def configure_optimizers(self):
        # Single Adam optimizer, returned as a one-element tuple.
        return (tf.keras.optimizers.Adam(0.1),)

    def training_step(self, batch, batch_idx, optimizer_idx):
        """One training step: forward pass, mean-reduce to a scalar loss."""
        outputs = self(batch)
        step_loss = tf.reduce_mean(outputs)
        return tl.TrainResult(
            step_loss,
            self.model.trainable_variables,
            log={'batch_idx': batch_idx, 'tr_loss': step_loss},
        )

    def validation_step(self, batch, batch_idx, optimizer_idx):
        """One validation step; mirrors training_step but logs val_loss."""
        outputs = self(batch)
        step_loss = tf.reduce_mean(outputs)
        return tl.EvalResult(
            step_loss,
            log={'batch_idx': batch_idx, 'val_loss': step_loss},
        )

    def checkpointer(self):
        # Track the model weights plus the first optimizer.
        # NOTE(review): self.optimizer_0 is presumably populated by tf_lightning
        # from configure_optimizers() — confirm against the framework.
        return tf.train.Checkpoint(m=self.model, opt0=self.optimizer_0)
class TestDataLoader(tl.LightningDataModule):
    """Random-tensor data module used to smoke-test the training loop."""

    def __init__(self):
        # Fixed batch size for both the training and validation pipelines.
        self.batch_size = 32

    def setup(self):
        # Fabricate small float tensors so no real data files are needed.
        self.tr_dataset = tf.random.normal((256, 7))
        self.val_dataset = tf.random.normal((64, 7))

    def train_dataloader(self):
        # Wrap the training tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(self.batch_size)

    def val_dataloader(self):
        # Wrap the validation tensor in a batched tf.data pipeline.
        return tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(self.batch_size)
if __name__ == '__main__':
model = TestModel()
dataloader = TestDataLoader()
trainer = tl.Trainer()
trainer.fit(model, dataloader)
<|reserved_special_token_1|>
# __author__ = 'Vasudev Gupta'
import tf_lightning as tl
import tensorflow as tf
class TestModel(tl.LightningModule):
# just a random model with random dataset
def __init__(self):
# simple test model
super().__init__()
self.model = tf.keras.Sequential([
tf.keras.layers.Dense(5),
tf.keras.layers.Dense(2)
])
def call(self, dataset):
return self.model(dataset)
def configure_optimizers(self):
return tf.keras.optimizers.Adam(0.1),
def training_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'tr_loss': loss}
result = tl.TrainResult(
loss, self.model.trainable_variables, log=log)
return result
def validation_step(self, batch, batch_idx, optimizer_idx):
pred = self(batch)
loss = tf.reduce_mean(pred)
log = {'batch_idx': batch_idx, 'val_loss': loss}
result = tl.EvalResult(loss, log=log)
return result
def checkpointer(self):
return tf.train.Checkpoint(m=self.model,
opt0=self.optimizer_0)
class TestDataLoader(tl.LightningDataModule):
# using random dataset
def __init__(self):
self.batch_size = 32
def setup(self):
self.tr_dataset = tf.random.normal((256, 7))
self.val_dataset = tf.random.normal((64, 7))
def train_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(
self.tr_dataset).batch(self.batch_size)
return dataset
def val_dataloader(self):
dataset = tf.data.Dataset.from_tensor_slices(
self.val_dataset).batch(self.batch_size)
return dataset
if __name__ == '__main__':
model = TestModel()
dataloader = TestDataLoader()
trainer = tl.Trainer()
trainer.fit(model, dataloader)
|
flexible
|
{
"blob_id": "f2397ba3fe1452238f251111f35b06b4a93e0359",
"index": 2441,
"step-1": "<mask token>\n\n\nclass TestModel(tl.LightningModule):\n <mask token>\n <mask token>\n <mask token>\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n log = {'batch_idx': batch_idx, 'tr_loss': loss}\n result = tl.TrainResult(loss, self.model.trainable_variables, log=log)\n return result\n <mask token>\n <mask token>\n\n\nclass TestDataLoader(tl.LightningDataModule):\n\n def __init__(self):\n self.batch_size = 32\n\n def setup(self):\n self.tr_dataset = tf.random.normal((256, 7))\n self.val_dataset = tf.random.normal((64, 7))\n\n def train_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(\n self.batch_size)\n return dataset\n\n def val_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(\n self.batch_size)\n return dataset\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestModel(tl.LightningModule):\n\n def __init__(self):\n super().__init__()\n self.model = tf.keras.Sequential([tf.keras.layers.Dense(5), tf.\n keras.layers.Dense(2)])\n\n def call(self, dataset):\n return self.model(dataset)\n\n def configure_optimizers(self):\n return tf.keras.optimizers.Adam(0.1),\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n log = {'batch_idx': batch_idx, 'tr_loss': loss}\n result = tl.TrainResult(loss, self.model.trainable_variables, log=log)\n return result\n\n def validation_step(self, batch, batch_idx, optimizer_idx):\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n log = {'batch_idx': batch_idx, 'val_loss': loss}\n result = tl.EvalResult(loss, log=log)\n return result\n <mask token>\n\n\nclass TestDataLoader(tl.LightningDataModule):\n\n def __init__(self):\n self.batch_size = 32\n\n def setup(self):\n self.tr_dataset = tf.random.normal((256, 7))\n self.val_dataset = tf.random.normal((64, 7))\n\n def train_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(\n self.batch_size)\n return dataset\n\n def val_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(\n self.batch_size)\n return dataset\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestModel(tl.LightningModule):\n\n def __init__(self):\n super().__init__()\n self.model = tf.keras.Sequential([tf.keras.layers.Dense(5), tf.\n keras.layers.Dense(2)])\n\n def call(self, dataset):\n return self.model(dataset)\n\n def configure_optimizers(self):\n return tf.keras.optimizers.Adam(0.1),\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n log = {'batch_idx': batch_idx, 'tr_loss': loss}\n result = tl.TrainResult(loss, self.model.trainable_variables, log=log)\n return result\n\n def validation_step(self, batch, batch_idx, optimizer_idx):\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n log = {'batch_idx': batch_idx, 'val_loss': loss}\n result = tl.EvalResult(loss, log=log)\n return result\n\n def checkpointer(self):\n return tf.train.Checkpoint(m=self.model, opt0=self.optimizer_0)\n\n\nclass TestDataLoader(tl.LightningDataModule):\n\n def __init__(self):\n self.batch_size = 32\n\n def setup(self):\n self.tr_dataset = tf.random.normal((256, 7))\n self.val_dataset = tf.random.normal((64, 7))\n\n def train_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(\n self.batch_size)\n return dataset\n\n def val_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(\n self.batch_size)\n return dataset\n\n\n<mask token>\n",
"step-4": "import tf_lightning as tl\nimport tensorflow as tf\n\n\nclass TestModel(tl.LightningModule):\n\n def __init__(self):\n super().__init__()\n self.model = tf.keras.Sequential([tf.keras.layers.Dense(5), tf.\n keras.layers.Dense(2)])\n\n def call(self, dataset):\n return self.model(dataset)\n\n def configure_optimizers(self):\n return tf.keras.optimizers.Adam(0.1),\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n log = {'batch_idx': batch_idx, 'tr_loss': loss}\n result = tl.TrainResult(loss, self.model.trainable_variables, log=log)\n return result\n\n def validation_step(self, batch, batch_idx, optimizer_idx):\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n log = {'batch_idx': batch_idx, 'val_loss': loss}\n result = tl.EvalResult(loss, log=log)\n return result\n\n def checkpointer(self):\n return tf.train.Checkpoint(m=self.model, opt0=self.optimizer_0)\n\n\nclass TestDataLoader(tl.LightningDataModule):\n\n def __init__(self):\n self.batch_size = 32\n\n def setup(self):\n self.tr_dataset = tf.random.normal((256, 7))\n self.val_dataset = tf.random.normal((64, 7))\n\n def train_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.tr_dataset).batch(\n self.batch_size)\n return dataset\n\n def val_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.val_dataset).batch(\n self.batch_size)\n return dataset\n\n\nif __name__ == '__main__':\n model = TestModel()\n dataloader = TestDataLoader()\n trainer = tl.Trainer()\n trainer.fit(model, dataloader)\n",
"step-5": "# __author__ = 'Vasudev Gupta'\n\nimport tf_lightning as tl\nimport tensorflow as tf\n\n\nclass TestModel(tl.LightningModule):\n # just a random model with random dataset\n\n def __init__(self):\n # simple test model\n super().__init__()\n\n self.model = tf.keras.Sequential([\n tf.keras.layers.Dense(5),\n tf.keras.layers.Dense(2)\n ])\n\n def call(self, dataset):\n return self.model(dataset)\n\n def configure_optimizers(self):\n return tf.keras.optimizers.Adam(0.1),\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n\n log = {'batch_idx': batch_idx, 'tr_loss': loss}\n result = tl.TrainResult(\n loss, self.model.trainable_variables, log=log)\n\n return result\n\n def validation_step(self, batch, batch_idx, optimizer_idx):\n\n pred = self(batch)\n loss = tf.reduce_mean(pred)\n\n log = {'batch_idx': batch_idx, 'val_loss': loss}\n result = tl.EvalResult(loss, log=log)\n\n return result\n\n def checkpointer(self):\n return tf.train.Checkpoint(m=self.model,\n opt0=self.optimizer_0)\n\n\nclass TestDataLoader(tl.LightningDataModule):\n # using random dataset\n\n def __init__(self):\n self.batch_size = 32\n\n def setup(self):\n self.tr_dataset = tf.random.normal((256, 7))\n self.val_dataset = tf.random.normal((64, 7))\n\n def train_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n self.tr_dataset).batch(self.batch_size)\n return dataset\n\n def val_dataloader(self):\n dataset = tf.data.Dataset.from_tensor_slices(\n self.val_dataset).batch(self.batch_size)\n return dataset\n\n\nif __name__ == '__main__':\n\n model = TestModel()\n\n dataloader = TestDataLoader()\n\n trainer = tl.Trainer()\n\n trainer.fit(model, dataloader)\n",
"step-ids": [
7,
11,
12,
14,
15
]
}
|
[
7,
11,
12,
14,
15
] |
botnet = open("bots.txt","r")
bots = botnet.read()
print(bots.split('\n'))
botnet.close()
|
normal
|
{
"blob_id": "ea876d903263c907f63b2f37a81f2576345dae62",
"index": 7692,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(bots.split('\\n'))\nbotnet.close()\n",
"step-3": "botnet = open('bots.txt', 'r')\nbots = botnet.read()\nprint(bots.split('\\n'))\nbotnet.close()\n",
"step-4": "botnet = open(\"bots.txt\",\"r\")\nbots = botnet.read()\nprint(bots.split('\\n'))\nbotnet.close()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.7 on 2021-02-24 05:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('autotasks', '0017_auto_20210210_1512'),
]
operations = [
migrations.AddField(
model_name='automatedtask',
name='run_asap_after_missed',
field=models.BooleanField(default=False),
),
]
|
normal
|
{
"blob_id": "3ab1de77147f6abfabeea10f2a4e85686edffd6f",
"index": 2573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('autotasks', '0017_auto_20210210_1512')]\n operations = [migrations.AddField(model_name='automatedtask', name=\n 'run_asap_after_missed', field=models.BooleanField(default=False))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('autotasks', '0017_auto_20210210_1512')]\n operations = [migrations.AddField(model_name='automatedtask', name=\n 'run_asap_after_missed', field=models.BooleanField(default=False))]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-02-24 05:37\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('autotasks', '0017_auto_20210210_1512'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='automatedtask',\n name='run_asap_after_missed',\n field=models.BooleanField(default=False),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def main() ->None:
"""Entry point."""
parser = argparse.ArgumentParser(description='Motion planning script.')
parser.add_argument('--params-file-path', '-p', type=str, required=
False, default=os.path.join(os.path.dirname(__file__) +
'/motion_params.json'), help='the parameter file path')
parser.add_argument('--debug', '-d', type=bool, required=False, default
=False, help='set logging level to debug')
parser.add_argument('--output', '-o', type=str, required=False, default
=os.path.join(os.path.dirname(__file__) + '/motion_output.json'),
help='the output file path')
parser.add_argument('--blend-log', '-b', choices=['last', 'all'],
required=False, default='last', help=
'output the last list or all of the blend log')
args = parser.parse_args()
if args.debug:
LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG
LOG_CONFIG['loggers']['']['level'] = logging.DEBUG
dictConfig(LOG_CONFIG)
with open(args.params_file_path, 'r') as f:
params = json.load(f)
constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**
params['constraints'][axis]) for axis in AXIS_NAMES}
origin_from_file: List[float] = cast(List[float], params['origin'])
origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64
(c) for c in origin_from_file)))
target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[
'coordinates'])), target['max_speed']) for target in params[
'target_list']]
manager = move_manager.MoveManager(constraints=constraints)
_, blend_log = manager.plan_motion(origin=origin, target_list=
target_list, iteration_limit=params['iteration_limit'])
output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list
(vectorize(origin))}
def myconverter(obj: Any) ->Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return obj
with open(args.output, 'w') as f:
json.dump(output, f, indent=2, default=myconverter)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
LOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers':
False, 'formatters': {'basic': {'format':
'%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {
'stream_handler': {'class': 'logging.StreamHandler', 'formatter':
'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [
'stream_handler'], 'level': logging.DEBUG}}}
def main() ->None:
"""Entry point."""
parser = argparse.ArgumentParser(description='Motion planning script.')
parser.add_argument('--params-file-path', '-p', type=str, required=
False, default=os.path.join(os.path.dirname(__file__) +
'/motion_params.json'), help='the parameter file path')
parser.add_argument('--debug', '-d', type=bool, required=False, default
=False, help='set logging level to debug')
parser.add_argument('--output', '-o', type=str, required=False, default
=os.path.join(os.path.dirname(__file__) + '/motion_output.json'),
help='the output file path')
parser.add_argument('--blend-log', '-b', choices=['last', 'all'],
required=False, default='last', help=
'output the last list or all of the blend log')
args = parser.parse_args()
if args.debug:
LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG
LOG_CONFIG['loggers']['']['level'] = logging.DEBUG
dictConfig(LOG_CONFIG)
with open(args.params_file_path, 'r') as f:
params = json.load(f)
constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**
params['constraints'][axis]) for axis in AXIS_NAMES}
origin_from_file: List[float] = cast(List[float], params['origin'])
origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64
(c) for c in origin_from_file)))
target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[
'coordinates'])), target['max_speed']) for target in params[
'target_list']]
manager = move_manager.MoveManager(constraints=constraints)
_, blend_log = manager.plan_motion(origin=origin, target_list=
target_list, iteration_limit=params['iteration_limit'])
output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list
(vectorize(origin))}
def myconverter(obj: Any) ->Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return obj
with open(args.output, 'w') as f:
json.dump(output, f, indent=2, default=myconverter)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
AXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']
log = logging.getLogger(__name__)
LOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers':
False, 'formatters': {'basic': {'format':
'%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {
'stream_handler': {'class': 'logging.StreamHandler', 'formatter':
'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [
'stream_handler'], 'level': logging.DEBUG}}}
def main() ->None:
"""Entry point."""
parser = argparse.ArgumentParser(description='Motion planning script.')
parser.add_argument('--params-file-path', '-p', type=str, required=
False, default=os.path.join(os.path.dirname(__file__) +
'/motion_params.json'), help='the parameter file path')
parser.add_argument('--debug', '-d', type=bool, required=False, default
=False, help='set logging level to debug')
parser.add_argument('--output', '-o', type=str, required=False, default
=os.path.join(os.path.dirname(__file__) + '/motion_output.json'),
help='the output file path')
parser.add_argument('--blend-log', '-b', choices=['last', 'all'],
required=False, default='last', help=
'output the last list or all of the blend log')
args = parser.parse_args()
if args.debug:
LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG
LOG_CONFIG['loggers']['']['level'] = logging.DEBUG
dictConfig(LOG_CONFIG)
with open(args.params_file_path, 'r') as f:
params = json.load(f)
constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**
params['constraints'][axis]) for axis in AXIS_NAMES}
origin_from_file: List[float] = cast(List[float], params['origin'])
origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64
(c) for c in origin_from_file)))
target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[
'coordinates'])), target['max_speed']) for target in params[
'target_list']]
manager = move_manager.MoveManager(constraints=constraints)
_, blend_log = manager.plan_motion(origin=origin, target_list=
target_list, iteration_limit=params['iteration_limit'])
output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list
(vectorize(origin))}
def myconverter(obj: Any) ->Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return obj
with open(args.output, 'w') as f:
json.dump(output, f, indent=2, default=myconverter)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import json
import logging
from logging.config import dictConfig
import argparse
import numpy as np
from opentrons_hardware.hardware_control.motion_planning import move_manager
from opentrons_hardware.hardware_control.motion_planning.types import AxisConstraints, SystemConstraints, MoveTarget, vectorize, Coordinates
from typing import Dict, Any, List, cast
AXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']
log = logging.getLogger(__name__)
LOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers':
False, 'formatters': {'basic': {'format':
'%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {
'stream_handler': {'class': 'logging.StreamHandler', 'formatter':
'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [
'stream_handler'], 'level': logging.DEBUG}}}
def main() ->None:
"""Entry point."""
parser = argparse.ArgumentParser(description='Motion planning script.')
parser.add_argument('--params-file-path', '-p', type=str, required=
False, default=os.path.join(os.path.dirname(__file__) +
'/motion_params.json'), help='the parameter file path')
parser.add_argument('--debug', '-d', type=bool, required=False, default
=False, help='set logging level to debug')
parser.add_argument('--output', '-o', type=str, required=False, default
=os.path.join(os.path.dirname(__file__) + '/motion_output.json'),
help='the output file path')
parser.add_argument('--blend-log', '-b', choices=['last', 'all'],
required=False, default='last', help=
'output the last list or all of the blend log')
args = parser.parse_args()
if args.debug:
LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG
LOG_CONFIG['loggers']['']['level'] = logging.DEBUG
dictConfig(LOG_CONFIG)
with open(args.params_file_path, 'r') as f:
params = json.load(f)
constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**
params['constraints'][axis]) for axis in AXIS_NAMES}
origin_from_file: List[float] = cast(List[float], params['origin'])
origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64
(c) for c in origin_from_file)))
target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[
'coordinates'])), target['max_speed']) for target in params[
'target_list']]
manager = move_manager.MoveManager(constraints=constraints)
_, blend_log = manager.plan_motion(origin=origin, target_list=
target_list, iteration_limit=params['iteration_limit'])
output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list
(vectorize(origin))}
def myconverter(obj: Any) ->Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return obj
with open(args.output, 'w') as f:
json.dump(output, f, indent=2, default=myconverter)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""A simple script to create a motion plan."""
import os
import json
import logging
from logging.config import dictConfig
import argparse
import numpy as np
from opentrons_hardware.hardware_control.motion_planning import move_manager
from opentrons_hardware.hardware_control.motion_planning.types import (
AxisConstraints,
SystemConstraints,
MoveTarget,
vectorize,
Coordinates,
)
from typing import Dict, Any, List, cast
AXIS_NAMES = ["X", "Y", "Z", "A", "B", "C"]
log = logging.getLogger(__name__)
LOG_CONFIG: Dict[str, Any] = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"basic": {"format": "%(asctime)s %(name)s %(levelname)s %(message)s"}
},
"handlers": {
"stream_handler": {
"class": "logging.StreamHandler",
"formatter": "basic",
"level": logging.INFO,
},
},
"loggers": {
"": {
"handlers": ["stream_handler"],
"level": logging.DEBUG,
},
},
}
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(description="Motion planning script.")
parser.add_argument(
"--params-file-path",
"-p",
type=str,
required=False,
default=os.path.join(os.path.dirname(__file__) + "/motion_params.json"),
help="the parameter file path",
)
parser.add_argument(
"--debug",
"-d",
type=bool,
required=False,
default=False,
help="set logging level to debug",
)
parser.add_argument(
"--output",
"-o",
type=str,
required=False,
default=os.path.join(os.path.dirname(__file__) + "/motion_output.json"),
help="the output file path",
)
parser.add_argument(
"--blend-log",
"-b",
choices=["last", "all"],
required=False,
default="last",
help="output the last list or all of the blend log",
)
args = parser.parse_args()
if args.debug:
LOG_CONFIG["handlers"]["stream_handler"]["level"] = logging.DEBUG
LOG_CONFIG["loggers"][""]["level"] = logging.DEBUG
dictConfig(LOG_CONFIG)
with open(args.params_file_path, "r") as f:
params = json.load(f)
constraints: SystemConstraints[str] = {
axis: AxisConstraints.build(**params["constraints"][axis])
for axis in AXIS_NAMES
}
origin_from_file: List[float] = cast(List[float], params["origin"])
origin: Coordinates[str, np.float64] = dict(
zip(AXIS_NAMES, (np.float64(c) for c in origin_from_file))
)
target_list = [
MoveTarget.build(
dict(zip(AXIS_NAMES, target["coordinates"])), target["max_speed"]
)
for target in params["target_list"]
]
manager = move_manager.MoveManager(constraints=constraints)
_, blend_log = manager.plan_motion(
origin=origin,
target_list=target_list,
iteration_limit=params["iteration_limit"],
)
output = {
"moves": [v.to_dict() for v in blend_log[-1]],
"origin": list(vectorize(origin)),
}
def myconverter(obj: Any) -> Any:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
return obj
with open(args.output, "w") as f:
json.dump(output, f, indent=2, default=myconverter)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "b7d75c2523dba0baaf06ba270045a4a344b8156c",
"index": 3023,
"step-1": "<mask token>\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, 
indent=2, default=myconverter)\n\n\n<mask token>\n",
"step-2": "<mask token>\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n 
target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nAXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']\nlog = logging.getLogger(__name__)\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = 
move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport os\nimport json\nimport logging\nfrom logging.config import dictConfig\nimport argparse\nimport numpy as np\nfrom opentrons_hardware.hardware_control.motion_planning import move_manager\nfrom opentrons_hardware.hardware_control.motion_planning.types import AxisConstraints, SystemConstraints, MoveTarget, vectorize, Coordinates\nfrom typing import Dict, Any, List, cast\nAXIS_NAMES = ['X', 'Y', 'Z', 'A', 'B', 'C']\nlog = logging.getLogger(__name__)\nLOG_CONFIG: Dict[str, Any] = {'version': 1, 'disable_existing_loggers': \n False, 'formatters': {'basic': {'format':\n '%(asctime)s %(name)s %(levelname)s %(message)s'}}, 'handlers': {\n 'stream_handler': {'class': 'logging.StreamHandler', 'formatter':\n 'basic', 'level': logging.INFO}}, 'loggers': {'': {'handlers': [\n 'stream_handler'], 'level': logging.DEBUG}}}\n\n\ndef main() ->None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description='Motion planning script.')\n parser.add_argument('--params-file-path', '-p', type=str, required=\n False, default=os.path.join(os.path.dirname(__file__) +\n '/motion_params.json'), help='the parameter file path')\n parser.add_argument('--debug', '-d', type=bool, required=False, default\n =False, help='set logging level to debug')\n parser.add_argument('--output', '-o', type=str, required=False, default\n =os.path.join(os.path.dirname(__file__) + '/motion_output.json'),\n help='the output file path')\n parser.add_argument('--blend-log', '-b', choices=['last', 'all'],\n required=False, default='last', help=\n 'output the last list or all of the blend log')\n args = parser.parse_args()\n if args.debug:\n LOG_CONFIG['handlers']['stream_handler']['level'] = logging.DEBUG\n LOG_CONFIG['loggers']['']['level'] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n with open(args.params_file_path, 'r') as f:\n params = json.load(f)\n constraints: SystemConstraints[str] = {axis: AxisConstraints.build(**\n params['constraints'][axis]) for axis in 
AXIS_NAMES}\n origin_from_file: List[float] = cast(List[float], params['origin'])\n origin: Coordinates[str, np.float64] = dict(zip(AXIS_NAMES, (np.float64\n (c) for c in origin_from_file)))\n target_list = [MoveTarget.build(dict(zip(AXIS_NAMES, target[\n 'coordinates'])), target['max_speed']) for target in params[\n 'target_list']]\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(origin=origin, target_list=\n target_list, iteration_limit=params['iteration_limit'])\n output = {'moves': [v.to_dict() for v in blend_log[-1]], 'origin': list\n (vectorize(origin))}\n\n def myconverter(obj: Any) ->Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n with open(args.output, 'w') as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"A simple script to create a motion plan.\"\"\"\nimport os\nimport json\nimport logging\nfrom logging.config import dictConfig\nimport argparse\nimport numpy as np\n\nfrom opentrons_hardware.hardware_control.motion_planning import move_manager\nfrom opentrons_hardware.hardware_control.motion_planning.types import (\n AxisConstraints,\n SystemConstraints,\n MoveTarget,\n vectorize,\n Coordinates,\n)\nfrom typing import Dict, Any, List, cast\n\nAXIS_NAMES = [\"X\", \"Y\", \"Z\", \"A\", \"B\", \"C\"]\n\nlog = logging.getLogger(__name__)\n\nLOG_CONFIG: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"basic\": {\"format\": \"%(asctime)s %(name)s %(levelname)s %(message)s\"}\n },\n \"handlers\": {\n \"stream_handler\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"basic\",\n \"level\": logging.INFO,\n },\n },\n \"loggers\": {\n \"\": {\n \"handlers\": [\"stream_handler\"],\n \"level\": logging.DEBUG,\n },\n },\n}\n\n\ndef main() -> None:\n \"\"\"Entry point.\"\"\"\n parser = argparse.ArgumentParser(description=\"Motion planning script.\")\n parser.add_argument(\n \"--params-file-path\",\n \"-p\",\n type=str,\n required=False,\n default=os.path.join(os.path.dirname(__file__) + \"/motion_params.json\"),\n help=\"the parameter file path\",\n )\n parser.add_argument(\n \"--debug\",\n \"-d\",\n type=bool,\n required=False,\n default=False,\n help=\"set logging level to debug\",\n )\n parser.add_argument(\n \"--output\",\n \"-o\",\n type=str,\n required=False,\n default=os.path.join(os.path.dirname(__file__) + \"/motion_output.json\"),\n help=\"the output file path\",\n )\n parser.add_argument(\n \"--blend-log\",\n \"-b\",\n choices=[\"last\", \"all\"],\n required=False,\n default=\"last\",\n help=\"output the last list or all of the blend log\",\n )\n args = parser.parse_args()\n\n if args.debug:\n LOG_CONFIG[\"handlers\"][\"stream_handler\"][\"level\"] = logging.DEBUG\n 
LOG_CONFIG[\"loggers\"][\"\"][\"level\"] = logging.DEBUG\n dictConfig(LOG_CONFIG)\n\n with open(args.params_file_path, \"r\") as f:\n params = json.load(f)\n\n constraints: SystemConstraints[str] = {\n axis: AxisConstraints.build(**params[\"constraints\"][axis])\n for axis in AXIS_NAMES\n }\n origin_from_file: List[float] = cast(List[float], params[\"origin\"])\n origin: Coordinates[str, np.float64] = dict(\n zip(AXIS_NAMES, (np.float64(c) for c in origin_from_file))\n )\n target_list = [\n MoveTarget.build(\n dict(zip(AXIS_NAMES, target[\"coordinates\"])), target[\"max_speed\"]\n )\n for target in params[\"target_list\"]\n ]\n\n manager = move_manager.MoveManager(constraints=constraints)\n _, blend_log = manager.plan_motion(\n origin=origin,\n target_list=target_list,\n iteration_limit=params[\"iteration_limit\"],\n )\n\n output = {\n \"moves\": [v.to_dict() for v in blend_log[-1]],\n \"origin\": list(vectorize(origin)),\n }\n\n def myconverter(obj: Any) -> Any:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n return obj\n\n with open(args.output, \"w\") as f:\n json.dump(output, f, indent=2, default=myconverter)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .sgd import StochasticGradientDescent
from .momentum import Momentum
|
flexible
|
{
"blob_id": "aa55f1dd4f363e07d5f9104346efaa24c0457d45",
"index": 9126,
"step-1": "<mask token>\n",
"step-2": "from .sgd import StochasticGradientDescent\nfrom .momentum import Momentum\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# 总管buffer和policy
from os import path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributions as distributions
import numpy as np
from torch.serialization import load
import global_var as gv
torch.set_default_dtype(gv.torch_default_type)
class PG_Agent(object):
def __init__(
self,
env,
policy: torch.nn.modules.container.Sequential,
learning_rate: float,
n_policy: int, # 迭代多少个策略
n_episode: int, # 每个策略下输出多少个episode用来更新该策略
max_timesteps: int # 最多一个episode多个步,免得一个很强的策略出来以后episode不终止了
) -> None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
# self.buffer = buffer
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.learning_rate)
def get_acs(self, obs):
'''
obs is shape (batch_size, n_dim)
'''
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs # shape (batch_size,)
def get_ac(self, ob):
'''
ob is shape (n_dim,)
'''
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1,-1))
# 按照概率分布来获取ac,而不是直接取较大Logit者,这里dubug了好久,烦烦烦
# ac = torch.argmax(logits)
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render = False):
next_ob = self.env.reset().reshape(1,-1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1,-1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
terminals.append(done)
# break
if done or timesteps > self.max_timesteps:
break
# print(acs, type(acs), 'acs')
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)), torch.tensor(res), torch.tensor(terminals)
def train(self):
'''
for _ in 轮数:
由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode
# 那么也不需要buffer了
按照既有策略生成buffer
从buffer中获取数据
利用loss计算j tilder
求梯度
更新loss
'''
# print(self.policy.state_dict(), 'p1')
for i_policy in range(self.n_policy):
J = 0 # j tilda,也就是loss
q = 0
for i_episode in range(self.n_episode):
# 生成
obs, acs, next_obs, res, terminals = self.generate_episode()
# print(acs, acs.shape, 'acs')
assert(len(obs)==len(next_obs)==len(res)==len(acs)==len(terminals))
r_tau = sum(res)
logits = self.policy(obs)
# print(logits, logits.shape, 'logits')
# print(acs, type(acs))
criterion = nn.CrossEntropyLoss(reduction='sum') # 注意这里要选择sum才对,否则和policy gradient的公式并不一样,导致训练一直没有效果,难受啊,找了好久这个问题
negative_likelihoods = criterion(logits, acs)
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
negative_likelihoods = negative_likelihoods.sum()
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
# print(r_tau, 'r_tau')
J += negative_likelihoods*r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(f"第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q/self.n_episode}") # 这里的loss估计不对,要用平均每次的
J.backward()
self.optimizer.step()
# print(self.policy.state_dict(), 'p2')
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
|
normal
|
{
"blob_id": "b2cfd397e48213a540608fc232db2eab282935bb",
"index": 1481,
"step-1": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n <mask token>\n <mask token>\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-2": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n <mask token>\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-3": "<mask token>\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n\n def generate_episode(self, render=False):\n next_ob = self.env.reset().reshape(1, -1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1, -1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n if done or timesteps > self.max_timesteps:\n break\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)\n ), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)\n ), torch.tensor(res), torch.tensor(terminals)\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = 
self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-4": "from os import path\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.distributions as distributions\nimport numpy as np\nfrom torch.serialization import load\nimport global_var as gv\ntorch.set_default_dtype(gv.torch_default_type)\n\n\nclass PG_Agent(object):\n\n def __init__(self, env, policy: torch.nn.modules.container.Sequential,\n learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int\n ) ->None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.\n learning_rate)\n\n def get_acs(self, obs):\n \"\"\"\n obs is shape (batch_size, n_dim)\n \"\"\"\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs\n\n def get_ac(self, ob):\n \"\"\"\n ob is shape (n_dim,)\n \"\"\"\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1, -1))\n distri = distributions.Categorical(logits=logits)\n return distri.sample().item()\n\n def generate_episode(self, render=False):\n next_ob = self.env.reset().reshape(1, -1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1, -1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n if done or timesteps > self.max_timesteps:\n break\n return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)\n ), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)\n ), torch.tensor(res), torch.tensor(terminals)\n\n def train(self):\n \"\"\"\n for _ in 轮数:\n 
由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n \"\"\"\n for i_policy in range(self.n_policy):\n J = 0\n q = 0\n for i_episode in range(self.n_episode):\n obs, acs, next_obs, res, terminals = self.generate_episode()\n assert len(obs) == len(next_obs) == len(res) == len(acs\n ) == len(terminals)\n r_tau = sum(res)\n logits = self.policy(obs)\n criterion = nn.CrossEntropyLoss(reduction='sum')\n negative_likelihoods = criterion(logits, acs)\n negative_likelihoods = negative_likelihoods.sum()\n J += negative_likelihoods * r_tau\n q += res.sum().item()\n J /= self.n_episode\n self.optimizer.zero_grad()\n print(\n f'第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q / self.n_episode}'\n )\n J.backward()\n self.optimizer.step()\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n",
"step-5": "# 总管buffer和policy\n\n\n\nfrom os import path\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.distributions as distributions\nimport numpy as np\nfrom torch.serialization import load\n\nimport global_var as gv\n\ntorch.set_default_dtype(gv.torch_default_type)\n\nclass PG_Agent(object):\n def __init__(\n self,\n env,\n policy: torch.nn.modules.container.Sequential, \n learning_rate: float,\n n_policy: int, # 迭代多少个策略\n n_episode: int, # 每个策略下输出多少个episode用来更新该策略\n max_timesteps: int # 最多一个episode多个步,免得一个很强的策略出来以后episode不终止了\n ) -> None:\n super().__init__()\n self.env = env\n self.policy = policy\n self.learning_rate = learning_rate\n # self.buffer = buffer\n self.n_policy = n_policy\n self.n_episode = n_episode\n self.max_timesteps = max_timesteps\n\n self.optimizer = optim.Adam(self.policy.parameters(), lr=self.learning_rate)\n\n def get_acs(self, obs):\n '''\n obs is shape (batch_size, n_dim)\n '''\n logits = self.policy(obs)\n acs = torch.argmax(logits, dim=1)\n return acs # shape (batch_size,)\n \n def get_ac(self, ob):\n '''\n ob is shape (n_dim,)\n '''\n if isinstance(ob, np.ndarray):\n ob = torch.from_numpy(ob.astype(gv.np_default_type))\n logits = self.policy(ob.view(1,-1))\n # 按照概率分布来获取ac,而不是直接取较大Logit者,这里dubug了好久,烦烦烦\n # ac = torch.argmax(logits)\n distri = distributions.Categorical(logits=logits)\n\n return distri.sample().item()\n\n def generate_episode(self, render = False):\n next_ob = self.env.reset().reshape(1,-1)\n if render:\n self.env.render()\n timesteps = 0\n obs = []\n acs = []\n next_obs = []\n res = []\n terminals = []\n while True:\n ob = next_ob\n ac = self.get_ac(ob)\n next_ob, re, done, info = self.env.step(ac)\n if render:\n self.env.render()\n next_ob = next_ob.reshape(1,-1)\n obs.append(ob)\n acs.append(ac)\n next_obs.append(next_ob)\n res.append(re)\n terminals.append(done)\n # break\n if done or timesteps > self.max_timesteps:\n break\n # print(acs, type(acs), 'acs')\n return 
torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)), torch.tensor(res), torch.tensor(terminals)\n\n\n def train(self):\n '''\n for _ in 轮数:\n 由于不知道如何处理不同的episode的timesteps不一样的问题,所以设置batch_size为1,每次只处理一个episode\n # 那么也不需要buffer了\n 按照既有策略生成buffer\n 从buffer中获取数据\n\n 利用loss计算j tilder\n 求梯度\n 更新loss\n\n '''\n # print(self.policy.state_dict(), 'p1')\n for i_policy in range(self.n_policy):\n J = 0 # j tilda,也就是loss\n q = 0\n for i_episode in range(self.n_episode):\n # 生成\n obs, acs, next_obs, res, terminals = self.generate_episode()\n # print(acs, acs.shape, 'acs')\n assert(len(obs)==len(next_obs)==len(res)==len(acs)==len(terminals))\n r_tau = sum(res)\n logits = self.policy(obs)\n\n # print(logits, logits.shape, 'logits')\n # print(acs, type(acs))\n\n criterion = nn.CrossEntropyLoss(reduction='sum') # 注意这里要选择sum才对,否则和policy gradient的公式并不一样,导致训练一直没有效果,难受啊,找了好久这个问题\n negative_likelihoods = criterion(logits, acs)\n # print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')\n negative_likelihoods = negative_likelihoods.sum()\n # print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')\n # print(r_tau, 'r_tau')\n J += negative_likelihoods*r_tau\n q += res.sum().item()\n \n J /= self.n_episode\n self.optimizer.zero_grad()\n print(f\"第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q/self.n_episode}\") # 这里的loss估计不对,要用平均每次的\n J.backward()\n self.optimizer.step()\n\n # print(self.policy.state_dict(), 'p2')\n\n def save_policy(self, path='policy.pth'):\n torch.save(self.policy, path)\n\n def load_policy(self, path='policy.pth'):\n self.policy = torch.load(path)\n\n\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ANSWER_WITH_CORRECTION = [[[Human, What, Is, BlockObjectThis], [
HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,
What, Is, BlockObjectThis, AbstractDescription], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, What, Is,
BlockObjectThat], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, What, Is, BlockObjectThat,
AbstractDescription], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskSize, BlockObjectThis], [
HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,
AskSize, BlockObjectThis, AbstractDescription], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskSize,
BlockObjectThis, ConcreteDescription], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskSize,
BlockObjectThat], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,
AbstractDescription], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,
ConcreteDescription], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskColour, BlockObjectThis], [
HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,
AskColour, BlockObjectThis, AbstractDescription], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskColour,
BlockObjectThis, ConcreteDescription], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskColour,
BlockObjectThat], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,
AbstractDescription], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,
ConcreteDescription], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Size], [
HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,
AskIs, BlockObjectThis, AbstractDescription, Size], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskIs,
BlockObjectThis, ConcreteDescription, Size], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskIs,
BlockObjectThat, Size], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,
AbstractDescription, Size], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,
ConcreteDescription, Size], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,
BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,
BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Colour], [
HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,
AskIs, BlockObjectThis, AbstractDescription, Colour], [HumanReplace,
The, AbstractDescription, BlockObjectLocation]], [[Human, AskIs,
BlockObjectThis, ConcreteDescription, Colour], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskIs,
BlockObjectThat, Colour], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,
AbstractDescription, Colour], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,
ConcreteDescription, Colour], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,
BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,
BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,
ConcreteDescription], [HumanReplace, The, AbstractDescription,
BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,
AbstractDescription, ConcreteDescription], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskIs,
BlockObjectThat, ConcreteDescription], [HumanReplace, The,
AbstractDescription, BlockObjectLocation]], [[Human, AskIs,
BlockObjectThat, AbstractDescription, ConcreteDescription], [
HumanReplace, The, AbstractDescription, BlockObjectLocation]]]
ANSWER_TEMPLATES = [[Human, What, Is, BlockObjectThis], [Human, What, Is,
BlockObjectThis, AbstractDescription], [Human, What, Is,
BlockObjectThat], [Human, What, Is, BlockObjectThat,
AbstractDescription], [Human, What, Is, BlockObjectLocation], [Human,
What, Is, The, AbstractDescription, BlockObjectLocation], [Human,
WhatSee, BlockObjectLocation], [Human, AskSize, BlockObjectThis], [
Human, AskSize, BlockObjectThis, AbstractDescription], [Human, AskSize,
BlockObjectThis, ConcreteDescription], [Human, AskSize, BlockObjectThat
], [Human, AskSize, BlockObjectThat, AbstractDescription], [Human,
AskSize, BlockObjectThat, ConcreteDescription], [Human, AskSize, The,
AbstractDescription, BlockObjectLocation], [Human, AskSize, The,
ConcreteDescription, BlockObjectLocation], [Human, AskColour,
BlockObjectThis], [Human, AskColour, BlockObjectThis,
AbstractDescription], [Human, AskColour, BlockObjectThis,
ConcreteDescription], [Human, AskColour, BlockObjectThat], [Human,
AskColour, BlockObjectThat, AbstractDescription], [Human, AskColour,
BlockObjectThat, ConcreteDescription], [Human, AskColour, The,
AbstractDescription, BlockObjectLocation], [Human, AskColour, The,
ConcreteDescription, BlockObjectLocation], [Human, AskIs,
BlockObjectThis, Size], [Human, AskIs, BlockObjectThis,
AbstractDescription, Size], [Human, AskIs, BlockObjectThis,
ConcreteDescription, Size], [Human, AskIs, BlockObjectThat, Size], [
Human, AskIs, BlockObjectThat, AbstractDescription, Size], [Human,
AskIs, BlockObjectThat, ConcreteDescription, Size], [Human, AskIs, The,
AbstractDescription, BlockObjectLocation, Size], [Human, AskIs, The,
ConcreteDescription, BlockObjectLocation, Size], [Human, AskIs,
BlockObjectThis, Colour], [Human, AskIs, BlockObjectThis,
AbstractDescription, Colour], [Human, AskIs, BlockObjectThis,
ConcreteDescription, Colour], [Human, AskIs, BlockObjectThat, Colour],
[Human, AskIs, BlockObjectThat, AbstractDescription, Colour], [Human,
AskIs, BlockObjectThat, ConcreteDescription, Colour], [Human, AskIs,
The, AbstractDescription, BlockObjectLocation, Colour], [Human, AskIs,
The, ConcreteDescription, BlockObjectLocation, Colour], [Human, AskIs,
BlockObjectThis, ConcreteDescription], [Human, AskIs, BlockObjectThis,
AbstractDescription, ConcreteDescription], [Human, AskIs,
BlockObjectThat, ConcreteDescription], [Human, AskIs, BlockObjectThat,
AbstractDescription, ConcreteDescription], [Human, AskIs, The,
AbstractDescription, BlockObjectLocation, ConcreteDescription]]
GET_MEMORY_TEMPLATES = [[Human, QueryBotCurrentAction], [Human, QueryBot,
ActionReferenceObjectName], [Human, QueryBot, MoveTarget], [Human,
QueryBot, CurrentLocation]] + ANSWER_TEMPLATES
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from template_objects import *
# Question templates paired with a human correction turn.  Every pair uses the
# same correction — "the <abstract thing> <at location>" — so the suffix is
# spelled out once and attached to each question by comprehension.  A fresh
# inner list is built per entry, matching the original literal form.
ANSWER_WITH_CORRECTION = [
    [question, [HumanReplace, The, AbstractDescription, BlockObjectLocation]]
    for question in [
        # what is X
        [Human, What, Is, BlockObjectThis],
        [Human, What, Is, BlockObjectThis, AbstractDescription],
        [Human, What, Is, BlockObjectThat],
        [Human, What, Is, BlockObjectThat, AbstractDescription],
        # what size is X
        [Human, AskSize, BlockObjectThis],
        [Human, AskSize, BlockObjectThis, AbstractDescription],
        [Human, AskSize, BlockObjectThis, ConcreteDescription],
        [Human, AskSize, BlockObjectThat],
        [Human, AskSize, BlockObjectThat, AbstractDescription],
        [Human, AskSize, BlockObjectThat, ConcreteDescription],
        # what colour is X
        [Human, AskColour, BlockObjectThis],
        [Human, AskColour, BlockObjectThis, AbstractDescription],
        [Human, AskColour, BlockObjectThis, ConcreteDescription],
        [Human, AskColour, BlockObjectThat],
        [Human, AskColour, BlockObjectThat, AbstractDescription],
        [Human, AskColour, BlockObjectThat, ConcreteDescription],
        # is X <size>
        [Human, AskIs, BlockObjectThis, Size],
        [Human, AskIs, BlockObjectThis, AbstractDescription, Size],
        [Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
        [Human, AskIs, BlockObjectThat, Size],
        [Human, AskIs, BlockObjectThat, AbstractDescription, Size],
        [Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
        [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
        [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
        # is X <colour>
        [Human, AskIs, BlockObjectThis, Colour],
        [Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
        [Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
        [Human, AskIs, BlockObjectThat, Colour],
        [Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
        [Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
        [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
        [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
        # is X a Y
        [Human, AskIs, BlockObjectThis, ConcreteDescription],
        [Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
        [Human, AskIs, BlockObjectThat, ConcreteDescription],
        [Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
    ]
]
# Question templates the bot can answer directly (no correction turn).
ANSWER_TEMPLATES = [
    # "what is X"
    [Human, What, Is, BlockObjectThis],
    [Human, What, Is, BlockObjectThis, AbstractDescription],
    [Human, What, Is, BlockObjectThat],
    [Human, What, Is, BlockObjectThat, AbstractDescription],
    # "what is at X" / "what do you see at X"
    [Human, What, Is, BlockObjectLocation],
    [Human, What, Is, The, AbstractDescription, BlockObjectLocation],
    [Human, WhatSee, BlockObjectLocation],
    # "what size is X" (incl. "X at Y")
    [Human, AskSize, BlockObjectThis],
    [Human, AskSize, BlockObjectThis, AbstractDescription],
    [Human, AskSize, BlockObjectThis, ConcreteDescription],
    [Human, AskSize, BlockObjectThat],
    [Human, AskSize, BlockObjectThat, AbstractDescription],
    [Human, AskSize, BlockObjectThat, ConcreteDescription],
    [Human, AskSize, The, AbstractDescription, BlockObjectLocation],
    [Human, AskSize, The, ConcreteDescription, BlockObjectLocation],
    # "what colour is X" (incl. "X at Y")
    [Human, AskColour, BlockObjectThis],
    [Human, AskColour, BlockObjectThis, AbstractDescription],
    [Human, AskColour, BlockObjectThis, ConcreteDescription],
    [Human, AskColour, BlockObjectThat],
    [Human, AskColour, BlockObjectThat, AbstractDescription],
    [Human, AskColour, BlockObjectThat, ConcreteDescription],
    [Human, AskColour, The, AbstractDescription, BlockObjectLocation],
    [Human, AskColour, The, ConcreteDescription, BlockObjectLocation],
    # "is X <size>"
    [Human, AskIs, BlockObjectThis, Size],
    [Human, AskIs, BlockObjectThis, AbstractDescription, Size],
    [Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
    [Human, AskIs, BlockObjectThat, Size],
    [Human, AskIs, BlockObjectThat, AbstractDescription, Size],
    [Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
    [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
    [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
    # "is X <colour>"
    [Human, AskIs, BlockObjectThis, Colour],
    [Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
    [Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
    [Human, AskIs, BlockObjectThat, Colour],
    [Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
    [Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
    [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
    [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
    # "is X a Y" (incl. "X at Y a Z")
    [Human, AskIs, BlockObjectThis, ConcreteDescription],
    [Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
    [Human, AskIs, BlockObjectThat, ConcreteDescription],
    [Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
    [Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],
]
# Templates that fetch from memory: four queries about the bot's own state,
# followed by all of the answer templates (answers are memory fetches too).
GET_MEMORY_TEMPLATES = [
    [Human, QueryBotCurrentAction],                # "what are you doing"
    [Human, QueryBot, ActionReferenceObjectName],  # "what are you building"
    [Human, QueryBot, MoveTarget],                 # "where are you heading"
    [Human, QueryBot, CurrentLocation],            # "where are you"
] + ANSWER_TEMPLATES
<|reserved_special_token_1|>
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# fmt: off
"""
Every template contains an ordered list of TemplateObjects.
TemplateObject is defined in template_objects.py
GetMemory templates are written for filters and have an answer_type
They represent the action of fetching from the memory using the filters.
Examples:
[Human, QueryBotCurrentAction],
- human: what are you doing
- human: what are you up to
[Human, QueryBot, MoveTarget],
- human: where you going
- human: where are you heading
"""
from template_objects import *
# Question templates paired with a human correction turn.  Every pair uses the
# same correction — "the <abstract thing> <at location>" — so the suffix is
# written once and attached to each question by comprehension; a fresh inner
# list is built per entry, matching the original literal form.
ANSWER_WITH_CORRECTION = [
    [question, [HumanReplace, The, AbstractDescription, BlockObjectLocation]]
    for question in [
        ## what is X ##
        [Human, What, Is, BlockObjectThis],
        [Human, What, Is, BlockObjectThis, AbstractDescription],
        [Human, What, Is, BlockObjectThat],
        [Human, What, Is, BlockObjectThat, AbstractDescription],
        ## what size is X ##
        [Human, AskSize, BlockObjectThis],
        [Human, AskSize, BlockObjectThis, AbstractDescription],
        [Human, AskSize, BlockObjectThis, ConcreteDescription],
        [Human, AskSize, BlockObjectThat],
        [Human, AskSize, BlockObjectThat, AbstractDescription],
        [Human, AskSize, BlockObjectThat, ConcreteDescription],
        ## what colour is X ##
        [Human, AskColour, BlockObjectThis],
        [Human, AskColour, BlockObjectThis, AbstractDescription],
        [Human, AskColour, BlockObjectThis, ConcreteDescription],
        [Human, AskColour, BlockObjectThat],
        [Human, AskColour, BlockObjectThat, AbstractDescription],
        [Human, AskColour, BlockObjectThat, ConcreteDescription],
        ## is X <size> ##
        [Human, AskIs, BlockObjectThis, Size],
        [Human, AskIs, BlockObjectThis, AbstractDescription, Size],
        [Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
        [Human, AskIs, BlockObjectThat, Size],
        [Human, AskIs, BlockObjectThat, AbstractDescription, Size],
        [Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
        [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
        [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
        ## is X <colour> ##
        [Human, AskIs, BlockObjectThis, Colour],
        [Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
        [Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
        [Human, AskIs, BlockObjectThat, Colour],
        [Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
        [Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
        [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
        [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
        ## is X a Y ##
        [Human, AskIs, BlockObjectThis, ConcreteDescription],
        [Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
        [Human, AskIs, BlockObjectThat, ConcreteDescription],
        [Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
    ]
]
# Question templates the bot can answer directly, grouped by question family.
ANSWER_TEMPLATES = (
    ## 1. What is X ##
    [
        [Human, What, Is, BlockObjectThis],
        [Human, What, Is, BlockObjectThis, AbstractDescription],
        [Human, What, Is, BlockObjectThat],
        [Human, What, Is, BlockObjectThat, AbstractDescription],
    ]
    ## 2. What is at X / What do you see at X ##
    + [
        [Human, What, Is, BlockObjectLocation],
        [Human, What, Is, The, AbstractDescription, BlockObjectLocation],
        [Human, WhatSee, BlockObjectLocation],
    ]
    ## 3–4. What size is X / X at Y ##
    + [
        [Human, AskSize, BlockObjectThis],
        [Human, AskSize, BlockObjectThis, AbstractDescription],
        [Human, AskSize, BlockObjectThis, ConcreteDescription],
        [Human, AskSize, BlockObjectThat],
        [Human, AskSize, BlockObjectThat, AbstractDescription],
        [Human, AskSize, BlockObjectThat, ConcreteDescription],
        [Human, AskSize, The, AbstractDescription, BlockObjectLocation],
        [Human, AskSize, The, ConcreteDescription, BlockObjectLocation],
    ]
    ## 5–6. What colour is X / X at Y ##
    + [
        [Human, AskColour, BlockObjectThis],
        [Human, AskColour, BlockObjectThis, AbstractDescription],
        [Human, AskColour, BlockObjectThis, ConcreteDescription],
        [Human, AskColour, BlockObjectThat],
        [Human, AskColour, BlockObjectThat, AbstractDescription],
        [Human, AskColour, BlockObjectThat, ConcreteDescription],
        [Human, AskColour, The, AbstractDescription, BlockObjectLocation],
        [Human, AskColour, The, ConcreteDescription, BlockObjectLocation],
    ]
    ## 7. Is X Y (size, then colour) ##
    + [
        [Human, AskIs, BlockObjectThis, Size],
        [Human, AskIs, BlockObjectThis, AbstractDescription, Size],
        [Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
        [Human, AskIs, BlockObjectThat, Size],
        [Human, AskIs, BlockObjectThat, AbstractDescription, Size],
        [Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
        [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
        [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
        [Human, AskIs, BlockObjectThis, Colour],
        [Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
        [Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
        [Human, AskIs, BlockObjectThat, Colour],
        [Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
        [Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
        [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
        [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
    ]
    ## 8–9. Is X a Y / Is X at Y a Z ##
    + [
        [Human, AskIs, BlockObjectThis, ConcreteDescription],
        [Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
        [Human, AskIs, BlockObjectThat, ConcreteDescription],
        [Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
        [Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],
    ]
)
# Templates that fetch from memory.  Four queries about the bot's own state
# come first; every answer template is also a memory fetch, so the answer
# group is appended wholesale.
GET_MEMORY_TEMPLATES = [
    [Human, QueryBotCurrentAction],                # "what are you doing"
    [Human, QueryBot, ActionReferenceObjectName],  # "what are you building"
    [Human, QueryBot, MoveTarget],                 # "where are you heading"
    [Human, QueryBot, CurrentLocation],            # "where are you"
] + ANSWER_TEMPLATES
|
flexible
|
{
"blob_id": "ceb714e949a72f621aec8b8728fbd1201e22afd1",
"index": 8705,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nANSWER_WITH_CORRECTION = [[[Human, What, Is, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n What, Is, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, What, Is,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, What, Is, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskSize, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskColour, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Size], [\n HumanReplace, 
The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Colour], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Colour], [HumanReplace,\n The, AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Colour], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, 
BlockObjectThis,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, AbstractDescription, ConcreteDescription], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]]]\nANSWER_TEMPLATES = [[Human, What, Is, BlockObjectThis], [Human, What, Is,\n BlockObjectThis, AbstractDescription], [Human, What, Is,\n BlockObjectThat], [Human, What, Is, BlockObjectThat,\n AbstractDescription], [Human, What, Is, BlockObjectLocation], [Human,\n What, Is, The, AbstractDescription, BlockObjectLocation], [Human,\n WhatSee, BlockObjectLocation], [Human, AskSize, BlockObjectThis], [\n Human, AskSize, BlockObjectThis, AbstractDescription], [Human, AskSize,\n BlockObjectThis, ConcreteDescription], [Human, AskSize, BlockObjectThat\n ], [Human, AskSize, BlockObjectThat, AbstractDescription], [Human,\n AskSize, BlockObjectThat, ConcreteDescription], [Human, AskSize, The,\n AbstractDescription, BlockObjectLocation], [Human, AskSize, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskColour,\n BlockObjectThis], [Human, AskColour, BlockObjectThis,\n AbstractDescription], [Human, AskColour, BlockObjectThis,\n ConcreteDescription], [Human, AskColour, BlockObjectThat], [Human,\n AskColour, BlockObjectThat, AbstractDescription], [Human, AskColour,\n BlockObjectThat, ConcreteDescription], [Human, AskColour, The,\n AbstractDescription, BlockObjectLocation], [Human, AskColour, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskIs,\n BlockObjectThis, Size], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Size], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Size], [Human, AskIs, BlockObjectThat, Size], [\n Human, AskIs, 
BlockObjectThat, AbstractDescription, Size], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Size], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, Size], [Human, AskIs, The,\n ConcreteDescription, BlockObjectLocation, Size], [Human, AskIs,\n BlockObjectThis, Colour], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Colour], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Colour], [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Colour], [Human, AskIs,\n The, AbstractDescription, BlockObjectLocation, Colour], [Human, AskIs,\n The, ConcreteDescription, BlockObjectLocation, Colour], [Human, AskIs,\n BlockObjectThis, ConcreteDescription], [Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [Human, AskIs,\n BlockObjectThat, ConcreteDescription], [Human, AskIs, BlockObjectThat,\n AbstractDescription, ConcreteDescription], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, ConcreteDescription]]\nGET_MEMORY_TEMPLATES = [[Human, QueryBotCurrentAction], [Human, QueryBot,\n ActionReferenceObjectName], [Human, QueryBot, MoveTarget], [Human,\n QueryBot, CurrentLocation]] + ANSWER_TEMPLATES\n",
"step-3": "<mask token>\nfrom template_objects import *\nANSWER_WITH_CORRECTION = [[[Human, What, Is, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n What, Is, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, What, Is,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, What, Is, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskSize, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskColour, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, 
BlockObjectThis, Size], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Colour], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Colour], [HumanReplace,\n The, AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Colour], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n 
BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, AbstractDescription, ConcreteDescription], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]]]\nANSWER_TEMPLATES = [[Human, What, Is, BlockObjectThis], [Human, What, Is,\n BlockObjectThis, AbstractDescription], [Human, What, Is,\n BlockObjectThat], [Human, What, Is, BlockObjectThat,\n AbstractDescription], [Human, What, Is, BlockObjectLocation], [Human,\n What, Is, The, AbstractDescription, BlockObjectLocation], [Human,\n WhatSee, BlockObjectLocation], [Human, AskSize, BlockObjectThis], [\n Human, AskSize, BlockObjectThis, AbstractDescription], [Human, AskSize,\n BlockObjectThis, ConcreteDescription], [Human, AskSize, BlockObjectThat\n ], [Human, AskSize, BlockObjectThat, AbstractDescription], [Human,\n AskSize, BlockObjectThat, ConcreteDescription], [Human, AskSize, The,\n AbstractDescription, BlockObjectLocation], [Human, AskSize, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskColour,\n BlockObjectThis], [Human, AskColour, BlockObjectThis,\n AbstractDescription], [Human, AskColour, BlockObjectThis,\n ConcreteDescription], [Human, AskColour, BlockObjectThat], [Human,\n AskColour, BlockObjectThat, AbstractDescription], [Human, AskColour,\n BlockObjectThat, ConcreteDescription], [Human, AskColour, The,\n AbstractDescription, BlockObjectLocation], [Human, AskColour, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskIs,\n BlockObjectThis, Size], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Size], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Size], [Human, AskIs, 
BlockObjectThat, Size], [\n Human, AskIs, BlockObjectThat, AbstractDescription, Size], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Size], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, Size], [Human, AskIs, The,\n ConcreteDescription, BlockObjectLocation, Size], [Human, AskIs,\n BlockObjectThis, Colour], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Colour], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Colour], [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Colour], [Human, AskIs,\n The, AbstractDescription, BlockObjectLocation, Colour], [Human, AskIs,\n The, ConcreteDescription, BlockObjectLocation, Colour], [Human, AskIs,\n BlockObjectThis, ConcreteDescription], [Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [Human, AskIs,\n BlockObjectThat, ConcreteDescription], [Human, AskIs, BlockObjectThat,\n AbstractDescription, ConcreteDescription], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, ConcreteDescription]]\nGET_MEMORY_TEMPLATES = [[Human, QueryBotCurrentAction], [Human, QueryBot,\n ActionReferenceObjectName], [Human, QueryBot, MoveTarget], [Human,\n QueryBot, CurrentLocation]] + ANSWER_TEMPLATES\n",
"step-4": "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\n\n# fmt: off\n\"\"\"\nEvery template contains an ordered list of TemplateObjects.\nTemplateObject is defined in template_objects.py\n\nGetMemory templates are written for filters and have an answer_type\nThey represent the action of fetching from the memory using the filters.\n\nExamples:\n\n[Human, QueryBotCurrentAction],\n- human: what are you doing\n- human: what are you up to\n\n[Human, QueryBot, MoveTarget],\n- human: where you going\n- human: where are you heading\n\"\"\"\nfrom template_objects import *\n\nANSWER_WITH_CORRECTION = [\n ## what is this + the thing at location ##\n [[Human, What, Is, BlockObjectThis],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## what size is X + the thing at location ##\n [[Human, AskSize, BlockObjectThis],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## what color is X + the thing at location ##\n [[Human, AskColour, BlockObjectThis],\n [HumanReplace, The, 
AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n # Is X Y ##\n [[Human, AskIs, BlockObjectThis, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n [[Human, AskIs, BlockObjectThis, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],\n 
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## Is X a Y ##\n [[Human, AskIs, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n]\n\nANSWER_TEMPLATES = [\n # 1\n ## What is X ##\n [Human, What, Is, BlockObjectThis],\n [Human, What, Is, BlockObjectThis, AbstractDescription],\n [Human, What, Is, BlockObjectThat],\n [Human, What, Is, BlockObjectThat, AbstractDescription],\n\n # 2\n ## What is at X ##\n [Human, What, Is, BlockObjectLocation],\n [Human, What, Is, The, AbstractDescription, BlockObjectLocation],\n\n ## What do you see at X ##\n [Human, WhatSee, BlockObjectLocation],\n\n # 3\n # What size is X ##\n [Human, AskSize, BlockObjectThis],\n [Human, AskSize, BlockObjectThis, AbstractDescription],\n [Human, AskSize, BlockObjectThis, ConcreteDescription],\n [Human, AskSize, BlockObjectThat],\n [Human, AskSize, BlockObjectThat, 
AbstractDescription],\n [Human, AskSize, BlockObjectThat, ConcreteDescription],\n\n # 4\n ## what size is X at Y ##\n [Human, AskSize, The, AbstractDescription, BlockObjectLocation],\n [Human, AskSize, The, ConcreteDescription, BlockObjectLocation],\n\n # 5\n # What colour is X ##\n [Human, AskColour, BlockObjectThis],\n [Human, AskColour, BlockObjectThis, AbstractDescription],\n [Human, AskColour, BlockObjectThis, ConcreteDescription],\n [Human, AskColour, BlockObjectThat],\n [Human, AskColour, BlockObjectThat, AbstractDescription],\n [Human, AskColour, BlockObjectThat, ConcreteDescription],\n\n # 6\n ## what colour is X at Y ##\n [Human, AskColour, The, AbstractDescription, BlockObjectLocation],\n [Human, AskColour, The, ConcreteDescription, BlockObjectLocation],\n\n # 7\n ## Is X Y ##\n [Human, AskIs, BlockObjectThis, Size],\n [Human, AskIs, BlockObjectThis, AbstractDescription, Size],\n [Human, AskIs, BlockObjectThis, ConcreteDescription, Size],\n [Human, AskIs, BlockObjectThat, Size],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Size],\n [Human, AskIs, BlockObjectThat, ConcreteDescription, Size],\n\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],\n [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],\n\n [Human, AskIs, BlockObjectThis, Colour],\n [Human, AskIs, BlockObjectThis, AbstractDescription, Colour],\n [Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],\n [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour],\n [Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],\n\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],\n [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],\n\n # 8\n ## Is X a Y ##\n [Human, AskIs, BlockObjectThis, ConcreteDescription],\n [Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],\n [Human, AskIs, BlockObjectThat, ConcreteDescription],\n [Human, AskIs, 
BlockObjectThat, AbstractDescription, ConcreteDescription],\n\n # 9\n ## IS X at Y Z ##\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],\n\n] \n\nGET_MEMORY_TEMPLATES = [\n ## What are you Doing (Action name) ##\n [Human, QueryBotCurrentAction],\n\n ## What are you Building (Action reference object name) ##\n [Human, QueryBot, ActionReferenceObjectName],\n\n ## Where are you heading (Move target) ##\n [Human, QueryBot, MoveTarget],\n\n ## Where are you (Bot location) ##\n [Human, QueryBot, CurrentLocation],\n] + ANSWER_TEMPLATES\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Freq(object):
def __init__(self, array):
self.__array = array
self.__frequency_dict = {}
self.__array_length = len(array)
self.__running_time = round(time.time() * 1000)
def get_original_array(self):
return self.__array
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Freq(object):
def __init__(self, array):
self.__array = array
self.__frequency_dict = {}
self.__array_length = len(array)
self.__running_time = round(time.time() * 1000)
def get_original_array(self):
return self.__array
def get_array_length(self):
return self.__array_length
def get_frequency_array(self):
if self.__frequency_dict is None:
raise Exception(
'The frequency array is empty, check your function implementation!'
)
return self.__frequency_dict
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Freq(object):
def __init__(self, array):
self.__array = array
self.__frequency_dict = {}
self.__array_length = len(array)
self.__running_time = round(time.time() * 1000)
def get_original_array(self):
return self.__array
def get_array_length(self):
return self.__array_length
def get_frequency_array(self):
if self.__frequency_dict is None:
raise Exception(
'The frequency array is empty, check your function implementation!'
)
return self.__frequency_dict
def get_running_time(self):
return self.__running_time
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Freq(object):
def __init__(self, array):
self.__array = array
self.__frequency_dict = {}
self.__array_length = len(array)
self.__running_time = round(time.time() * 1000)
def get_original_array(self):
return self.__array
def get_array_length(self):
return self.__array_length
def get_frequency_array(self):
if self.__frequency_dict is None:
raise Exception(
'The frequency array is empty, check your function implementation!'
)
return self.__frequency_dict
def get_running_time(self):
return self.__running_time
def get_frequency(self):
"""
Implement your elements frequency algorithm
:return: (dictionary) that contains key: element in array, value: frequency. Note that your dictionary should be sorted by key!
"""
self.__running_time = round(time.time() * 1000) - self.__running_time
return self.__frequency_dict
<|reserved_special_token_1|>
___author__ = 'acmASCIS'
'''
by ahani at {9/24/2016}
'''
import time
class Freq(object):
    """Hold an array and (eventually) the frequency of each of its elements.

    The actual frequency computation is left as an exercise (see
    get_frequency); until it is implemented the frequency dict stays empty.
    """

    def __init__(self, array):
        self.__array = array
        self.__array_length = len(array)
        self.__frequency_dict = {}
        # Timestamp in milliseconds taken at construction; get_frequency
        # later converts it into an elapsed duration.
        self.__running_time = round(time.time() * 1000)

    def get_original_array(self):
        """Return the array given at construction time."""
        return self.__array

    def get_array_length(self):
        """Return the number of elements in the array."""
        return self.__array_length

    def get_frequency_array(self):
        """Return the element -> frequency dict (empty until computed)."""
        # NOTE(review): this guard can never fire — the dict is initialised
        # to {} in __init__, never to None; kept for behavioural parity.
        if self.__frequency_dict is None:
            raise Exception("The frequency array is empty, check your function implementation!")
        return self.__frequency_dict

    def get_running_time(self):
        """Return the recorded timestamp / elapsed time in milliseconds."""
        return self.__running_time

    def get_frequency(self):
        """
        Implement your elements frequency algorithm
        :return: (dictionary) that contains key: element in array, value: frequency. Note that your dictionary should be sorted by key!
        """
        # TODO: populate self.__frequency_dict here.
        self.__running_time = round(time.time() * 1000) - self.__running_time
        return self.__frequency_dict
|
flexible
|
{
"blob_id": "b569f0a0dda048d6337e1028a240caabf188a174",
"index": 9420,
"step-1": "<mask token>\n\n\nclass Freq(object):\n\n def __init__(self, array):\n self.__array = array\n self.__frequency_dict = {}\n self.__array_length = len(array)\n self.__running_time = round(time.time() * 1000)\n\n def get_original_array(self):\n return self.__array\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Freq(object):\n\n def __init__(self, array):\n self.__array = array\n self.__frequency_dict = {}\n self.__array_length = len(array)\n self.__running_time = round(time.time() * 1000)\n\n def get_original_array(self):\n return self.__array\n\n def get_array_length(self):\n return self.__array_length\n\n def get_frequency_array(self):\n if self.__frequency_dict is None:\n raise Exception(\n 'The frequency array is empty, check your function implementation!'\n )\n return self.__frequency_dict\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Freq(object):\n\n def __init__(self, array):\n self.__array = array\n self.__frequency_dict = {}\n self.__array_length = len(array)\n self.__running_time = round(time.time() * 1000)\n\n def get_original_array(self):\n return self.__array\n\n def get_array_length(self):\n return self.__array_length\n\n def get_frequency_array(self):\n if self.__frequency_dict is None:\n raise Exception(\n 'The frequency array is empty, check your function implementation!'\n )\n return self.__frequency_dict\n\n def get_running_time(self):\n return self.__running_time\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass Freq(object):\n\n def __init__(self, array):\n self.__array = array\n self.__frequency_dict = {}\n self.__array_length = len(array)\n self.__running_time = round(time.time() * 1000)\n\n def get_original_array(self):\n return self.__array\n\n def get_array_length(self):\n return self.__array_length\n\n def get_frequency_array(self):\n if self.__frequency_dict is None:\n raise Exception(\n 'The frequency array is empty, check your function implementation!'\n )\n return self.__frequency_dict\n\n def get_running_time(self):\n return self.__running_time\n\n def get_frequency(self):\n \"\"\"\n Implement your elements frequency algorithm\n :return: (dictionary) that contains key: element in array, value: frequency. Note that your dictionary should be sorted by key!\n \"\"\"\n self.__running_time = round(time.time() * 1000) - self.__running_time\n return self.__frequency_dict\n",
"step-5": "___author__ = 'acmASCIS'\n\n'''\n by ahani at {9/24/2016}\n'''\n\nimport time\n\n\nclass Freq(object):\n def __init__(self, array):\n self.__array = array\n self.__frequency_dict = {}\n self.__array_length = len(array)\n self.__running_time = round(time.time() * 1000)\n\n def get_original_array(self):\n return self.__array\n\n def get_array_length(self):\n return self.__array_length\n\n def get_frequency_array(self):\n if self.__frequency_dict is None:\n raise Exception(\"The frequency array is empty, check your function implementation!\")\n\n return self.__frequency_dict\n\n def get_running_time(self):\n return self.__running_time\n\n def get_frequency(self):\n \"\"\"\n Implement your elements frequency algorithm\n :return: (dictionary) that contains key: element in array, value: frequency. Note that your dictionary should be sorted by key!\n \"\"\"\n\n #TODO\n\n\n self.__running_time = round(time.time() * 1000) - self.__running_time\n\n return self.__frequency_dict\n",
"step-ids": [
3,
5,
6,
7,
10
]
}
|
[
3,
5,
6,
7,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def overlap_rect(rec1, rec2):
    """Determine if rectangles overlap.

    Each rectangle is [x1, y1, x2, y2] with (x1, y1) the bottom-left corner
    and (x2, y2) the top-right corner. Rectangles that merely touch along an
    edge are NOT considered overlapping (<= comparisons).
    """
    a = rec2[2] <= rec1[0]  # rec2 entirely left of rec1
    b = rec1[2] <= rec2[0]  # rec2 entirely right of rec1
    c = rec2[3] <= rec1[1]  # rec2 entirely below rec1
    d = rec1[3] <= rec2[1]  # rec2 entirely above rec1
    # They overlap iff no separating condition holds.
    return not (a or b or c or d)
<|reserved_special_token_1|>
# leetcode 836
# determine if two rectangles overlap
# input is two lists [x1,y1,x2,y2] coordinates
# where x1,y1 are coordinates of bottom left corner
# and x2,y2 are coordinates of top right corner
def overlap_rect(rec1, rec2):
    """Return True when the two axis-aligned rectangles share interior area.

    Each rectangle is [x1, y1, x2, y2] with (x1, y1) the bottom-left corner
    and (x2, y2) the top-right corner; rectangles that only touch along an
    edge do NOT count as overlapping.
    """
    separated = (
        rec2[2] <= rec1[0]      # rec2 entirely left of rec1
        or rec1[2] <= rec2[0]   # rec2 entirely right of rec1
        or rec2[3] <= rec1[1]   # rec2 entirely below rec1
        or rec1[3] <= rec2[1]   # rec2 entirely above rec1
    )
    return not separated
|
flexible
|
{
"blob_id": "0ef03ed455938bd2001581986c38104bfac395ce",
"index": 8078,
"step-1": "<mask token>\n",
"step-2": "def overlap_rect(rec1, rec2):\n \"\"\"Determine if rectangles overlap.\"\"\"\n a = rec2[2] <= rec1[0]\n b = rec1[2] <= rec2[0]\n c = rec2[3] <= rec1[1]\n d = rec1[3] <= rec2[1]\n return not (a or b or c or d)\n",
"step-3": "# leetcode 836\n# determine if two rectangles overlap\n# input is two lists [x1,y1,x2,y2] coordinates\n# where x1,y1 are coordinates of bottom left corner\n# and x2,y2 are coordinates of top right corner\n\ndef overlap_rect(rec1, rec2):\n \"\"\"Determine if rectangles overlap.\"\"\"\n # true if rec2 is left of rec1\n a = rec2[2] <= rec1[0]\n \n # true if rec2 is right of rec1\n b = rec1[2] <= rec2[0]\n\n # true if rec2 is below rec1\n c = rec2[3] <= rec1[1]\n\n # true if rec2 is above rec1\n d = rec1[3] <= rec2[1]\n\n return not (a or b or c or d)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import argparse
def parse_args():
    """
    Parse command-line arguments for training/evaluating the baseline Mask R-CNN model.

    :return: Populated namespace with the dataset directory, an optional
        checkpoint to resume training from, optional pretrained weights, and
        optional image-directory / video test inputs.
    """
    # BUGFIX: the previous docstring was copy-pasted from an unrelated MM-Fit
    # activity-recognition project and did not describe this parser.
    parser = argparse.ArgumentParser(description='baseline Mask R-CNN')
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/dataset/",
                        help='Directory of the dataset')
    parser.add_argument('--continue_train', type=str, required=False, default='None',
                        metavar="/path/to/latest/weights.h5", help="Path to lastest training weights .h5 file")
    parser.add_argument('--weight', required=False,
                        metavar='/path/to/pretrained/weight.h5', help="Path to trained weight")
    parser.add_argument('--image', required=False,
                        metavar='/path/to/testing/image/directory', help="Path to testing image directory")
    # BUGFIX: --video previously reused --image's metavar and help text verbatim.
    parser.add_argument('--video', required=False,
                        metavar='/path/to/testing/video', help="Path to testing video file")
    return parser.parse_args()
|
normal
|
{
"blob_id": "b6527a09f346ee1b7dd446a0ff21995a995481a8",
"index": 6640,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.\n :return: Populated namespace.\n \"\"\"\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True, metavar=\n '/path/to/dataset/', help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False,\n default='None', metavar='/path/to/latest/weights.h5', help=\n 'Path to lastest training weights .h5 file')\n parser.add_argument('--weight', required=False, metavar=\n '/path/to/pretrained/weight.h5', help='Path to trained weight')\n parser.add_argument('--image', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n parser.add_argument('--video', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n return parser.parse_args()\n",
"step-3": "import argparse\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.\n :return: Populated namespace.\n \"\"\"\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True, metavar=\n '/path/to/dataset/', help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False,\n default='None', metavar='/path/to/latest/weights.h5', help=\n 'Path to lastest training weights .h5 file')\n parser.add_argument('--weight', required=False, metavar=\n '/path/to/pretrained/weight.h5', help='Path to trained weight')\n parser.add_argument('--image', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n parser.add_argument('--video', required=False, metavar=\n '/path/to/testing/image/directory', help=\n 'Path to testing image directory')\n return parser.parse_args()\n",
"step-4": "import argparse\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments to train and evaluate a multimodal network for activity recognition on MM-Fit.\n :return: Populated namespace.\n \"\"\"\n parser = argparse.ArgumentParser(description='baseline Mask R-CNN')\n parser.add_argument('--dataset', required=True,\n metavar=\"/path/to/dataset/\",\n help='Directory of the dataset')\n parser.add_argument('--continue_train', type=str, required=False, default='None',\n metavar=\"/path/to/latest/weights.h5\", help=\"Path to lastest training weights .h5 file\")\n parser.add_argument('--weight', required=False,\n metavar='/path/to/pretrained/weight.h5', help=\"Path to trained weight\")\n parser.add_argument('--image', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n parser.add_argument('--video', required=False,\n metavar='/path/to/testing/image/directory', help=\"Path to testing image directory\")\n return parser.parse_args()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import time
import argparse
import cPickle as pickle
from definitions import OieFeatures
from definitions.OieExample import OieExample
class FeatureLexicon:
    """
    Bidirectional feature vocabulary built from the mined data.

    Two vocabularies are kept side by side:

    * a *full* one (``str2Id``/``id2Str``) covering every feature string ever
      seen, together with its occurrence count (``id2freq``) — updated only by
      ``get_or_add``;
    * a *pruned* one (``str2IdPruned``/``id2StrPruned``) holding only features
      whose count passed a caller-chosen threshold — updated only by
      ``get_or_add_pruned``.

    Keys are strings of the form 'featureFunction#value'
    (e.g. 'posPatternPath#JJ_VV_NN', 'bigrams#e1_t2').
    """

    def __init__(self):
        # Full vocabulary: next free id, id <-> string maps, occurrence counts.
        self.nextId = 0
        self.id2Str = {}
        self.str2Id = {}
        self.id2freq = {}
        # Pruned (thresholded) vocabulary: next free id and id <-> string maps.
        self.nextIdPruned = 0
        self.id2StrPruned = {}
        self.str2IdPruned = {}

    def get_or_add(self, s):
        """
        Return the numeric id of *s* in the full vocabulary, registering it
        (with count 1) on first sight and bumping its count otherwise.

        :param s: feature string, e.g. 'posPatternPath#JJ_VV_NN'
        :return: integer id of *s*
        """
        feat_id = self.str2Id.get(s)
        if feat_id is None:
            feat_id = self.nextId
            self.str2Id[s] = feat_id
            self.id2Str[feat_id] = s
            self.id2freq[feat_id] = 1
            self.nextId += 1
        else:
            self.id2freq[feat_id] += 1
        return feat_id

    def get_or_add_pruned(self, s):
        """
        Return the numeric id of *s* in the pruned vocabulary, registering it
        on first sight. Occurrence counts are NOT touched here.

        :param s: feature string that survived thresholding
        :return: integer id of *s* within the pruned vocabulary
        """
        if s not in self.str2IdPruned:
            pruned_id = self.nextIdPruned
            self.str2IdPruned[s] = pruned_id
            self.id2StrPruned[pruned_id] = s
            self.nextIdPruned += 1
        return self.str2IdPruned[s]

    def get_id(self, a_string):
        """Return the full-vocabulary id of *a_string*, or None when unknown."""
        return self.str2Id.get(a_string)

    def get_str(self, idx):
        """Return the feature string with full-vocabulary id *idx*, or None."""
        return self.id2Str.get(idx)

    def get_str_pruned(self, idx):
        """Return the feature string with pruned-vocabulary id *idx*, or None."""
        return self.id2StrPruned.get(idx)

    def get_freq(self, idx):
        """Return how many times the feature with id *idx* occurred, or None."""
        return self.id2freq.get(idx)

    def get_feature_space_dimensionality(self):
        """Return the number of features that survived thresholding."""
        return self.nextIdPruned
def build_feature_lexicon(raw_features, feature_extractors, lexicon):
# invokes internally get_or_add building the str2Id, id2Str, id2freq dicts since expand parameter is True
print 'Building feature lexicon...'
for ex_f in raw_features:
get_features(lexicon, feature_extractors, [ex_f[1], ex_f[4], ex_f[5], ex_f[7], ex_f[8], ex_f[6]], ex_f[2], ex_f[3], expand=True)
print ' Lexicon now has {} unique entries'.format(lexicon.nextId)
def get_features(lexicon, feature_extractors, info, arg1=None, arg2=None, expand=False):
    """
    Return the list of numeric feature ids extracted from one datapoint.

    Each extractor in *feature_extractors* is applied to (info, arg1, arg2);
    every non-None result is flattened into individual values, prefixed with
    the extractor's name and '#', and resolved to an id via the lexicon.

    :type lexicon: FeatureLexicon
    :param feature_extractors: feature extraction callables as defined in
        OieFeatures.py (e.g. trigger, entityTypes, bow_clean, posPatternPath)
    :param info: per-sentence information list: [dep-parse, entity-type-pair,
        trigger, sentence, pos-sequence, doc-path]
    :param arg1: entity 1 string, e.g. 'Java'
    :param arg2: entity 2 string, e.g. 'C++'
    :param expand: when True, unknown features are added to the lexicon (and
        counted); when False, only already-known features yield ids.
    :return: list of integer feature ids
    """
    collected = []
    for extractor in feature_extractors:
        value = extractor(info, arg1, arg2)
        if value is None:
            continue
        prefix = extractor.__name__ + "#"
        for element in generate_feature_element(value):
            _load_features(lexicon, prefix + element, collected, expand=expand)
    return collected
def get_thresholded_features(lexicon, feature_extractors, info, arg1, arg2, threshold, expand=False):
    """
    Return the list of pruned-vocabulary feature ids extracted from one
    datapoint, keeping only features whose full-vocabulary frequency exceeds
    *threshold*.

    :type lexicon: FeatureLexicon
    :param feature_extractors: feature extraction callables as defined in
        OieFeatures.py (e.g. trigger, entityTypes, bow_clean, posPatternPath)
    :param info: per-sentence information list: [dep-parse, entity-type-pair,
        trigger, sentence, pos-sequence, doc-path]
    :param arg1: entity 1 string, e.g. 'Java'
    :param arg2: entity 2 string, e.g. 'C++'
    :param threshold: a feature must have been seen more than this many times
    :param expand: when True, surviving features are added to the pruned
        dictionaries; when False, only already-known features are considered.
    :return: list of integer feature ids from the pruned vocabulary
    """
    feats = []
    for extractor in feature_extractors:
        value = extractor(info, arg1, arg2)
        if value is None:
            continue
        prefix = extractor.__name__ + "#"
        for element in generate_feature_element(value):
            _load_thresholded_features(lexicon, prefix + element, feats, threshold, expand=expand)
    return feats
def generate_feature_element(extractor_output):
    """Yield the individual feature values produced by a single extractor.

    Extractors may return either one value or a list of values; both cases are
    normalised into a flat stream of elements.

    :param extractor_output: a single feature value or a list of them
    :return: generator over the individual feature values
    """
    # FIX: use isinstance instead of the exact-type check `type(x) == list`
    # (idiomatic, and also flattens list subclasses correctly).
    if isinstance(extractor_output, list):
        for element in extractor_output:
            yield element
    else:
        yield extractor_output
def _load_features(lexicon, feat_str_id, feats, expand=False):
    # Append the id of feat_str_id to feats. In expand mode the lexicon grows
    # (and the occurrence is counted); otherwise only already-known features
    # are appended and unknown ones are silently skipped.
    if expand:
        feats.append(lexicon.get_or_add(feat_str_id))
        return
    known_id = lexicon.get_id(feat_str_id)
    if known_id is not None:
        feats.append(known_id)
def _load_thresholded_features(lexicon, feat_str_id, feats, thres, expand=False):
    # Append the pruned-vocabulary id of feat_str_id to feats, but only when
    # its full-vocabulary frequency strictly exceeds the threshold.
    feat_id = lexicon.get_id(feat_str_id)
    if expand:
        # NOTE(review): raises KeyError when feat_str_id was never added to the
        # full vocabulary (id2freq[None]) — original lookup preserved as-is.
        if lexicon.id2freq[feat_id] > thres:
            feats.append(lexicon.get_or_add_pruned(feat_str_id))
    elif feat_id is not None and lexicon.id2freq[feat_id] > thres:
        feats.append(lexicon.get_or_add_pruned(feat_str_id))
def read_examples(file_name):
    """
    Read the input tab-separated file and return the parsed data as a list of
    lists of strings. Each line corresponds to one datapoint and must contain
    exactly 9 tab-separated fields (see '../data-sample.txt' for the expected
    format). Each returned row is:

        ['counter_index', 'entry_1', ..., 'entry_9']

    :param file_name: path of the file to read
    :type file_name: str
    :return: list of lists of strings; each inner list starts with a running
        counter 0..N followed by the 9 fields found on the line
    :rtype: list
    :raises IOError: if an empty or whitespace-only line is encountered
    """
    start = time.time()
    print('Reading examples from tab separated file...')
    count = 0
    i = 0
    relation_examples = []
    with open(file_name, 'r') as fp:
        for i, line in enumerate(fp):
            # BUGFIX: the original called line.strip() and discarded the result
            # (str.strip returns a new string); bind it so blank-line detection
            # works and the last field no longer carries a trailing newline.
            line = line.strip()
            if not line:
                raise IOError
            fields = line.split('\t')
            assert len(fields) == 9, "a problem with the file format (# fields is wrong) len is " + str(len(fields)) + "instead of 9"
            relation_examples.append([str(count)] + fields)
            count += 1
    # Single-argument print with parentheses behaves identically under
    # Python 2's print statement and Python 3's print function.
    print(' File contained {} lines'.format(i + 1))
    print(' Datapoints with valid features encoded: {}'.format(count))
    print(' Done in {:.2f} sec'.format(time.time() - start))
    return relation_examples
def load_features(raw_features_struct, lexicon, examples_list, labels_dict, threshold):
"""
Encodes the input raw feature values into OieExample objects and appends to the input examples_list\n
Reads relation labels_dict, from the input features if found, and updates the corresponding keys in input labels_dict with a list of tokens representing the label\n
It also updates the "thresholded" 'str2IdPruned' and 'id2StrPruneddictionaries'
.. seealso:: :funct:`read_examples`\nTypically, the input raw features data structure is generated by the above function.\n
:param raw_features_struct: the input raw features data structure read from a tab separated file\n
A list of lists with each inner list following the below decoder_type for 'getCleanFeatures':
* feats[0] : counter
* feats[1] : dependency parsing <-, ->, ...
* feats[2] : entity 1 (eg java engineer)
* feats[3] : entity 2 (eg software engineer)
* feats[4] : entity-types-pair (eg JOBTITLE-JOBTITLE)
* feats[5] : trigger (eg TRIGGER:is)
* feats[6] : document path
* feats[7] : whole sentence
* feats[8] : sequence of pos tags between e1, e2 (exclusive)
* feats[9] : given label for semantic relation/class
* info = [feats[1], feats[4], feats[5], feats[7], feats[8], feats[6]]
:type raw_features_struct: list of lists of strings
:param lexicon: the dictionary "pruned" mappings are updated
:type lexicon: FeatureLexicon
:param examples_list: the list to populate with generated objects of type definitions.OieExample
:type examples_list: list
:param labels_dict: the dictionary to update the values with the read relation labels_dict (encoded as a list of tokens).
:type labels_dict: dict example ID (int) => goldstandard label (list of tokens/strings)
:param threshold: feature has to be found at least 'threshold' number of times
:type threshold: int
"""
start = time.clock()
print "Creating training examples and putting into list structure..."
index = 0
for i, feats in enumerate(raw_features_struct): # a list of lists of strings [[0, f1, f2, .., f9], [1, ..], .., [N, ..]]
feat_ids = get_thresholded_features(lexicon, feat_extractors,
[feats[1], feats[4], feats[5], feats[7], feats[8], feats[6]], feats[2], feats[3], expand=True,
threshold=threshold)
example = OieExample(feats[2], feats[3], feat_ids, feats[5], relation=feats[9])
labels_dict[index] = feats[-1].strip().split(' ')
index += 1
examples_list.append(example)
print ' Unique thresholded feature keys: {}'.format(lexicon.nextIdPruned)
print ' Done in {:.1f} sec'.format(time.clock() - start)
def pickle_objects(feat_extrs, feat_lex, dataset_splits, goldstandard_splits, a_file):
"""Pickles the input objects in the specified file.
:param feat_extrs: feature extractors
:type feat_extrs: list of callable objects
:param feat_lex: indexed feature values extracted from mined sentences
:type feat_lex: FeatureLexicon
:param dataset_splits: the collection of sentences split into 'train', 'test', 'valid' sets. Maps splits to examples
:type dataset_splits: dict; str {'train', 'test', 'valid'} => list (of instances of definitions.OieExample)
:param goldstandard_splits: the true relation labels. Maps splits {'train', 'test', 'valid'} to example-label mappings
:type goldstandard_splits: dict; str {'train', 'test', 'valid'} => dict (int => list). List holds the tokens (strings) representing the label for the example int ID
:param a_file: the target file to pickle the objects to
:type a_file: str
"""
start = time.time()
print 'Pickling feature extraction functions, feature lexicon, dataset_splits batch examples and goldstandard_splits labels...'
assert type(feat_extrs) == list, 'Expected a list of callables as the 1st object to be pickled'
for _ in feat_extrs:
assert callable(_) is True, 'Element {} of 1st object is not callable'.format(_)
assert isinstance(feat_lex, FeatureLexicon), "Expected an instance of FeatureLexicon as the 2nd object to be pickled. Got '{}' instead".format(type(feat_lex))
assert type(dataset_splits) == dict, 'Expected a dict as the 3rd object to be pickled'
for _ in dataset_splits:
assert _ in ['train', 'test', 'valid'], "The dict expected as the 3rd object to be pickled, has key '{}' not in ['train', 'test', 'valid']".format(_)
assert type(goldstandard_splits) == dict, 'Expected a dict as the 4th object to be pickled'
for _ in goldstandard_splits:
assert _ in ['train', 'test', 'valid'], "The dict expected as the 4th object to be pickled, has key '{}' not in ['train', 'test', 'valid']".format(_)
with open(a_file, 'wb') as pkl_file:
pickle.dump(feat_extrs, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(feat_lex, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(dataset_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(goldstandard_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)
print ' Done in {:.2f} sec'.format(time.time() - start)
def unpickle_objects(a_file, verbose=False, debug=False):
"""
Unpickles the input file and returns references to the retrieved objects. Objects are assumed to have been pickled in the below order:\n
* list: its elements are callable objects representing feature extrating functions
* FeatureLexicon: holds the 5 dictionaries (mapping IDs, features (strings) and triggering frequencies), built from the mined sentences
* dict: has keys 'train', 'test', 'dev' each mapping to a list of instances of type definitions.OieExample
* dict: has keys 'train', 'test', 'dev' each mapping to a dict mapping integers (IDs) to lists of tokens. Each list can have one or more string tokens representing the relation label\n
:param a_file: file containing pickled objects
:type a_file: str
:param verbose: prints informative messages
:type verbose: bool
:param debug: if true prints the type of each object loaded
:type debug: bool
:return: references to the unpickled objects
:rtype: list, FeatureLexicon, dict, dict
"""
start = time.time()
with open(a_file, 'rb') as pkl_file:
if verbose:
print "Opened pickled file '{}'".format(a_file)
feature_extraction_functions = pickle.load(pkl_file)
if debug:
print "Loaded object of type '{}'".format(type(feature_extraction_functions).__name__)
assert type(feature_extraction_functions) == list
the_relation_lexicon = pickle.load(pkl_file)
if debug:
print "Loaded object of type '{}'".format(type(the_relation_lexicon).__name__)
assert isinstance(the_relation_lexicon, FeatureLexicon), "Expected an instance of FeatureLexicon as the 2nd object to be pickled. Got '{}' instead".format(type(the_relation_lexicon))
the_dataset = pickle.load(pkl_file)
if debug:
print "Loaded object of type '{}'".format(type(the_dataset).__name__)
assert type(the_dataset) == dict
the_goldstandard = pickle.load(pkl_file)
if debug:
print "Loaded object of type '{}'".format(type(the_goldstandard).__name__)
assert type(the_goldstandard) == dict
if verbose:
print ' loaded feature extractors:', ', '.join(("'" + str(_.__name__) + "'" for _ in feature_extraction_functions))
print ' loaded dataset with {} splits'.format(', '.join(("'" + _ + "'" for _ in the_dataset.iterkeys())))
print 'Done in {:.2f} sec'.format(time.time() - start)
return feature_extraction_functions, the_relation_lexicon, the_dataset, the_goldstandard
def get_cmd_arguments():
    """Declare and parse the command line arguments of this script.

    :return: the parsed argument namespace (input_file, pickled_dataset, batch, thres, test_mode)
    :rtype: argparse.Namespace
    """
    arg_parser = argparse.ArgumentParser(description='Processes an Oie file and add its representations to a Python pickled file.')
    # (positional/optional names, add_argument keyword options) declared in a table, then registered in one pass
    argument_specs = [
        (('input_file',), dict(metavar='input-file', help='input file in the Yao format, like data-sample.txt')),
        (('pickled_dataset',), dict(metavar='pickled-dataset', help='pickle file to be used to store output (created if empty)')),
        (('--batch',), dict(metavar='batch-name', default="train", nargs="?", help="name used as a reference in the pickled file, default is 'train'")),
        (('--thres',), dict(metavar='threshold-value', default="0", nargs="?", type=int, help='minimum feature frequency')),
        (('--test-mode',), dict(action='store_true', help='used for test files. If true the feature space is not expanded, so that previously unseen features are not added to the dicts')),
    ]
    for names, options in argument_specs:
        arg_parser.add_argument(*names, **options)
    return arg_parser.parse_args()
if __name__ == '__main__':
    t_start = time.time()  # NOTE(review): start time is recorded but never reported in this block
    args = get_cmd_arguments()
    # reads the tabbed separated file into a list of lists of strings, representing extracted features
    exs_raw_features = read_examples(args.input_file)
    # NOTE: feat_extractors is a module-level name; presumably read as a global by load_features — verify before renaming
    feat_extractors = OieFeatures.getBasicCleanFeatures() # list of callable feature extraction functions
    relation_lexicon = FeatureLexicon()
    dataset = {} # dict mapping keys 'train', 'test', 'dev' to a list of OieExample instances
    # dict mapping each key 'train', 'test', 'dev' to a dictionary mapping int to a list of strings, representing goldstandard relation labels
    # each inner list contains the tokens that comprise the label (i.e. ['is-a']). Most are expected to have a single token.
    goldstandard = {}
    if os.path.exists(args.pickled_dataset): # if found pickled objects, else pickle into new file
        # an existing pickle file replaces the freshly built (empty) structures above
        feat_extractors, relation_lexicon, dataset, goldstandard = unpickle_objects(args.pickled_dataset)
    examples = [] # list of instances of definitions.OieExample
    relation_labels = {} # dictionary mapping int to list of strings
    if args.batch in dataset:
        examples = dataset[args.batch] # list of OieExamples for the 'batch_name' input split of the dataset
        # dict with the goldstandard labels (lists of token(s)) for the 'batch_name' input split of the dataset
        relation_labels = goldstandard[args.batch]
    else:
        # insert the input batch name as a key in the 'dataset' dict, mapping to an empty list (for now)
        dataset[args.batch] = examples
        # insert the input batch name as a key in the 'goldstandard' dict, mapping to an empty dict (for now)
        goldstandard[args.batch] = relation_labels
    # update statistics and mappings for given split
    build_feature_lexicon(exs_raw_features, feat_extractors, relation_lexicon)
    # update the dataset split and goldstandard mappings with the thresholded extractions
    # NOTE(review): args.test_mode is parsed by get_cmd_arguments but never consulted here — confirm whether it was meant to gate lexicon expansion
    load_features(exs_raw_features, relation_lexicon, examples, relation_labels, args.thres)
    pickle_objects(feat_extractors, relation_lexicon, dataset, goldstandard, args.pickled_dataset)
|
normal
|
{
"blob_id": "8102bdf4d29d2d3a1bdddbcfb6045b0660693996",
"index": 402,
"step-1": "import os\nimport time\nimport argparse\nimport cPickle as pickle\nfrom definitions import OieFeatures\nfrom definitions.OieExample import OieExample\n\n\nclass FeatureLexicon:\n \"\"\"\n A wrapper around various dictionaries storing the mined data. It holds 5 dictionaries in total. Two of them store\n mappings\\n\n - str => int\\n\n - int => str\\n\n about all features extracted. An update in these dicts causes an update in the dict holding frequencies, which maps\\n\n - str => float\\n\n The final two dicts contain mappings\\n\n - str => int\n - int => str\\n\n about features which trigger with frequency that exceeds the given threshold.\\n\n All the string dictionaries keys are of the form 'featDef#value' (i.e. 'posPatternPath#JJ_VV_NN', 'bigrams#e1_t2')\n \"\"\"\n def __init__(self):\n self.nextId = 0 # var pointing to the next available id number to be used when inserting new stuff\n self.id2Str = {} # map: int => str\n self.str2Id = {} # map: str => int\n self.id2freq = {} # map: int => float. Gets updated only when 'get_or_add' ins invoked not 'get_or_add_pruned'\n self.nextIdPruned = 0 # pointer\n self.id2StrPruned = {} # map: int => str\n self.str2IdPruned = {} # map: str => int\n\n def get_or_add(self, s):\n \"\"\"\n Returns the numerical ID of the input string mapped by the 'str2Id' dictionary and increments its frequency by 1.\n If the input string is not present, it inserts it in the 'str2Id' dict, sets its frequency to 1\n and returns its new numerical ID.\n :param s: string to search for. 
eg s = posPatternPath#JJ_VV_NN\n :return: the id of the input string as an integer\n \"\"\"\n if s not in self.str2Id:\n self.id2Str[self.nextId] = s\n self.str2Id[s] = self.nextId\n self.id2freq[self.nextId] = 1\n self.nextId += 1\n else:\n self.id2freq[self.str2Id[s]] += 1\n return self.str2Id[s]\n\n def get_or_add_pruned(self, s):\n \"\"\"\n Returns the numerical ID of the input string mapped by the 'str2IdPruned' dictionary.\n If the input string is not present, it inserts it in the 'str2IdPruned' dict and returns its new\n numerical ID. There is no frequency update here.\n :param s: string to search for belonging to the pruned ones, eg posPatternPath#NN_VV_ADJ_VBP\n :return: the id of the input string as an integer\n \"\"\"\n if s not in self.str2IdPruned:\n self.id2StrPruned[self.nextIdPruned] = s\n self.str2IdPruned[s] = self.nextIdPruned\n self.nextIdPruned += 1\n return self.str2IdPruned[s]\n\n def get_id(self, a_string):\n \"\"\"\n :param a_string: a feature such as 'bigrams#e1_t1'\n :return: the numerical ID from the str2Id dict\n \"\"\"\n if a_string not in self.str2Id:\n return None\n return self.str2Id[a_string]\n\n def get_str(self, idx):\n \"\"\"\n Returns the feature corresponding to the input numerical ID, as a string, eg 'bigrams#e1_t1'\n :param idx: a numerical ID\n :return: the feature corresponding to the input ID, mapped by sth id2Str dict\n \"\"\"\n if idx not in self.id2Str:\n return None\n else:\n return self.id2Str[idx]\n\n def get_str_pruned(self, idx):\n \"\"\"\n Returns the feature corresponding to the input numerical ID, only if the frequency of the feature triggering\n has passed a given threshold (if the key is found in the id2StrPruned dict). Returns None if not found.\n :param idx: a numerical ID\n :return: the feature function name concatenated with '#' and the string value of it (i.e. 
'bigrams#e1_t1', 'arg1_lower#java')\n \"\"\"\n if idx not in self.id2StrPruned:\n return None\n else:\n return self.id2StrPruned[idx]\n\n def get_freq(self, idx):\n \"\"\"\n Returns the number of times the feature, corresponding to the input ID, has occured.\n :param idx: a numerical ID\n :return: the frequency of the input ID's feature\n \"\"\"\n if idx not in self.id2freq:\n return None\n return self.id2freq[idx]\n\n def get_feature_space_dimensionality(self):\n \"\"\"\n Returns the number of features that have passed the thresholding\\n\n :return: the number of (unique) entries in the id2strPruned dict\n \"\"\"\n return self.nextIdPruned\n\n\ndef build_feature_lexicon(raw_features, feature_extractors, lexicon):\n # invokes internally get_or_add building the str2Id, id2Str, id2freq dicts since expand parameter is True\n print 'Building feature lexicon...'\n for ex_f in raw_features:\n get_features(lexicon, feature_extractors, [ex_f[1], ex_f[4], ex_f[5], ex_f[7], ex_f[8], ex_f[6]], ex_f[2], ex_f[3], expand=True)\n print ' Lexicon now has {} unique entries'.format(lexicon.nextId)\n\n\ndef get_features(lexicon, feature_extractors, info, arg1=None, arg2=None, expand=False):\n \"\"\"\n Returns a list of the numerical IDs of the features extracted from the input information. 
Input information\n represents a single sentence in the mined dataset.\n :type lexicon: FeatureLexicon\n :param feature_extractors: a list of feature extraction functions as the ones defined in OieFeatures.py eg [trigger,\n entityTypes, arg1_lower, arg2_lower, bow_clean, entity1Type, entity2Type, lexicalPattern, posPatternPath]\n :param info: a list containing information of the input datapoint\\n\n Example\\n\n parsing : info[0] = '<-poss<-production->prep->for->pobj->'\\n\n entities : info[1] = 'JOBTITLE-JOBTITLE'\\n\n trig : info[2] = 'TRIGGER:review|performance'\\n\n sentence : info[3] = 'Supervised learning us a subset of learning methods'\\n\n pos : info[4] = 'DT NNP NNP , VBD IN DT NNP JJ NN NN NNP , VBZ JJ NN NN IN CD NNS IN DT NN NN IN DT NN .'\\n\n docPath : info[5] = './2000/01/01/1165031.xml'\n :param arg1: entity1 string, eg 'Java'\n :param arg2: entity2 string, eg 'C++'\n :type expand: Boolean flag controlling whether str2Id, id2Str and id2freq dictionaries should be expanded as new\n entries appear. If false it is assumed that inner dicts are already maximally populated.\n :return: the list of feature IDs\n \"\"\"\n feats = []\n for f in feature_extractors:\n res = f(info, arg1, arg2)\n if res is not None:\n for feat_el in generate_feature_element(res):\n _load_features(lexicon, f.__name__ + \"#\" + feat_el, feats, expand=expand)\n return feats\n\n\ndef get_thresholded_features(lexicon, feature_extractors, info, arg1, arg2, threshold, expand=False):\n \"\"\"\n Returns a list of the numerical IDs of the features extracted from the input information which frequency value\n exceed the given threshold. 
Input information represents a single sentence in the mined dataset.\n :type lexicon: FeatureLexicon\n :param feature_extractors: a list of feature exraction functions as the ones defined in OieFeatures.py eg [trigger,\n entityTypes, arg1_lower, arg2_lower, bow_clean, entity1Type, entity2Type, lexicalPattern, posPatternPath]\n :param info: a list containing information of the input datapoint\\n\n Example\\n\n - parsing : l[0] = '<-poss<-production->prep->for->pobj->'\\n\n - entities : l[1] = 'JOBTITLE-JOBTITLE'\\n\n - trig : l[2] = 'TRIGGER:review|performance'\\n\n - sentence : l[3] = 'Supervised learning us a subset of learning methods'\\n\n - pos : l[4] = 'DT NNP NNP , VBD IN DT NNP JJ NN NN NNP , VBZ JJ NN NN IN CD NNS IN DT NN NN IN DT NN .'\\n\n - docPath : l[5] = './2000/01/01/1165031.xml'\n :param arg1: entity1 string, eg 'Java'\n :param arg2: entity2 string, eg 'C++'\n :param expand: flag controlling whether str2IdPruned, id2StrPruned dictionaries should be expanded as new\n entries appear. If false it is assumed that inner dicts are already maximally populated.\n :type expand: bool\n :param threshold: integer to cut-off low frequency feature strings, such as i.e. 
infrequent bigrams of the form [bigrams#e1_t1, bigrams#e1_t2, .., posPatternPath#JJ_VV_NN]\n :return: the list of feature IDs\n \"\"\"\n feats = []\n for f in feature_extractors:\n res = f(info, arg1, arg2)\n if res is not None:\n for feat_el in generate_feature_element(res):\n _load_thresholded_features(lexicon, f.__name__ + \"#\" + feat_el, feats, threshold, expand=expand)\n return feats\n\n\ndef generate_feature_element(extractor_output):\n if type(extractor_output) == list:\n for _ in extractor_output:\n yield _\n else:\n yield extractor_output\n\n\ndef _load_features(lexicon, feat_str_id, feats, expand=False):\n if expand:\n feats.append(lexicon.get_or_add(feat_str_id))\n else:\n feat_id = lexicon.get_id(feat_str_id)\n if feat_id is not None:\n feats.append(feat_id)\n\n\ndef _load_thresholded_features(lexicon, feat_str_id, feats, thres, expand=False):\n if expand:\n if lexicon.id2freq[lexicon.get_id(feat_str_id)] > thres:\n feats.append(lexicon.get_or_add_pruned(feat_str_id))\n else:\n feat_id = lexicon.get_id(feat_str_id)\n if feat_id is not None:\n if lexicon.id2freq[feat_id] > thres:\n feats.append(lexicon.get_or_add_pruned(feat_str_id))\n\n\ndef read_examples(file_name):\n \"\"\"\n Reads the input tab-separated (\\\\\\\\t) file and returns the parsed data as a list of lists of strings. Each line, of the file to read, corresponds to a datapoint and has as many entries as the number of elements of the list returned by definitions.OieFeatures.getBasicCleanFeatures plus one.\n Raises and IOError if a line found in the input file does not have 9 elements. The returned lists are of the form:\\n\n ['counter_index', 'entry_1', 'entry_2', .., 'entry_9']\\n\n A sample file with the required format is '../data-sample.txt'.\\n\n :param file_name: a file path to read from\n :type file_name: str\n :return: of lists of strings. 
Each inner list has as first element a counter 0..N followed by the entries found in a line\n returned by definitions.OieFeatures.getBasicCleanFeatures corresponding to the ones in the input file\n :rtype: list\n \"\"\"\n start = time.time()\n print 'Reading examples from tab separated file...'\n count = 0\n i = 0\n with open(file_name, 'r') as fp:\n relation_examples = []\n for i, line in enumerate(fp):\n line.strip()\n if len(line) == 0 or len(line.split()) == 0:\n raise IOError\n else:\n fields = line.split('\\t')\n assert len(fields) == 9, \"a problem with the file format (# fields is wrong) len is \" + str(len(fields)) + \"instead of 9\"\n relation_examples.append([str(count)] + fields)\n count += 1\n print ' File contained {} lines'.format(i + 1)\n print ' Datapoints with valid features encoded: {}'.format(count)\n print ' Done in {:.2f} sec'.format(time.time() - start)\n return relation_examples\n\n\ndef load_features(raw_features_struct, lexicon, examples_list, labels_dict, threshold):\n \"\"\"\n Encodes the input raw feature values into OieExample objects and appends to the input examples_list\\n\n Reads relation labels_dict, from the input features if found, and updates the corresponding keys in input labels_dict with a list of tokens representing the label\\n\n It also updates the \"thresholded\" 'str2IdPruned' and 'id2StrPruneddictionaries'\n .. 
seealso:: :funct:`read_examples`\\nTypically, the input raw features data structure is generated by the above function.\\n\n :param raw_features_struct: the input raw features data structure read from a tab separated file\\n\n A list of lists with each inner list following the below decoder_type for 'getCleanFeatures':\n\n * feats[0] : counter\n * feats[1] : dependency parsing <-, ->, ...\n * feats[2] : entity 1 (eg java engineer)\n * feats[3] : entity 2 (eg software engineer)\n * feats[4] : entity-types-pair (eg JOBTITLE-JOBTITLE)\n * feats[5] : trigger (eg TRIGGER:is)\n * feats[6] : document path\n * feats[7] : whole sentence\n * feats[8] : sequence of pos tags between e1, e2 (exclusive)\n * feats[9] : given label for semantic relation/class\n * info = [feats[1], feats[4], feats[5], feats[7], feats[8], feats[6]]\n\n :type raw_features_struct: list of lists of strings\n :param lexicon: the dictionary \"pruned\" mappings are updated\n :type lexicon: FeatureLexicon\n :param examples_list: the list to populate with generated objects of type definitions.OieExample\n :type examples_list: list\n :param labels_dict: the dictionary to update the values with the read relation labels_dict (encoded as a list of tokens).\n :type labels_dict: dict example ID (int) => goldstandard label (list of tokens/strings)\n :param threshold: feature has to be found at least 'threshold' number of times\n :type threshold: int\n \"\"\"\n start = time.clock()\n print \"Creating training examples and putting into list structure...\"\n index = 0\n for i, feats in enumerate(raw_features_struct): # a list of lists of strings [[0, f1, f2, .., f9], [1, ..], .., [N, ..]]\n feat_ids = get_thresholded_features(lexicon, feat_extractors,\n [feats[1], feats[4], feats[5], feats[7], feats[8], feats[6]], feats[2], feats[3], expand=True,\n threshold=threshold)\n example = OieExample(feats[2], feats[3], feat_ids, feats[5], relation=feats[9])\n labels_dict[index] = feats[-1].strip().split(' ')\n index += 1\n 
examples_list.append(example)\n print ' Unique thresholded feature keys: {}'.format(lexicon.nextIdPruned)\n print ' Done in {:.1f} sec'.format(time.clock() - start)\n\n\ndef pickle_objects(feat_extrs, feat_lex, dataset_splits, goldstandard_splits, a_file):\n \"\"\"Pickles the input objects in the specified file.\n\n :param feat_extrs: feature extractors\n :type feat_extrs: list of callable objects\n :param feat_lex: indexed feature values extracted from mined sentences\n :type feat_lex: FeatureLexicon\n :param dataset_splits: the collection of sentences split into 'train', 'test', 'valid' sets. Maps splits to examples\n :type dataset_splits: dict; str {'train', 'test', 'valid'} => list (of instances of definitions.OieExample)\n :param goldstandard_splits: the true relation labels. Maps splits {'train', 'test', 'valid'} to example-label mappings\n :type goldstandard_splits: dict; str {'train', 'test', 'valid'} => dict (int => list). List holds the tokens (strings) representing the label for the example int ID\n :param a_file: the target file to pickle the objects to\n :type a_file: str\n \"\"\"\n start = time.time()\n print 'Pickling feature extraction functions, feature lexicon, dataset_splits batch examples and goldstandard_splits labels...'\n assert type(feat_extrs) == list, 'Expected a list of callables as the 1st object to be pickled'\n for _ in feat_extrs:\n assert callable(_) is True, 'Element {} of 1st object is not callable'.format(_)\n assert isinstance(feat_lex, FeatureLexicon), \"Expected an instance of FeatureLexicon as the 2nd object to be pickled. 
Got '{}' instead\".format(type(feat_lex))\n assert type(dataset_splits) == dict, 'Expected a dict as the 3rd object to be pickled'\n for _ in dataset_splits:\n assert _ in ['train', 'test', 'valid'], \"The dict expected as the 3rd object to be pickled, has key '{}' not in ['train', 'test', 'valid']\".format(_)\n assert type(goldstandard_splits) == dict, 'Expected a dict as the 4th object to be pickled'\n for _ in goldstandard_splits:\n assert _ in ['train', 'test', 'valid'], \"The dict expected as the 4th object to be pickled, has key '{}' not in ['train', 'test', 'valid']\".format(_)\n with open(a_file, 'wb') as pkl_file:\n pickle.dump(feat_extrs, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(feat_lex, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(dataset_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(goldstandard_splits, pkl_file, protocol=pickle.HIGHEST_PROTOCOL)\n print ' Done in {:.2f} sec'.format(time.time() - start)\n\n\ndef unpickle_objects(a_file, verbose=False, debug=False):\n \"\"\"\n Unpickles the input file and returns references to the retrieved objects. Objects are assumed to have been pickled in the below order:\\n\n * list: its elements are callable objects representing feature extrating functions\n * FeatureLexicon: holds the 5 dictionaries (mapping IDs, features (strings) and triggering frequencies), built from the mined sentences\n * dict: has keys 'train', 'test', 'dev' each mapping to a list of instances of type definitions.OieExample\n * dict: has keys 'train', 'test', 'dev' each mapping to a dict mapping integers (IDs) to lists of tokens. 
Each list can have one or more string tokens representing the relation label\\n\n :param a_file: file containing pickled objects\n :type a_file: str\n :param verbose: prints informative messages\n :type verbose: bool\n :param debug: if true prints the type of each object loaded\n :type debug: bool\n :return: references to the unpickled objects\n :rtype: list, FeatureLexicon, dict, dict\n \"\"\"\n start = time.time()\n with open(a_file, 'rb') as pkl_file:\n if verbose:\n print \"Opened pickled file '{}'\".format(a_file)\n feature_extraction_functions = pickle.load(pkl_file)\n if debug:\n print \"Loaded object of type '{}'\".format(type(feature_extraction_functions).__name__)\n assert type(feature_extraction_functions) == list\n the_relation_lexicon = pickle.load(pkl_file)\n if debug:\n print \"Loaded object of type '{}'\".format(type(the_relation_lexicon).__name__)\n assert isinstance(the_relation_lexicon, FeatureLexicon), \"Expected an instance of FeatureLexicon as the 2nd object to be pickled. 
Got '{}' instead\".format(type(the_relation_lexicon))\n the_dataset = pickle.load(pkl_file)\n if debug:\n print \"Loaded object of type '{}'\".format(type(the_dataset).__name__)\n assert type(the_dataset) == dict\n the_goldstandard = pickle.load(pkl_file)\n if debug:\n print \"Loaded object of type '{}'\".format(type(the_goldstandard).__name__)\n assert type(the_goldstandard) == dict\n if verbose:\n print ' loaded feature extractors:', ', '.join((\"'\" + str(_.__name__) + \"'\" for _ in feature_extraction_functions))\n print ' loaded dataset with {} splits'.format(', '.join((\"'\" + _ + \"'\" for _ in the_dataset.iterkeys())))\n print 'Done in {:.2f} sec'.format(time.time() - start)\n return feature_extraction_functions, the_relation_lexicon, the_dataset, the_goldstandard\n\n\ndef get_cmd_arguments():\n myparser = argparse.ArgumentParser(description='Processes an Oie file and add its representations to a Python pickled file.')\n myparser.add_argument('input_file', metavar='input-file', help='input file in the Yao format, like data-sample.txt')\n myparser.add_argument('pickled_dataset', metavar='pickled-dataset', help='pickle file to be used to store output (created if empty)')\n myparser.add_argument('--batch', metavar='batch-name', default=\"train\", nargs=\"?\", help=\"name used as a reference in the pickled file, default is 'train'\")\n myparser.add_argument('--thres', metavar='threshold-value', default=\"0\", nargs=\"?\", type=int, help='minimum feature frequency')\n myparser.add_argument('--test-mode', action='store_true', help='used for test files. 
If true the feature space is not expanded, so that previously unseen features are not added to the dicts')\n return myparser.parse_args()\n\n\nif __name__ == '__main__':\n t_start = time.time()\n args = get_cmd_arguments()\n\n # reads the tabbed separated file into a list of lists of strings, representing extracted features\n exs_raw_features = read_examples(args.input_file)\n\n feat_extractors = OieFeatures.getBasicCleanFeatures() # list of callable feature extraction functions\n relation_lexicon = FeatureLexicon()\n dataset = {} # dict mapping keys 'train', 'test', 'dev' to a list of OieExample instances\n\n # dict mapping each key 'train', 'test', 'dev' to a dictionary mapping int to a list of strings, representing goldstandard relation labels\n # each inner list contains the tokens that comprise the label (i.e. ['is-a']). Most are expected to have a single token.\n goldstandard = {}\n\n if os.path.exists(args.pickled_dataset): # if found pickled objects, else pickle into new file\n feat_extractors, relation_lexicon, dataset, goldstandard = unpickle_objects(args.pickled_dataset)\n\n examples = [] # list of instances of definitions.OieExample\n relation_labels = {} # dictionary mapping int to list of strings\n\n if args.batch in dataset:\n examples = dataset[args.batch] # list of OieExamples for the 'batch_name' input split of the dataset\n # dict with the goldstandard labels (lists of token(s)) for the 'batch_name' input split of the dataset\n relation_labels = goldstandard[args.batch]\n else:\n # insert the input batch name as a key in the 'dataset' dict, mapping to an empty list (for now)\n dataset[args.batch] = examples\n # insert the input batch name as a key in the 'goldstandard' dict, mapping to an empty dict (for now)\n goldstandard[args.batch] = relation_labels\n\n # update statistics and mappings for given split\n build_feature_lexicon(exs_raw_features, feat_extractors, relation_lexicon)\n\n # update the dataset split and goldstandard mappings with the 
thresholded extractions\n load_features(exs_raw_features, relation_lexicon, examples, relation_labels, args.thres)\n\n pickle_objects(feat_extractors, relation_lexicon, dataset, goldstandard, args.pickled_dataset)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: UTF-8 -*-
'''
Evaluate trained PredNet on KITTI sequences.
Calculates mean-squared error and plots predictions.
'''
import os
import numpy as np
from six.moves import cPickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Input, Dense, Flatten
from prednet import PredNet
from data_utils import SequenceGenerator
from kitti_settings import *
n_plot = 40        # number of randomly chosen sequences to plot
batch_size = 10
nt = 5             # number of timesteps per sequence
# paths of the trained weights (.hdf5) and model architecture (.json) files
weights_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_weights.hdf5')
json_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_model.json')
# weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5')
# json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')
# weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights-extrapfinetuned.hdf5') # where weights will be saved
# json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model-extrapfinetuned.json')
test_file = os.path.join(DATA_DIR, 'facebook_segmpred_X_test.hkl')
test_sources = os.path.join(DATA_DIR, 'facebook_segmpred_sources_test.hkl')
# Load trained model
# read the stored model architecture json
f = open(json_file, 'r')
# the json string describing the architecture
json_string = f.read()
f.close()
# Deserialize the trained model from the stored json (registering the custom PredNet layer), then load the weights.
# A model is saved as a json + weights pair, and loaded back from that same pair.
train_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})
train_model.load_weights(weights_file)
# Create testing model (to output predictions)
# The training model contains an InputLayer, the PredNet layer, etc.; layer index 1 is the PredNet layer.
# print(train_model.layers)
layer_config = train_model.layers[1].get_config()
# for evaluation, switch the layer's output_mode from 'error' (training) to 'prediction'
layer_config['output_mode'] = 'prediction'
data_format = layer_config['data_format'] if 'data_format' in layer_config else layer_config['dim_ordering']
# rebuild a PredNet layer from the modified config plus the trained weights (via keras get_config/get_weights)
test_prednet = PredNet(weights=train_model.layers[1].get_weights(), **layer_config)
# the input shape is batch_input_shape without the leading batch dimension
# input_shape = list(train_model.layers[0].batch_input_shape[1:])
# nt input frames are fed in to predict the future frame
# input_shape[0] = nt
# print('input_shape:', input_shape)
test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format)
X_test = test_generator.create_all()
# derive the input shape from the actual test data (drop the batch dimension)
input_shape = X_test.shape[1:]
# print('input_shape:', input_shape)
# build the input layer
inputs = Input(shape=tuple(input_shape))
# feed the inputs through the rebuilt prednet layer to get the test outputs
predictions = test_prednet(inputs)
# assemble the test model from inputs and predictions
test_model = Model(inputs=inputs, outputs=predictions)
# test data generator (already created above)
# test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format)
# X_test = test_generator.create_all()
# predict batch by batch, according to batch_size
X_hat = test_model.predict(X_test, batch_size)
# the plotting/MSE code below assumes channels-last; transpose if the model is channels_first
if data_format == 'channels_first':
    X_test = np.transpose(X_test, (0, 1, 3, 4, 2))
    X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))
print('X_hat.shape:', X_hat.shape)
print('X_test.shape:', X_test.shape)
# Compare MSE of PredNet predictions vs. using last frame. Write results to prediction_scores.txt
# compare the test results
mse_model = np.mean( (X_test[:, 1:] - X_hat[:, 1:])**2 )  # look at all timesteps except the first
mse_prev = np.mean( (X_test[:, :-1] - X_test[:, 1:])**2 )
if not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)
f = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')
f.write("Model MSE: %f\n" % mse_model)
f.write("Previous Frame MSE: %f" % mse_prev)
f.close()
# Plot some predictions
aspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]
plt.figure(figsize = (nt, 2*aspect_ratio))
gs = gridspec.GridSpec(2, nt)
gs.update(wspace=0., hspace=0.)
plot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')
if not os.path.exists(plot_save_dir): os.mkdir(plot_save_dir)
# pick n_plot random sequence indices to render
plot_idx = np.random.permutation(X_test.shape[0])[:n_plot]
for i in plot_idx:
    for t in range(nt):
        # top row: ground-truth frames
        plt.subplot(gs[t])
        plt.imshow(X_test[i,t], interpolation='none')
        plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
        if t==0: plt.ylabel('Actual', fontsize=10)
        # bottom row: predicted frames
        plt.subplot(gs[t + nt])
        plt.imshow(X_hat[i,t], interpolation='none')
        plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
        if t==0: plt.ylabel('Predicted', fontsize=10)
    plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')
    plt.clf()
|
normal
|
{
"blob_id": "a3507019ca3310d7ad7eb2a0168dcdfe558643f6",
"index": 1615,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nf.close()\n<mask token>\ntrain_model.load_weights(weights_file)\n<mask token>\nif data_format == 'channels_first':\n X_test = np.transpose(X_test, (0, 1, 3, 4, 2))\n X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))\nprint('X_hat.shape:', X_hat.shape)\nprint('X_test.shape:', X_test.shape)\n<mask token>\nif not os.path.exists(RESULTS_SAVE_DIR):\n os.mkdir(RESULTS_SAVE_DIR)\n<mask token>\nf.write('Model MSE: %f\\n' % mse_model)\nf.write('Previous Frame MSE: %f' % mse_prev)\nf.close()\n<mask token>\nplt.figure(figsize=(nt, 2 * aspect_ratio))\n<mask token>\ngs.update(wspace=0.0, hspace=0.0)\n<mask token>\nif not os.path.exists(plot_save_dir):\n os.mkdir(plot_save_dir)\n<mask token>\nfor i in plot_idx:\n for t in range(nt):\n plt.subplot(gs[t])\n plt.imshow(X_test[i, t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n left='off', right='off', labelbottom='off', labelleft='off')\n if t == 0:\n plt.ylabel('Actual', fontsize=10)\n plt.subplot(gs[t + nt])\n plt.imshow(X_hat[i, t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n left='off', right='off', labelbottom='off', labelleft='off')\n if t == 0:\n plt.ylabel('Predicted', fontsize=10)\n plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')\n plt.clf()\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nn_plot = 40\nbatch_size = 10\nnt = 5\nweights_file = os.path.join(WEIGHTS_DIR,\n 'prednet_facebook_segmpred_weights.hdf5')\njson_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_model.json')\ntest_file = os.path.join(DATA_DIR, 'facebook_segmpred_X_test.hkl')\ntest_sources = os.path.join(DATA_DIR, 'facebook_segmpred_sources_test.hkl')\nf = open(json_file, 'r')\njson_string = f.read()\nf.close()\ntrain_model = model_from_json(json_string, custom_objects={'PredNet': PredNet})\ntrain_model.load_weights(weights_file)\nlayer_config = train_model.layers[1].get_config()\nlayer_config['output_mode'] = 'prediction'\ndata_format = layer_config['data_format'\n ] if 'data_format' in layer_config else layer_config['dim_ordering']\ntest_prednet = PredNet(weights=train_model.layers[1].get_weights(), **\n layer_config)\ntest_generator = SequenceGenerator(test_file, test_sources, nt,\n sequence_start_mode='unique', data_format=data_format)\nX_test = test_generator.create_all()\ninput_shape = X_test.shape[1:]\ninputs = Input(shape=tuple(input_shape))\npredictions = test_prednet(inputs)\ntest_model = Model(inputs=inputs, outputs=predictions)\nX_hat = test_model.predict(X_test, batch_size)\nif data_format == 'channels_first':\n X_test = np.transpose(X_test, (0, 1, 3, 4, 2))\n X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))\nprint('X_hat.shape:', X_hat.shape)\nprint('X_test.shape:', X_test.shape)\nmse_model = np.mean((X_test[:, 1:] - X_hat[:, 1:]) ** 2)\nmse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:]) ** 2)\nif not os.path.exists(RESULTS_SAVE_DIR):\n os.mkdir(RESULTS_SAVE_DIR)\nf = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')\nf.write('Model MSE: %f\\n' % mse_model)\nf.write('Previous Frame MSE: %f' % mse_prev)\nf.close()\naspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]\nplt.figure(figsize=(nt, 2 * aspect_ratio))\ngs = gridspec.GridSpec(2, nt)\ngs.update(wspace=0.0, hspace=0.0)\nplot_save_dir = 
os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')\nif not os.path.exists(plot_save_dir):\n os.mkdir(plot_save_dir)\nplot_idx = np.random.permutation(X_test.shape[0])[:n_plot]\nfor i in plot_idx:\n for t in range(nt):\n plt.subplot(gs[t])\n plt.imshow(X_test[i, t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n left='off', right='off', labelbottom='off', labelleft='off')\n if t == 0:\n plt.ylabel('Actual', fontsize=10)\n plt.subplot(gs[t + nt])\n plt.imshow(X_hat[i, t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n left='off', right='off', labelbottom='off', labelleft='off')\n if t == 0:\n plt.ylabel('Predicted', fontsize=10)\n plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')\n plt.clf()\n",
"step-4": "<mask token>\nimport os\nimport numpy as np\nfrom six.moves import cPickle\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom keras import backend as K\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Input, Dense, Flatten\nfrom prednet import PredNet\nfrom data_utils import SequenceGenerator\nfrom kitti_settings import *\nn_plot = 40\nbatch_size = 10\nnt = 5\nweights_file = os.path.join(WEIGHTS_DIR,\n 'prednet_facebook_segmpred_weights.hdf5')\njson_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_model.json')\ntest_file = os.path.join(DATA_DIR, 'facebook_segmpred_X_test.hkl')\ntest_sources = os.path.join(DATA_DIR, 'facebook_segmpred_sources_test.hkl')\nf = open(json_file, 'r')\njson_string = f.read()\nf.close()\ntrain_model = model_from_json(json_string, custom_objects={'PredNet': PredNet})\ntrain_model.load_weights(weights_file)\nlayer_config = train_model.layers[1].get_config()\nlayer_config['output_mode'] = 'prediction'\ndata_format = layer_config['data_format'\n ] if 'data_format' in layer_config else layer_config['dim_ordering']\ntest_prednet = PredNet(weights=train_model.layers[1].get_weights(), **\n layer_config)\ntest_generator = SequenceGenerator(test_file, test_sources, nt,\n sequence_start_mode='unique', data_format=data_format)\nX_test = test_generator.create_all()\ninput_shape = X_test.shape[1:]\ninputs = Input(shape=tuple(input_shape))\npredictions = test_prednet(inputs)\ntest_model = Model(inputs=inputs, outputs=predictions)\nX_hat = test_model.predict(X_test, batch_size)\nif data_format == 'channels_first':\n X_test = np.transpose(X_test, (0, 1, 3, 4, 2))\n X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))\nprint('X_hat.shape:', X_hat.shape)\nprint('X_test.shape:', X_test.shape)\nmse_model = np.mean((X_test[:, 1:] - X_hat[:, 1:]) ** 2)\nmse_prev = np.mean((X_test[:, :-1] - X_test[:, 1:]) ** 2)\nif not os.path.exists(RESULTS_SAVE_DIR):\n 
os.mkdir(RESULTS_SAVE_DIR)\nf = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')\nf.write('Model MSE: %f\\n' % mse_model)\nf.write('Previous Frame MSE: %f' % mse_prev)\nf.close()\naspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]\nplt.figure(figsize=(nt, 2 * aspect_ratio))\ngs = gridspec.GridSpec(2, nt)\ngs.update(wspace=0.0, hspace=0.0)\nplot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')\nif not os.path.exists(plot_save_dir):\n os.mkdir(plot_save_dir)\nplot_idx = np.random.permutation(X_test.shape[0])[:n_plot]\nfor i in plot_idx:\n for t in range(nt):\n plt.subplot(gs[t])\n plt.imshow(X_test[i, t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n left='off', right='off', labelbottom='off', labelleft='off')\n if t == 0:\n plt.ylabel('Actual', fontsize=10)\n plt.subplot(gs[t + nt])\n plt.imshow(X_hat[i, t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off',\n left='off', right='off', labelbottom='off', labelleft='off')\n if t == 0:\n plt.ylabel('Predicted', fontsize=10)\n plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')\n plt.clf()\n",
"step-5": "# -*- coding: UTF-8 -*-\n'''\nEvaluate trained PredNet on KITTI sequences.\nCalculates mean-squared error and plots predictions.\n'''\n\nimport os\nimport numpy as np\nfrom six.moves import cPickle\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\nfrom keras import backend as K\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Input, Dense, Flatten\n\nfrom prednet import PredNet\nfrom data_utils import SequenceGenerator\nfrom kitti_settings import *\n\n\nn_plot = 40\nbatch_size = 10\nnt = 5\n\n# 相关的weights,json的文件\nweights_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_weights.hdf5')\njson_file = os.path.join(WEIGHTS_DIR, 'prednet_facebook_segmpred_model.json')\n# weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5')\n# json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')\n# weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights-extrapfinetuned.hdf5') # where weights will be saved\n# json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model-extrapfinetuned.json')\ntest_file = os.path.join(DATA_DIR, 'facebook_segmpred_X_test.hkl')\ntest_sources = os.path.join(DATA_DIR, 'facebook_segmpred_sources_test.hkl')\n\n# Load trained model\n# 加载模型的json文件\nf = open(json_file, 'r')\n# 读取的json文件\njson_string = f.read()\nf.close()\n# 从训练后存储的模型中序列化出模型,同时包含PredNet模型定制的参数,之后加载权重模型\n# 存储模型将相应的json文件和weights文件存储即可,加载模型从对应的json文件和weights文件反序列化即可\ntrain_model = model_from_json(json_string, custom_objects = {'PredNet': PredNet})\ntrain_model.load_weights(weights_file)\n\n# Create testing model (to output predictions)\n# 创建测试模型\n# 训练模型包含了InputLayer,PredNet等等,这里选取第二层即为PredNet\n# print(train_model.layers)\nlayer_config = train_model.layers[1].get_config()\n# 评估版本中将output_mode输出模型从误差error修改为predication预测\nlayer_config['output_mode'] = 'prediction'\ndata_format = layer_config['data_format'] if 'data_format' in layer_config else 
layer_config['dim_ordering']\n# 将网络中部分修改参数加载重构为PredNet网络,keras中具有get_config和get_weights等方法\ntest_prednet = PredNet(weights=train_model.layers[1].get_weights(), **layer_config)\n# 输入层的shape为不包括batch的batch_input_shape从第一列之后的所有\n# input_shape = list(train_model.layers[0].batch_input_shape[1:])\n# 输入数据为nt,总共有10帧,来预测将来的一帧\n# input_shape[0] = nt\n# print('input_shape:', input_shape)\ntest_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format)\nX_test = test_generator.create_all()\ninput_shape = X_test.shape[1:]\n# print('input_shape:', input_shape)\n# 构建输入层\ninputs = Input(shape=tuple(input_shape))\n# 将输入层输入到prednet网络中测试输出\npredictions = test_prednet(inputs)\n# 构建输入和输出模型\ntest_model = Model(inputs=inputs, outputs=predictions)\n\n# 测试评估数据生成器\n# test_generator = SequenceGenerator(test_file, test_sources, nt, sequence_start_mode='unique', data_format=data_format)\n# X_test = test_generator.create_all()\n# 预测模型时参照batch_size,一个批次的进行load然后predict\nX_hat = test_model.predict(X_test, batch_size)\n# 这里模型的默认通道均在最后一位\nif data_format == 'channels_first':\n X_test = np.transpose(X_test, (0, 1, 3, 4, 2))\n X_hat = np.transpose(X_hat, (0, 1, 3, 4, 2))\nprint('X_hat.shape:', X_hat.shape)\nprint('X_test.shape:', X_test.shape)\n# Compare MSE of PredNet predictions vs. using last frame. 
Write results to prediction_scores.txt\n# 比较测试结果\nmse_model = np.mean( (X_test[:, 1:] - X_hat[:, 1:])**2 ) # look at all timesteps except the first\nmse_prev = np.mean( (X_test[:, :-1] - X_test[:, 1:])**2 )\nif not os.path.exists(RESULTS_SAVE_DIR): os.mkdir(RESULTS_SAVE_DIR)\nf = open(RESULTS_SAVE_DIR + 'prediction_scores.txt', 'w')\nf.write(\"Model MSE: %f\\n\" % mse_model)\nf.write(\"Previous Frame MSE: %f\" % mse_prev)\nf.close()\n\n# Plot some predictions\naspect_ratio = float(X_hat.shape[2]) / X_hat.shape[3]\nplt.figure(figsize = (nt, 2*aspect_ratio))\ngs = gridspec.GridSpec(2, nt)\ngs.update(wspace=0., hspace=0.)\nplot_save_dir = os.path.join(RESULTS_SAVE_DIR, 'prediction_plots/')\nif not os.path.exists(plot_save_dir): os.mkdir(plot_save_dir)\nplot_idx = np.random.permutation(X_test.shape[0])[:n_plot]\nfor i in plot_idx:\n for t in range(nt):\n plt.subplot(gs[t])\n plt.imshow(X_test[i,t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')\n if t==0: plt.ylabel('Actual', fontsize=10)\n\n plt.subplot(gs[t + nt])\n plt.imshow(X_hat[i,t], interpolation='none')\n plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')\n if t==0: plt.ylabel('Predicted', fontsize=10)\n\n plt.savefig(plot_save_dir + 'plot_' + str(i) + '.png')\n plt.clf()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import matplotlib.pyplot as plt
import numpy as np
# 描画用サンプルデータ
#x= np.array([0,1,2,3,4])
y = np.array([2, 2, 3, 4, 5])
print(y)
#print(range(y))
plt.figure(figsize=(10,1))
plt.bar(range(len(y)), y)
plt.savefig('test.png')
plt.clf()
|
normal
|
{
"blob_id": "2f714ed54a19ec26d7ecb1979e79366721b3d0fe",
"index": 6682,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(y)\nplt.figure(figsize=(10, 1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()\n",
"step-3": "<mask token>\ny = np.array([2, 2, 3, 4, 5])\nprint(y)\nplt.figure(figsize=(10, 1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\ny = np.array([2, 2, 3, 4, 5])\nprint(y)\nplt.figure(figsize=(10, 1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\n# 描画用サンプルデータ\n#x= np.array([0,1,2,3,4])\ny = np.array([2, 2, 3, 4, 5])\nprint(y)\n#print(range(y))\n\nplt.figure(figsize=(10,1))\nplt.bar(range(len(y)), y)\nplt.savefig('test.png')\nplt.clf()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mylcd.lcd_clear()
mylcd.lcd_display_string('RAS Hi-Pi shutdown', 1)
mylcd.lcd_display_string(' See you again ~', 2)
mylcd.lcd_display_string('http://rasplay.org', 3)
mylcd.lcd_display_string('RaspberryPi Village', 4)
sleep(2)
os.system('shutdown now -h')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mylcd = I2C_LCD_driver.lcd()
mylcd.lcd_clear()
mylcd.lcd_display_string('RAS Hi-Pi shutdown', 1)
mylcd.lcd_display_string(' See you again ~', 2)
mylcd.lcd_display_string('http://rasplay.org', 3)
mylcd.lcd_display_string('RaspberryPi Village', 4)
sleep(2)
os.system('shutdown now -h')
<|reserved_special_token_1|>
import I2C_LCD_driver
from time import *
import os
mylcd = I2C_LCD_driver.lcd()
mylcd.lcd_clear()
mylcd.lcd_display_string('RAS Hi-Pi shutdown', 1)
mylcd.lcd_display_string(' See you again ~', 2)
mylcd.lcd_display_string('http://rasplay.org', 3)
mylcd.lcd_display_string('RaspberryPi Village', 4)
sleep(2)
os.system('shutdown now -h')
<|reserved_special_token_1|>
#!/usr/bin/python
# Original code found at:
# https://github.com/zzeromin/raspberrypi/tree/master/i2c_lcd
# requires I2C_LCD_driver.py
import I2C_LCD_driver
from time import *
import os
mylcd = I2C_LCD_driver.lcd()
mylcd.lcd_clear()
mylcd.lcd_display_string("RAS Hi-Pi shutdown", 1)
mylcd.lcd_display_string(" See you again ~", 2)
mylcd.lcd_display_string("http://rasplay.org", 3)
mylcd.lcd_display_string("RaspberryPi Village", 4)
sleep(2) # 2 sec delay
os.system("shutdown now -h")
|
flexible
|
{
"blob_id": "df60d3b829c5702385f59fdefaea04f569fb7db2",
"index": 9058,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmylcd.lcd_clear()\nmylcd.lcd_display_string('RAS Hi-Pi shutdown', 1)\nmylcd.lcd_display_string(' See you again ~', 2)\nmylcd.lcd_display_string('http://rasplay.org', 3)\nmylcd.lcd_display_string('RaspberryPi Village', 4)\nsleep(2)\nos.system('shutdown now -h')\n",
"step-3": "<mask token>\nmylcd = I2C_LCD_driver.lcd()\nmylcd.lcd_clear()\nmylcd.lcd_display_string('RAS Hi-Pi shutdown', 1)\nmylcd.lcd_display_string(' See you again ~', 2)\nmylcd.lcd_display_string('http://rasplay.org', 3)\nmylcd.lcd_display_string('RaspberryPi Village', 4)\nsleep(2)\nos.system('shutdown now -h')\n",
"step-4": "import I2C_LCD_driver\nfrom time import *\nimport os\nmylcd = I2C_LCD_driver.lcd()\nmylcd.lcd_clear()\nmylcd.lcd_display_string('RAS Hi-Pi shutdown', 1)\nmylcd.lcd_display_string(' See you again ~', 2)\nmylcd.lcd_display_string('http://rasplay.org', 3)\nmylcd.lcd_display_string('RaspberryPi Village', 4)\nsleep(2)\nos.system('shutdown now -h')\n",
"step-5": "#!/usr/bin/python\n# Original code found at:\n# https://github.com/zzeromin/raspberrypi/tree/master/i2c_lcd\n# requires I2C_LCD_driver.py\n\nimport I2C_LCD_driver\nfrom time import *\nimport os\n\nmylcd = I2C_LCD_driver.lcd()\nmylcd.lcd_clear()\n\nmylcd.lcd_display_string(\"RAS Hi-Pi shutdown\", 1)\nmylcd.lcd_display_string(\" See you again ~\", 2)\nmylcd.lcd_display_string(\"http://rasplay.org\", 3)\nmylcd.lcd_display_string(\"RaspberryPi Village\", 4)\nsleep(2) # 2 sec delay\n\nos.system(\"shutdown now -h\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class K80(TN93):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, kappa, scale_q=True):
super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class K80(TN93):
_name = 'K80'
_freqs = fixed_equal_nucleotide_frequencies.copy()
def __init__(self, kappa, scale_q=True):
super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q
)
<|reserved_special_token_1|>
from phylo_utils.data import fixed_equal_nucleotide_frequencies
from phylo_utils.substitution_models.tn93 import TN93
class K80(TN93):
_name = 'K80'
_freqs = fixed_equal_nucleotide_frequencies.copy()
def __init__(self, kappa, scale_q=True):
super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q
)
|
flexible
|
{
"blob_id": "0f0595793e98187c6aaf5b1f4b59affb06bb598e",
"index": 3159,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass K80(TN93):\n <mask token>\n <mask token>\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-3": "<mask token>\n\n\nclass K80(TN93):\n _name = 'K80'\n _freqs = fixed_equal_nucleotide_frequencies.copy()\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-4": "from phylo_utils.data import fixed_equal_nucleotide_frequencies\nfrom phylo_utils.substitution_models.tn93 import TN93\n\n\nclass K80(TN93):\n _name = 'K80'\n _freqs = fixed_equal_nucleotide_frequencies.copy()\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
<|reserved_special_token_0|>
class Subject(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def filter_question(cls):
id_l = os.listdir(settings.QUESTION_PATH)
r_id_l = random.sample(id_l, 3)
return [cls.get_obj_by_id(id) for id in r_id_l]
def __str__(self):
return '<type: %s comment: %s>' % (self.type, self.comment)
class Customer(Base):
DB_PATH = settings.CUSTOMER_PATH
def __init__(self, name, sex, age, phone):
self.id = common.create_id()
self.name = name
self.sex = sex
self.age = age
self.phone = phone
class Record(Base):
DB_PATH = settings.RECORD_PATH
def __init__(self, customer_id, record_list, total_score):
self.id = common.create_id()
self.customer_id = customer_id
self.record_list = record_list
self.total_score = total_score
self.sub_time = time.strftime('%Y-%m-%d %X')
@classmethod
def get_obj_by_phone(cls, phone):
records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for record in records:
customer_obj = Customer.get_obj_by_id(record.customer_id)
if phone == customer_obj.phone:
return record
class Prize(Base):
DB_PATH = settings.PRIZE_PATH
def __init__(self, name):
self.id = common.create_id()
self.name = name
@classmethod
def create_prize(cls):
while True:
name = input('奖品名: ').strip()
if not name:
continue
obj = Prize(name)
obj.save()
choice = input('继续(Y/N)?: ').strip()
if choice == 'N' or choice == 'n':
break
@classmethod
def get_obj_by_name(cls, name):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.name == name:
return prize
def __str__(self):
return '<%s>' % self.name
class Customer2Prize(Base):
DB_PATH = settings.C2P_PATH
def __init__(self, customer_id, prize_id):
self.id = common.create_id()
self.customer_id = customer_id
self.prize_id = prize_id
@classmethod
def get_obj_by_customer_id(cls, customer_id):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.customer_id == customer_id:
return prize
@classmethod
def draw_prize(cls, customer_id):
"""
奖品概率:
0/100 欧洲十国游
1/100 iphone7 plus
10/100 mac电脑
50/100 珍藏版alex写真集一套
39/100 egon签名一个
"""
num = random.randint(1, 100)
if num == 1:
prize_name = '欧洲十国游'
if num > 1 and num <= 11:
prize_name = 'mac电脑'
if num > 11 and num <= 61:
prize_name = '珍藏版alex写真集一套'
if num > 61:
prize_name = 'egon签名一个'
prize = Prize.get_obj_by_name(prize_name)
obj = cls(customer_id, prize.id)
obj.save()
return prize_name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Subject(Base):
<|reserved_special_token_0|>
def __init__(self, type, comment, choice, right_res, score=5):
self.id = common.create_id()
self.type = type
self.comment = comment
self.choice = choice
self.right_res = right_res
self.score = score
@classmethod
def create_from_file(cls, src_file):
data = xlrd.open_workbook(src_file)
table = data.sheets()[0]
subject = {'type': None, 'comment': None, 'choice': [], 'res': set()}
for i in range(2, table.nrows):
row = table.row_values(i)
if len(subject['choice']) == 4:
obj = cls(subject['type'], subject['comment'], subject[
'choice'], subject['res'])
obj.save()
subject = {'type': None, 'comment': None, 'choice': [],
'res': set()}
if row[0]:
subject['type'] = row[0]
subject['comment'] = row[1]
else:
subject.setdefault('choice').append(row[2])
if row[3] == 1:
res_str = row[2].strip()
res = res_str[0].upper()
subject['res'].add(res)
else:
obj = cls(subject['type'], subject['comment'], subject['choice'
], subject['res'])
obj.save()
@classmethod
def filter_question(cls):
id_l = os.listdir(settings.QUESTION_PATH)
r_id_l = random.sample(id_l, 3)
return [cls.get_obj_by_id(id) for id in r_id_l]
def __str__(self):
return '<type: %s comment: %s>' % (self.type, self.comment)
class Customer(Base):
DB_PATH = settings.CUSTOMER_PATH
def __init__(self, name, sex, age, phone):
self.id = common.create_id()
self.name = name
self.sex = sex
self.age = age
self.phone = phone
class Record(Base):
DB_PATH = settings.RECORD_PATH
def __init__(self, customer_id, record_list, total_score):
self.id = common.create_id()
self.customer_id = customer_id
self.record_list = record_list
self.total_score = total_score
self.sub_time = time.strftime('%Y-%m-%d %X')
@classmethod
def get_obj_by_phone(cls, phone):
records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for record in records:
customer_obj = Customer.get_obj_by_id(record.customer_id)
if phone == customer_obj.phone:
return record
class Prize(Base):
DB_PATH = settings.PRIZE_PATH
def __init__(self, name):
self.id = common.create_id()
self.name = name
@classmethod
def create_prize(cls):
while True:
name = input('奖品名: ').strip()
if not name:
continue
obj = Prize(name)
obj.save()
choice = input('继续(Y/N)?: ').strip()
if choice == 'N' or choice == 'n':
break
@classmethod
def get_obj_by_name(cls, name):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.name == name:
return prize
def __str__(self):
return '<%s>' % self.name
class Customer2Prize(Base):
DB_PATH = settings.C2P_PATH
def __init__(self, customer_id, prize_id):
self.id = common.create_id()
self.customer_id = customer_id
self.prize_id = prize_id
@classmethod
def get_obj_by_customer_id(cls, customer_id):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.customer_id == customer_id:
return prize
@classmethod
def draw_prize(cls, customer_id):
"""
奖品概率:
0/100 欧洲十国游
1/100 iphone7 plus
10/100 mac电脑
50/100 珍藏版alex写真集一套
39/100 egon签名一个
"""
num = random.randint(1, 100)
if num == 1:
prize_name = '欧洲十国游'
if num > 1 and num <= 11:
prize_name = 'mac电脑'
if num > 11 and num <= 61:
prize_name = '珍藏版alex写真集一套'
if num > 61:
prize_name = 'egon签名一个'
prize = Prize.get_obj_by_name(prize_name)
obj = cls(customer_id, prize.id)
obj.save()
return prize_name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Subject(Base):
DB_PATH = settings.QUESTION_PATH
def __init__(self, type, comment, choice, right_res, score=5):
self.id = common.create_id()
self.type = type
self.comment = comment
self.choice = choice
self.right_res = right_res
self.score = score
@classmethod
def create_from_file(cls, src_file):
data = xlrd.open_workbook(src_file)
table = data.sheets()[0]
subject = {'type': None, 'comment': None, 'choice': [], 'res': set()}
for i in range(2, table.nrows):
row = table.row_values(i)
if len(subject['choice']) == 4:
obj = cls(subject['type'], subject['comment'], subject[
'choice'], subject['res'])
obj.save()
subject = {'type': None, 'comment': None, 'choice': [],
'res': set()}
if row[0]:
subject['type'] = row[0]
subject['comment'] = row[1]
else:
subject.setdefault('choice').append(row[2])
if row[3] == 1:
res_str = row[2].strip()
res = res_str[0].upper()
subject['res'].add(res)
else:
obj = cls(subject['type'], subject['comment'], subject['choice'
], subject['res'])
obj.save()
@classmethod
def filter_question(cls):
id_l = os.listdir(settings.QUESTION_PATH)
r_id_l = random.sample(id_l, 3)
return [cls.get_obj_by_id(id) for id in r_id_l]
def __str__(self):
return '<type: %s comment: %s>' % (self.type, self.comment)
class Customer(Base):
DB_PATH = settings.CUSTOMER_PATH
def __init__(self, name, sex, age, phone):
self.id = common.create_id()
self.name = name
self.sex = sex
self.age = age
self.phone = phone
class Record(Base):
DB_PATH = settings.RECORD_PATH
def __init__(self, customer_id, record_list, total_score):
self.id = common.create_id()
self.customer_id = customer_id
self.record_list = record_list
self.total_score = total_score
self.sub_time = time.strftime('%Y-%m-%d %X')
@classmethod
def get_obj_by_phone(cls, phone):
records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for record in records:
customer_obj = Customer.get_obj_by_id(record.customer_id)
if phone == customer_obj.phone:
return record
class Prize(Base):
DB_PATH = settings.PRIZE_PATH
def __init__(self, name):
self.id = common.create_id()
self.name = name
@classmethod
def create_prize(cls):
while True:
name = input('奖品名: ').strip()
if not name:
continue
obj = Prize(name)
obj.save()
choice = input('继续(Y/N)?: ').strip()
if choice == 'N' or choice == 'n':
break
@classmethod
def get_obj_by_name(cls, name):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.name == name:
return prize
def __str__(self):
return '<%s>' % self.name
class Customer2Prize(Base):
DB_PATH = settings.C2P_PATH
def __init__(self, customer_id, prize_id):
self.id = common.create_id()
self.customer_id = customer_id
self.prize_id = prize_id
@classmethod
def get_obj_by_customer_id(cls, customer_id):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.customer_id == customer_id:
return prize
@classmethod
def draw_prize(cls, customer_id):
"""
奖品概率:
0/100 欧洲十国游
1/100 iphone7 plus
10/100 mac电脑
50/100 珍藏版alex写真集一套
39/100 egon签名一个
"""
num = random.randint(1, 100)
if num == 1:
prize_name = '欧洲十国游'
if num > 1 and num <= 11:
prize_name = 'mac电脑'
if num > 11 and num <= 61:
prize_name = '珍藏版alex写真集一套'
if num > 61:
prize_name = 'egon签名一个'
prize = Prize.get_obj_by_name(prize_name)
obj = cls(customer_id, prize.id)
obj.save()
return prize_name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Base:
def save(self):
file_path = '%s/%s' % (self.DB_PATH, self.id)
pickle.dump(self, open(file_path, 'wb'))
@classmethod
def get_obj_by_id(cls, id):
file_path = '%s/%s' % (cls.DB_PATH, id)
return pickle.load(open(file_path, 'rb'))
class Subject(Base):
DB_PATH = settings.QUESTION_PATH
def __init__(self, type, comment, choice, right_res, score=5):
self.id = common.create_id()
self.type = type
self.comment = comment
self.choice = choice
self.right_res = right_res
self.score = score
@classmethod
def create_from_file(cls, src_file):
data = xlrd.open_workbook(src_file)
table = data.sheets()[0]
subject = {'type': None, 'comment': None, 'choice': [], 'res': set()}
for i in range(2, table.nrows):
row = table.row_values(i)
if len(subject['choice']) == 4:
obj = cls(subject['type'], subject['comment'], subject[
'choice'], subject['res'])
obj.save()
subject = {'type': None, 'comment': None, 'choice': [],
'res': set()}
if row[0]:
subject['type'] = row[0]
subject['comment'] = row[1]
else:
subject.setdefault('choice').append(row[2])
if row[3] == 1:
res_str = row[2].strip()
res = res_str[0].upper()
subject['res'].add(res)
else:
obj = cls(subject['type'], subject['comment'], subject['choice'
], subject['res'])
obj.save()
@classmethod
def filter_question(cls):
id_l = os.listdir(settings.QUESTION_PATH)
r_id_l = random.sample(id_l, 3)
return [cls.get_obj_by_id(id) for id in r_id_l]
def __str__(self):
return '<type: %s comment: %s>' % (self.type, self.comment)
class Customer(Base):
DB_PATH = settings.CUSTOMER_PATH
def __init__(self, name, sex, age, phone):
self.id = common.create_id()
self.name = name
self.sex = sex
self.age = age
self.phone = phone
class Record(Base):
DB_PATH = settings.RECORD_PATH
def __init__(self, customer_id, record_list, total_score):
self.id = common.create_id()
self.customer_id = customer_id
self.record_list = record_list
self.total_score = total_score
self.sub_time = time.strftime('%Y-%m-%d %X')
@classmethod
def get_obj_by_phone(cls, phone):
records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for record in records:
customer_obj = Customer.get_obj_by_id(record.customer_id)
if phone == customer_obj.phone:
return record
class Prize(Base):
DB_PATH = settings.PRIZE_PATH
def __init__(self, name):
self.id = common.create_id()
self.name = name
@classmethod
def create_prize(cls):
while True:
name = input('奖品名: ').strip()
if not name:
continue
obj = Prize(name)
obj.save()
choice = input('继续(Y/N)?: ').strip()
if choice == 'N' or choice == 'n':
break
@classmethod
def get_obj_by_name(cls, name):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.name == name:
return prize
def __str__(self):
return '<%s>' % self.name
class Customer2Prize(Base):
DB_PATH = settings.C2P_PATH
def __init__(self, customer_id, prize_id):
self.id = common.create_id()
self.customer_id = customer_id
self.prize_id = prize_id
@classmethod
def get_obj_by_customer_id(cls, customer_id):
prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
for prize in prizes:
if prize.customer_id == customer_id:
return prize
@classmethod
def draw_prize(cls, customer_id):
"""
奖品概率:
0/100 欧洲十国游
1/100 iphone7 plus
10/100 mac电脑
50/100 珍藏版alex写真集一套
39/100 egon签名一个
"""
num = random.randint(1, 100)
if num == 1:
prize_name = '欧洲十国游'
if num > 1 and num <= 11:
prize_name = 'mac电脑'
if num > 11 and num <= 61:
prize_name = '珍藏版alex写真集一套'
if num > 61:
prize_name = 'egon签名一个'
prize = Prize.get_obj_by_name(prize_name)
obj = cls(customer_id, prize.id)
obj.save()
return prize_name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# import os,sys
# BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.append(BASE_DIR)
from lib import common
from conf import settings
import random
import pickle
import os
import xlrd
import time
class Base:
def save(self):
file_path=r'%s/%s' %(self.DB_PATH,self.id)
pickle.dump(self,open(file_path,'wb'))
@classmethod
def get_obj_by_id(cls,id):
file_path=r'%s/%s' %(cls.DB_PATH,id)
return pickle.load(open(file_path,'rb'))
class Subject(Base):
DB_PATH=settings.QUESTION_PATH
def __init__(self,type,comment,choice,right_res,score=5):
self.id=common.create_id()
self.type=type
self.comment=comment
self.choice=choice
self.right_res=right_res
self.score=score
@classmethod
def create_from_file(cls,src_file):
data=xlrd.open_workbook(src_file)
table=data.sheets()[0]
subject={
'type':None,
'comment':None,
'choice':[],
'res':set(),
}
for i in range(2,table.nrows):
row=table.row_values(i)
if len(subject['choice'])==4:
obj=cls(
subject['type'],
subject['comment'],
subject['choice'],
subject['res']
)
obj.save()
subject={
'type':None,
'comment':None,
'choice':[],
'res':set()
}
if row[0]:
subject['type']=row[0]
subject['comment']=row[1]
else:
subject.setdefault('choice').append(row[2])
if row[3] == 1:
res_str=row[2].strip()
res=res_str[0].upper()
subject['res'].add(res)
else:
obj=cls(
subject['type'],
subject['comment'],
subject['choice'],
subject['res']
)
obj.save()
@classmethod
def filter_question(cls):
id_l=os.listdir(settings.QUESTION_PATH)
r_id_l=random.sample(id_l,3)
return [cls.get_obj_by_id(id) for id in r_id_l]
def __str__(self):
return '<type: %s comment: %s>' %(self.type,self.comment)
class Customer(Base):
DB_PATH=settings.CUSTOMER_PATH
def __init__(self,name,sex,age,phone):
self.id=common.create_id()
self.name=name
self.sex=sex
self.age=age
self.phone=phone
class Record(Base):
    """One completed quiz attempt: the answered questions and total score."""
    DB_PATH=settings.RECORD_PATH

    def __init__(self,customer_id,record_list,total_score):
        self.id=common.create_id()
        self.customer_id=customer_id
        self.record_list=record_list
        self.total_score=total_score
        # Submission timestamp, e.g. '2024-01-31 12:34:56'.
        self.sub_time=time.strftime('%Y-%m-%d %X')

    @classmethod
    def get_obj_by_phone(cls,phone):
        """Return the first record whose owner has *phone*, or None."""
        for record_id in os.listdir(cls.DB_PATH):
            record=cls.get_obj_by_id(record_id)
            owner=Customer.get_obj_by_id(record.customer_id)
            if owner.phone == phone:
                return record
class Prize(Base):
    """A prize that can be won in the lottery draw."""
    DB_PATH=settings.PRIZE_PATH

    def __init__(self,name):
        self.id=common.create_id()
        self.name=name

    @classmethod
    def create_prize(cls):
        """Interactively register prizes until the operator answers N/n."""
        while True:
            name=input('奖品名: ').strip()
            if not name:
                # Blank input: re-prompt without saving anything.
                continue
            Prize(name).save()
            if input('继续(Y/N)?: ').strip() in ('N','n'):
                break

    @classmethod
    def get_obj_by_name(cls,name):
        """Return the first stored prize with this name, or None."""
        for prize_id in os.listdir(cls.DB_PATH):
            prize=cls.get_obj_by_id(prize_id)
            if prize.name == name:
                return prize

    def __str__(self):
        return '<{}>'.format(self.name)
class Customer2Prize(Base):
    """Join record linking a customer to the prize they drew."""
    DB_PATH=settings.C2P_PATH

    def __init__(self,customer_id,prize_id):
        self.id=common.create_id()
        self.customer_id=customer_id
        self.prize_id=prize_id

    @classmethod
    def get_obj_by_customer_id(cls,customer_id):
        """Return the first draw record for this customer, or None."""
        prizes=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))
        for prize in prizes:
            if prize.customer_id == customer_id:
                return prize

    @classmethod
    def draw_prize(cls,customer_id):
        '''
        Draw a random prize for the customer, persist the link, and return
        the prize name.

        Probabilities actually implemented below (the old comments promised
        an "iphone7 plus" tier that the code never awards):
            1/100  欧洲十国游
            10/100 mac电脑
            50/100 珍藏版alex写真集一套
            39/100 egon签名一个
        '''
        num=random.randint(1,100)
        # elif chain: the ranges are mutually exclusive, so at most one
        # branch is evaluated (the original used independent ifs).
        if num == 1:
            prize_name='欧洲十国游'
        elif num <= 11:
            prize_name='mac电脑'
        elif num <= 61:
            prize_name='珍藏版alex写真集一套'
        else:
            prize_name='egon签名一个'
        prize=Prize.get_obj_by_name(prize_name)
        obj=cls(customer_id,prize.id)
        obj.save()
        return prize_name
if __name__ == '__main__':
    # One-off admin entry point: register prizes from the command line.
    # The commented-out lines were used to import and spot-check questions.
    # Subject.create_from_file(r'/Users/jieli/PycharmProjects/爬虫/t1/AnswerSys/test.xlsx')
    # res=Subject.filter_question()
    # for i in res:
    # print(i)
    Prize.create_prize()
|
flexible
|
{
"blob_id": "7cd6a8a106c21e8e377666d584e19d30c607b7d2",
"index": 9345,
"step-1": "<mask token>\n\n\nclass Subject(Base):\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def filter_question(cls):\n id_l = os.listdir(settings.QUESTION_PATH)\n r_id_l = random.sample(id_l, 3)\n return [cls.get_obj_by_id(id) for id in r_id_l]\n\n def __str__(self):\n return '<type: %s comment: %s>' % (self.type, self.comment)\n\n\nclass Customer(Base):\n DB_PATH = settings.CUSTOMER_PATH\n\n def __init__(self, name, sex, age, phone):\n self.id = common.create_id()\n self.name = name\n self.sex = sex\n self.age = age\n self.phone = phone\n\n\nclass Record(Base):\n DB_PATH = settings.RECORD_PATH\n\n def __init__(self, customer_id, record_list, total_score):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.record_list = record_list\n self.total_score = total_score\n self.sub_time = time.strftime('%Y-%m-%d %X')\n\n @classmethod\n def get_obj_by_phone(cls, phone):\n records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for record in records:\n customer_obj = Customer.get_obj_by_id(record.customer_id)\n if phone == customer_obj.phone:\n return record\n\n\nclass Prize(Base):\n DB_PATH = settings.PRIZE_PATH\n\n def __init__(self, name):\n self.id = common.create_id()\n self.name = name\n\n @classmethod\n def create_prize(cls):\n while True:\n name = input('奖品名: ').strip()\n if not name:\n continue\n obj = Prize(name)\n obj.save()\n choice = input('继续(Y/N)?: ').strip()\n if choice == 'N' or choice == 'n':\n break\n\n @classmethod\n def get_obj_by_name(cls, name):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.name == name:\n return prize\n\n def __str__(self):\n return '<%s>' % self.name\n\n\nclass Customer2Prize(Base):\n DB_PATH = settings.C2P_PATH\n\n def __init__(self, customer_id, prize_id):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.prize_id = prize_id\n\n @classmethod\n def get_obj_by_customer_id(cls, customer_id):\n prizes 
= (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.customer_id == customer_id:\n return prize\n\n @classmethod\n def draw_prize(cls, customer_id):\n \"\"\"\n 奖品概率:\n 0/100 欧洲十国游\n 1/100 iphone7 plus\n 10/100 mac电脑\n 50/100 珍藏版alex写真集一套\n 39/100 egon签名一个\n \"\"\"\n num = random.randint(1, 100)\n if num == 1:\n prize_name = '欧洲十国游'\n if num > 1 and num <= 11:\n prize_name = 'mac电脑'\n if num > 11 and num <= 61:\n prize_name = '珍藏版alex写真集一套'\n if num > 61:\n prize_name = 'egon签名一个'\n prize = Prize.get_obj_by_name(prize_name)\n obj = cls(customer_id, prize.id)\n obj.save()\n return prize_name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Subject(Base):\n <mask token>\n\n def __init__(self, type, comment, choice, right_res, score=5):\n self.id = common.create_id()\n self.type = type\n self.comment = comment\n self.choice = choice\n self.right_res = right_res\n self.score = score\n\n @classmethod\n def create_from_file(cls, src_file):\n data = xlrd.open_workbook(src_file)\n table = data.sheets()[0]\n subject = {'type': None, 'comment': None, 'choice': [], 'res': set()}\n for i in range(2, table.nrows):\n row = table.row_values(i)\n if len(subject['choice']) == 4:\n obj = cls(subject['type'], subject['comment'], subject[\n 'choice'], subject['res'])\n obj.save()\n subject = {'type': None, 'comment': None, 'choice': [],\n 'res': set()}\n if row[0]:\n subject['type'] = row[0]\n subject['comment'] = row[1]\n else:\n subject.setdefault('choice').append(row[2])\n if row[3] == 1:\n res_str = row[2].strip()\n res = res_str[0].upper()\n subject['res'].add(res)\n else:\n obj = cls(subject['type'], subject['comment'], subject['choice'\n ], subject['res'])\n obj.save()\n\n @classmethod\n def filter_question(cls):\n id_l = os.listdir(settings.QUESTION_PATH)\n r_id_l = random.sample(id_l, 3)\n return [cls.get_obj_by_id(id) for id in r_id_l]\n\n def __str__(self):\n return '<type: %s comment: %s>' % (self.type, self.comment)\n\n\nclass Customer(Base):\n DB_PATH = settings.CUSTOMER_PATH\n\n def __init__(self, name, sex, age, phone):\n self.id = common.create_id()\n self.name = name\n self.sex = sex\n self.age = age\n self.phone = phone\n\n\nclass Record(Base):\n DB_PATH = settings.RECORD_PATH\n\n def __init__(self, customer_id, record_list, total_score):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.record_list = record_list\n self.total_score = total_score\n self.sub_time = time.strftime('%Y-%m-%d %X')\n\n @classmethod\n def get_obj_by_phone(cls, phone):\n records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for record in records:\n 
customer_obj = Customer.get_obj_by_id(record.customer_id)\n if phone == customer_obj.phone:\n return record\n\n\nclass Prize(Base):\n DB_PATH = settings.PRIZE_PATH\n\n def __init__(self, name):\n self.id = common.create_id()\n self.name = name\n\n @classmethod\n def create_prize(cls):\n while True:\n name = input('奖品名: ').strip()\n if not name:\n continue\n obj = Prize(name)\n obj.save()\n choice = input('继续(Y/N)?: ').strip()\n if choice == 'N' or choice == 'n':\n break\n\n @classmethod\n def get_obj_by_name(cls, name):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.name == name:\n return prize\n\n def __str__(self):\n return '<%s>' % self.name\n\n\nclass Customer2Prize(Base):\n DB_PATH = settings.C2P_PATH\n\n def __init__(self, customer_id, prize_id):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.prize_id = prize_id\n\n @classmethod\n def get_obj_by_customer_id(cls, customer_id):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.customer_id == customer_id:\n return prize\n\n @classmethod\n def draw_prize(cls, customer_id):\n \"\"\"\n 奖品概率:\n 0/100 欧洲十国游\n 1/100 iphone7 plus\n 10/100 mac电脑\n 50/100 珍藏版alex写真集一套\n 39/100 egon签名一个\n \"\"\"\n num = random.randint(1, 100)\n if num == 1:\n prize_name = '欧洲十国游'\n if num > 1 and num <= 11:\n prize_name = 'mac电脑'\n if num > 11 and num <= 61:\n prize_name = '珍藏版alex写真集一套'\n if num > 61:\n prize_name = 'egon签名一个'\n prize = Prize.get_obj_by_name(prize_name)\n obj = cls(customer_id, prize.id)\n obj.save()\n return prize_name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Subject(Base):\n DB_PATH = settings.QUESTION_PATH\n\n def __init__(self, type, comment, choice, right_res, score=5):\n self.id = common.create_id()\n self.type = type\n self.comment = comment\n self.choice = choice\n self.right_res = right_res\n self.score = score\n\n @classmethod\n def create_from_file(cls, src_file):\n data = xlrd.open_workbook(src_file)\n table = data.sheets()[0]\n subject = {'type': None, 'comment': None, 'choice': [], 'res': set()}\n for i in range(2, table.nrows):\n row = table.row_values(i)\n if len(subject['choice']) == 4:\n obj = cls(subject['type'], subject['comment'], subject[\n 'choice'], subject['res'])\n obj.save()\n subject = {'type': None, 'comment': None, 'choice': [],\n 'res': set()}\n if row[0]:\n subject['type'] = row[0]\n subject['comment'] = row[1]\n else:\n subject.setdefault('choice').append(row[2])\n if row[3] == 1:\n res_str = row[2].strip()\n res = res_str[0].upper()\n subject['res'].add(res)\n else:\n obj = cls(subject['type'], subject['comment'], subject['choice'\n ], subject['res'])\n obj.save()\n\n @classmethod\n def filter_question(cls):\n id_l = os.listdir(settings.QUESTION_PATH)\n r_id_l = random.sample(id_l, 3)\n return [cls.get_obj_by_id(id) for id in r_id_l]\n\n def __str__(self):\n return '<type: %s comment: %s>' % (self.type, self.comment)\n\n\nclass Customer(Base):\n DB_PATH = settings.CUSTOMER_PATH\n\n def __init__(self, name, sex, age, phone):\n self.id = common.create_id()\n self.name = name\n self.sex = sex\n self.age = age\n self.phone = phone\n\n\nclass Record(Base):\n DB_PATH = settings.RECORD_PATH\n\n def __init__(self, customer_id, record_list, total_score):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.record_list = record_list\n self.total_score = total_score\n self.sub_time = time.strftime('%Y-%m-%d %X')\n\n @classmethod\n def get_obj_by_phone(cls, phone):\n records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for record 
in records:\n customer_obj = Customer.get_obj_by_id(record.customer_id)\n if phone == customer_obj.phone:\n return record\n\n\nclass Prize(Base):\n DB_PATH = settings.PRIZE_PATH\n\n def __init__(self, name):\n self.id = common.create_id()\n self.name = name\n\n @classmethod\n def create_prize(cls):\n while True:\n name = input('奖品名: ').strip()\n if not name:\n continue\n obj = Prize(name)\n obj.save()\n choice = input('继续(Y/N)?: ').strip()\n if choice == 'N' or choice == 'n':\n break\n\n @classmethod\n def get_obj_by_name(cls, name):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.name == name:\n return prize\n\n def __str__(self):\n return '<%s>' % self.name\n\n\nclass Customer2Prize(Base):\n DB_PATH = settings.C2P_PATH\n\n def __init__(self, customer_id, prize_id):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.prize_id = prize_id\n\n @classmethod\n def get_obj_by_customer_id(cls, customer_id):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.customer_id == customer_id:\n return prize\n\n @classmethod\n def draw_prize(cls, customer_id):\n \"\"\"\n 奖品概率:\n 0/100 欧洲十国游\n 1/100 iphone7 plus\n 10/100 mac电脑\n 50/100 珍藏版alex写真集一套\n 39/100 egon签名一个\n \"\"\"\n num = random.randint(1, 100)\n if num == 1:\n prize_name = '欧洲十国游'\n if num > 1 and num <= 11:\n prize_name = 'mac电脑'\n if num > 11 and num <= 61:\n prize_name = '珍藏版alex写真集一套'\n if num > 61:\n prize_name = 'egon签名一个'\n prize = Prize.get_obj_by_name(prize_name)\n obj = cls(customer_id, prize.id)\n obj.save()\n return prize_name\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Base:\n\n def save(self):\n file_path = '%s/%s' % (self.DB_PATH, self.id)\n pickle.dump(self, open(file_path, 'wb'))\n\n @classmethod\n def get_obj_by_id(cls, id):\n file_path = '%s/%s' % (cls.DB_PATH, id)\n return pickle.load(open(file_path, 'rb'))\n\n\nclass Subject(Base):\n DB_PATH = settings.QUESTION_PATH\n\n def __init__(self, type, comment, choice, right_res, score=5):\n self.id = common.create_id()\n self.type = type\n self.comment = comment\n self.choice = choice\n self.right_res = right_res\n self.score = score\n\n @classmethod\n def create_from_file(cls, src_file):\n data = xlrd.open_workbook(src_file)\n table = data.sheets()[0]\n subject = {'type': None, 'comment': None, 'choice': [], 'res': set()}\n for i in range(2, table.nrows):\n row = table.row_values(i)\n if len(subject['choice']) == 4:\n obj = cls(subject['type'], subject['comment'], subject[\n 'choice'], subject['res'])\n obj.save()\n subject = {'type': None, 'comment': None, 'choice': [],\n 'res': set()}\n if row[0]:\n subject['type'] = row[0]\n subject['comment'] = row[1]\n else:\n subject.setdefault('choice').append(row[2])\n if row[3] == 1:\n res_str = row[2].strip()\n res = res_str[0].upper()\n subject['res'].add(res)\n else:\n obj = cls(subject['type'], subject['comment'], subject['choice'\n ], subject['res'])\n obj.save()\n\n @classmethod\n def filter_question(cls):\n id_l = os.listdir(settings.QUESTION_PATH)\n r_id_l = random.sample(id_l, 3)\n return [cls.get_obj_by_id(id) for id in r_id_l]\n\n def __str__(self):\n return '<type: %s comment: %s>' % (self.type, self.comment)\n\n\nclass Customer(Base):\n DB_PATH = settings.CUSTOMER_PATH\n\n def __init__(self, name, sex, age, phone):\n self.id = common.create_id()\n self.name = name\n self.sex = sex\n self.age = age\n self.phone = phone\n\n\nclass Record(Base):\n DB_PATH = settings.RECORD_PATH\n\n def __init__(self, customer_id, record_list, total_score):\n self.id = common.create_id()\n self.customer_id = 
customer_id\n self.record_list = record_list\n self.total_score = total_score\n self.sub_time = time.strftime('%Y-%m-%d %X')\n\n @classmethod\n def get_obj_by_phone(cls, phone):\n records = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for record in records:\n customer_obj = Customer.get_obj_by_id(record.customer_id)\n if phone == customer_obj.phone:\n return record\n\n\nclass Prize(Base):\n DB_PATH = settings.PRIZE_PATH\n\n def __init__(self, name):\n self.id = common.create_id()\n self.name = name\n\n @classmethod\n def create_prize(cls):\n while True:\n name = input('奖品名: ').strip()\n if not name:\n continue\n obj = Prize(name)\n obj.save()\n choice = input('继续(Y/N)?: ').strip()\n if choice == 'N' or choice == 'n':\n break\n\n @classmethod\n def get_obj_by_name(cls, name):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.name == name:\n return prize\n\n def __str__(self):\n return '<%s>' % self.name\n\n\nclass Customer2Prize(Base):\n DB_PATH = settings.C2P_PATH\n\n def __init__(self, customer_id, prize_id):\n self.id = common.create_id()\n self.customer_id = customer_id\n self.prize_id = prize_id\n\n @classmethod\n def get_obj_by_customer_id(cls, customer_id):\n prizes = (cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.customer_id == customer_id:\n return prize\n\n @classmethod\n def draw_prize(cls, customer_id):\n \"\"\"\n 奖品概率:\n 0/100 欧洲十国游\n 1/100 iphone7 plus\n 10/100 mac电脑\n 50/100 珍藏版alex写真集一套\n 39/100 egon签名一个\n \"\"\"\n num = random.randint(1, 100)\n if num == 1:\n prize_name = '欧洲十国游'\n if num > 1 and num <= 11:\n prize_name = 'mac电脑'\n if num > 11 and num <= 61:\n prize_name = '珍藏版alex写真集一套'\n if num > 61:\n prize_name = 'egon签名一个'\n prize = Prize.get_obj_by_name(prize_name)\n obj = cls(customer_id, prize.id)\n obj.save()\n return prize_name\n\n\n<mask token>\n",
"step-5": "# import os,sys\n# BASE_DIR=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# sys.path.append(BASE_DIR)\n\nfrom lib import common\nfrom conf import settings\nimport random\nimport pickle\nimport os\nimport xlrd\nimport time\n\nclass Base:\n def save(self):\n file_path=r'%s/%s' %(self.DB_PATH,self.id)\n pickle.dump(self,open(file_path,'wb'))\n\n @classmethod\n def get_obj_by_id(cls,id):\n file_path=r'%s/%s' %(cls.DB_PATH,id)\n return pickle.load(open(file_path,'rb'))\n\nclass Subject(Base):\n DB_PATH=settings.QUESTION_PATH\n def __init__(self,type,comment,choice,right_res,score=5):\n self.id=common.create_id()\n self.type=type\n self.comment=comment\n self.choice=choice\n self.right_res=right_res\n self.score=score\n\n\n @classmethod\n def create_from_file(cls,src_file):\n data=xlrd.open_workbook(src_file)\n table=data.sheets()[0]\n subject={\n 'type':None,\n 'comment':None,\n 'choice':[],\n 'res':set(),\n }\n for i in range(2,table.nrows):\n row=table.row_values(i)\n if len(subject['choice'])==4:\n obj=cls(\n subject['type'],\n subject['comment'],\n subject['choice'],\n subject['res']\n )\n obj.save()\n subject={\n 'type':None,\n 'comment':None,\n 'choice':[],\n 'res':set()\n }\n if row[0]:\n subject['type']=row[0]\n subject['comment']=row[1]\n else:\n subject.setdefault('choice').append(row[2])\n if row[3] == 1:\n res_str=row[2].strip()\n res=res_str[0].upper()\n subject['res'].add(res)\n\n else:\n obj=cls(\n subject['type'],\n subject['comment'],\n subject['choice'],\n subject['res']\n )\n obj.save()\n\n @classmethod\n def filter_question(cls):\n id_l=os.listdir(settings.QUESTION_PATH)\n r_id_l=random.sample(id_l,3)\n return [cls.get_obj_by_id(id) for id in r_id_l]\n\n def __str__(self):\n return '<type: %s comment: %s>' %(self.type,self.comment)\n\n\nclass Customer(Base):\n DB_PATH=settings.CUSTOMER_PATH\n def __init__(self,name,sex,age,phone):\n self.id=common.create_id()\n self.name=name\n self.sex=sex\n self.age=age\n 
self.phone=phone\n\n\nclass Record(Base):\n DB_PATH=settings.RECORD_PATH\n def __init__(self,customer_id,record_list,total_score):\n self.id=common.create_id()\n self.customer_id=customer_id\n self.record_list=record_list\n self.total_score=total_score\n self.sub_time=time.strftime('%Y-%m-%d %X')\n\n @classmethod\n def get_obj_by_phone(cls,phone):\n records=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for record in records:\n customer_obj=Customer.get_obj_by_id(record.customer_id)\n if phone == customer_obj.phone:\n return record\n\n\nclass Prize(Base):\n DB_PATH=settings.PRIZE_PATH\n def __init__(self,name):\n self.id=common.create_id()\n self.name=name\n\n @classmethod\n def create_prize(cls):\n while True:\n name=input('奖品名: ').strip()\n if not name:continue\n obj=Prize(name)\n obj.save()\n choice=input('继续(Y/N)?: ').strip()\n if choice == 'N' or choice == 'n':\n break\n\n @classmethod\n def get_obj_by_name(cls,name):\n prizes=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.name == name:\n return prize\n\n def __str__(self):\n return '<%s>' %self.name\n\nclass Customer2Prize(Base):\n DB_PATH=settings.C2P_PATH\n def __init__(self,customer_id,prize_id):\n self.id=common.create_id()\n self.customer_id=customer_id\n self.prize_id=prize_id\n\n @classmethod\n def get_obj_by_customer_id(cls,customer_id):\n prizes=(cls.get_obj_by_id(id) for id in os.listdir(cls.DB_PATH))\n for prize in prizes:\n if prize.customer_id == customer_id:\n return prize\n\n @classmethod\n def draw_prize(cls,customer_id):\n '''\n 奖品概率:\n 0/100 欧洲十国游\n 1/100 iphone7 plus\n 10/100 mac电脑\n 50/100 珍藏版alex写真集一套\n 39/100 egon签名一个\n '''\n num=random.randint(1,100)\n\n if num == 1:\n # 1/100 iphone7 plus\n prize_name='欧洲十国游'\n\n if num >1 and num <=11:\n # mac电脑\n prize_name='mac电脑'\n if num > 11 and num <=61:\n # 珍藏版alex写真集一套\n prize_name='珍藏版alex写真集一套'\n if num > 61:\n # egon签名一个\n prize_name='egon签名一个'\n 
prize=Prize.get_obj_by_name(prize_name)\n obj=cls(customer_id,prize.id)\n obj.save()\n return prize_name\n\nif __name__ == '__main__':\n\n # Subject.create_from_file(r'/Users/jieli/PycharmProjects/爬虫/t1/AnswerSys/test.xlsx')\n # res=Subject.filter_question()\n # for i in res:\n # print(i)\n\n Prize.create_prize()",
"step-ids": [
21,
23,
24,
27,
30
]
}
|
[
21,
23,
24,
27,
30
] |
t = int(input())
m = 0
while(m < t):
n = int(input())
arr = list(map(int, input().strip().split(" ")))
s = int(input())
hash_map = {}
curr_sum = 0
count = 0
for i in range(len(arr)):
curr_sum += arr[i]
if curr_sum == s:
count += 1
if curr_sum - s in hash_map:
count += hash_map[curr_sum - s]
if curr_sum not in hash_map:
hash_map[curr_sum] = 0
hash_map[curr_sum] += 1
print(count)
m += 1
|
normal
|
{
"blob_id": "3ac69068db94f45bc44a8295a10603126d004b34",
"index": 6219,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile m < t:\n n = int(input())\n arr = list(map(int, input().strip().split(' ')))\n s = int(input())\n hash_map = {}\n curr_sum = 0\n count = 0\n for i in range(len(arr)):\n curr_sum += arr[i]\n if curr_sum == s:\n count += 1\n if curr_sum - s in hash_map:\n count += hash_map[curr_sum - s]\n if curr_sum not in hash_map:\n hash_map[curr_sum] = 0\n hash_map[curr_sum] += 1\n print(count)\n m += 1\n",
"step-3": "t = int(input())\nm = 0\nwhile m < t:\n n = int(input())\n arr = list(map(int, input().strip().split(' ')))\n s = int(input())\n hash_map = {}\n curr_sum = 0\n count = 0\n for i in range(len(arr)):\n curr_sum += arr[i]\n if curr_sum == s:\n count += 1\n if curr_sum - s in hash_map:\n count += hash_map[curr_sum - s]\n if curr_sum not in hash_map:\n hash_map[curr_sum] = 0\n hash_map[curr_sum] += 1\n print(count)\n m += 1\n",
"step-4": "t = int(input())\nm = 0\nwhile(m < t):\n n = int(input())\n arr = list(map(int, input().strip().split(\" \")))\n s = int(input())\n hash_map = {}\n curr_sum = 0\n count = 0\n for i in range(len(arr)):\n curr_sum += arr[i]\n if curr_sum == s:\n count += 1\n if curr_sum - s in hash_map:\n count += hash_map[curr_sum - s]\n if curr_sum not in hash_map:\n hash_map[curr_sum] = 0\n hash_map[curr_sum] += 1\n print(count)\n \n \n \n m += 1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
from aws_cdk import core
import os
from ec2_ialb_aga_custom_r53.network_stack import NetworkingStack
from ec2_ialb_aga_custom_r53.aga_stack import AgaStack
from ec2_ialb_aga_custom_r53.alb_stack import ALBStack
from ec2_ialb_aga_custom_r53.certs_stack import CertsStack
from ec2_ialb_aga_custom_r53.ec2_stack import EC2Stack
deploy_env = core.Environment(
    account=os.environ["CDK_DEFAULT_ACCOUNT"],
    region=os.environ["CDK_DEFAULT_REGION"])

# Deployment-specific values injected through the environment at synth time.
CIDR = os.getenv("VPC_CIDR", "")
DOMAIN = os.getenv("R53_DOMAIN", "")
SUB_DOMAIN = "code-server"

app = core.App()

# Explicit add_dependency() calls make the deploy order follow the data
# flow: network -> instance / certificates -> load balancer -> accelerator.
network = NetworkingStack(app, "GravitonBlog-NetworkingStack", CIDR, env=deploy_env)

instance = EC2Stack(app, "GravitonBlog-EC2Stack", network.vpc, env=deploy_env)
instance.add_dependency(network)

certificates = CertsStack(app, "GravitonBlog-CertsStack",
                          DOMAIN, SUB_DOMAIN, env=deploy_env)

load_balancer = ALBStack(app, "GravitonBlog-ALBStack", network.vpc,
                         instance.instance, certificates.domain_cert,
                         env=deploy_env)
load_balancer.add_dependency(network)
load_balancer.add_dependency(instance)
load_balancer.add_dependency(certificates)

accelerator = AgaStack(app, "GravitonBlog-AGAStack", network.vpc,
                       load_balancer.alb, certificates.blog_hosted_zone,
                       SUB_DOMAIN, env=deploy_env)
accelerator.add_dependency(network)
accelerator.add_dependency(certificates)
accelerator.add_dependency(load_balancer)

app.synth()
|
normal
|
{
"blob_id": "2f96e58a825744ae6baafd1bfb936210500f0fd0",
"index": 6334,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nec2.add_dependency(net)\n<mask token>\nalb.add_dependency(net)\nalb.add_dependency(ec2)\nalb.add_dependency(cert)\n<mask token>\naga.add_dependency(net)\naga.add_dependency(cert)\naga.add_dependency(alb)\napp.synth()\n",
"step-3": "<mask token>\ndeploy_env = core.Environment(account=os.environ['CDK_DEFAULT_ACCOUNT'],\n region=os.environ['CDK_DEFAULT_REGION'])\nCIDR = os.getenv('VPC_CIDR', '')\nDOMAIN = os.getenv('R53_DOMAIN', '')\nSUB_DOMAIN = 'code-server'\napp = core.App()\nnet = NetworkingStack(app, 'GravitonBlog-NetworkingStack', CIDR, env=deploy_env\n )\nec2 = EC2Stack(app, 'GravitonBlog-EC2Stack', net.vpc, env=deploy_env)\nec2.add_dependency(net)\ncert = CertsStack(app, 'GravitonBlog-CertsStack', DOMAIN, SUB_DOMAIN, env=\n deploy_env)\nalb = ALBStack(app, 'GravitonBlog-ALBStack', net.vpc, ec2.instance, cert.\n domain_cert, env=deploy_env)\nalb.add_dependency(net)\nalb.add_dependency(ec2)\nalb.add_dependency(cert)\naga = AgaStack(app, 'GravitonBlog-AGAStack', net.vpc, alb.alb, cert.\n blog_hosted_zone, SUB_DOMAIN, env=deploy_env)\naga.add_dependency(net)\naga.add_dependency(cert)\naga.add_dependency(alb)\napp.synth()\n",
"step-4": "from aws_cdk import core\nimport os\nfrom ec2_ialb_aga_custom_r53.network_stack import NetworkingStack\nfrom ec2_ialb_aga_custom_r53.aga_stack import AgaStack\nfrom ec2_ialb_aga_custom_r53.alb_stack import ALBStack\nfrom ec2_ialb_aga_custom_r53.certs_stack import CertsStack\nfrom ec2_ialb_aga_custom_r53.ec2_stack import EC2Stack\ndeploy_env = core.Environment(account=os.environ['CDK_DEFAULT_ACCOUNT'],\n region=os.environ['CDK_DEFAULT_REGION'])\nCIDR = os.getenv('VPC_CIDR', '')\nDOMAIN = os.getenv('R53_DOMAIN', '')\nSUB_DOMAIN = 'code-server'\napp = core.App()\nnet = NetworkingStack(app, 'GravitonBlog-NetworkingStack', CIDR, env=deploy_env\n )\nec2 = EC2Stack(app, 'GravitonBlog-EC2Stack', net.vpc, env=deploy_env)\nec2.add_dependency(net)\ncert = CertsStack(app, 'GravitonBlog-CertsStack', DOMAIN, SUB_DOMAIN, env=\n deploy_env)\nalb = ALBStack(app, 'GravitonBlog-ALBStack', net.vpc, ec2.instance, cert.\n domain_cert, env=deploy_env)\nalb.add_dependency(net)\nalb.add_dependency(ec2)\nalb.add_dependency(cert)\naga = AgaStack(app, 'GravitonBlog-AGAStack', net.vpc, alb.alb, cert.\n blog_hosted_zone, SUB_DOMAIN, env=deploy_env)\naga.add_dependency(net)\naga.add_dependency(cert)\naga.add_dependency(alb)\napp.synth()\n",
"step-5": "#!/usr/bin/env python3\n\nfrom aws_cdk import core\nimport os\n\nfrom ec2_ialb_aga_custom_r53.network_stack import NetworkingStack\nfrom ec2_ialb_aga_custom_r53.aga_stack import AgaStack\nfrom ec2_ialb_aga_custom_r53.alb_stack import ALBStack\nfrom ec2_ialb_aga_custom_r53.certs_stack import CertsStack\nfrom ec2_ialb_aga_custom_r53.ec2_stack import EC2Stack\n\ndeploy_env = core.Environment(\n account=os.environ[\"CDK_DEFAULT_ACCOUNT\"],\n region=os.environ[\"CDK_DEFAULT_REGION\"])\n\n# These need to be injected at synth/deployment time\nCIDR = os.getenv(\"VPC_CIDR\", \"\")\nDOMAIN = os.getenv(\"R53_DOMAIN\", \"\")\nSUB_DOMAIN = \"code-server\"\n\napp = core.App()\n\nnet = NetworkingStack(app, \"GravitonBlog-NetworkingStack\", CIDR, env=deploy_env)\n\nec2 = EC2Stack(app, \"GravitonBlog-EC2Stack\", net.vpc, env=deploy_env)\nec2.add_dependency(net)\n\ncert = CertsStack(app, \"GravitonBlog-CertsStack\",\n DOMAIN, SUB_DOMAIN, env=deploy_env)\n\nalb = ALBStack(app, \"GravitonBlog-ALBStack\", net.vpc, ec2.instance,\n cert.domain_cert, env=deploy_env)\nalb.add_dependency(net)\nalb.add_dependency(ec2)\nalb.add_dependency(cert)\n\naga = AgaStack(app, \"GravitonBlog-AGAStack\", net.vpc, alb.alb,\n cert.blog_hosted_zone, SUB_DOMAIN, env=deploy_env)\naga.add_dependency(net)\naga.add_dependency(cert)\naga.add_dependency(alb)\n\napp.synth()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# NumPy(Numerical Python) 是 Python 语言的一个扩展程序库,
# 支持大量的维度数组与矩阵运算,此外也针对数组运算提供大量的数学函数库。
|
normal
|
{
"blob_id": "94348aed0585024c70062e9201fb41aae2122625",
"index": 9331,
"step-1": "# NumPy(Numerical Python) 是 Python 语言的一个扩展程序库,\r\n# 支持大量的维度数组与矩阵运算,此外也针对数组运算提供大量的数学函数库。",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val = 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val = 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
if __name__ == '__main__':
keys = ['key1', 'key2', 'key3']
values = [0, 1, 2, 3, 4]
key_values = [{'key': key, 'value': value} for key in keys for value in
values]
random.shuffle(key_values)
print(quicksort(key_values, 7))
<|reserved_special_token_1|>
import random
def quicksort(x, pivot_index):
key1_idx, key2_idx, key3_idx = 0, 0, len(x)
key1_val, key2_val = 'key1', 'key2'
while key2_idx < key3_idx:
if x[key2_idx]['key'] == key1_val:
x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]
key1_idx, key2_idx = key1_idx + 1, key2_idx + 1
elif x[key2_idx]['key'] == key2_val:
key2_idx += 1
else:
key3_idx -= 1
x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]
return x
if __name__ == '__main__':
keys = ['key1', 'key2', 'key3']
values = [0, 1, 2, 3, 4]
key_values = [{'key': key, 'value': value} for key in keys for value in
values]
random.shuffle(key_values)
print(quicksort(key_values, 7))
<|reserved_special_token_1|>
import random
#quicksort a list of objects based on keys, which can be any of 3 values
# done in O(n) time in one pass, and O(1) additional space complexity
def quicksort(x, pivot_index, key1_val='key1', key2_val='key2'):
    """Three-way partition *x* in place by each element's 'key' field.

    Despite the name, this is not a full quicksort: it performs a single
    Dutch-national-flag pass over a list of dicts, grouping elements whose
    'key' equals *key1_val* first, then *key2_val*, then everything else.
    Runs in O(n) time and O(1) extra space and returns the (mutated) list.

    ``pivot_index`` is unused; it is kept only for backward compatibility
    with existing callers. ``key1_val``/``key2_val`` generalize the
    previously hard-coded group labels (defaults preserve old behavior).
    """
    first_free, cursor, tail = 0, 0, len(x)
    while cursor < tail:
        if x[cursor]['key'] == key1_val:
            # Grow the first group; both pointers advance.
            x[first_free], x[cursor] = x[cursor], x[first_free]
            first_free, cursor = first_free + 1, cursor + 1
        elif x[cursor]['key'] == key2_val:
            # Middle group: leave in place.
            cursor += 1
        else:
            # Third group: swap to the shrinking tail; do not advance the
            # cursor, since the swapped-in element is still unexamined.
            tail -= 1
            x[cursor], x[tail] = x[tail], x[cursor]
    return x
if __name__ == '__main__':
    # Build 15 records (3 keys x 5 values), shuffle them, then partition
    # by key and print the result.
    records = []
    for key_name in ['key1', 'key2', 'key3']:
        for value in [0, 1, 2, 3, 4]:
            records.append({'key': key_name, 'value': value})
    random.shuffle(records)
    print(quicksort(records, 7))
|
flexible
|
{
"blob_id": "f193094c551df2a32860948b1a8710b53ca0dfb6",
"index": 2413,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n key_values = [{'key': key, 'value': value} for key in keys for value in\n values]\n random.shuffle(key_values)\n print(quicksort(key_values, 7))\n",
"step-4": "import random\n\n\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val = 'key1', 'key2'\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n return x\n\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n key_values = [{'key': key, 'value': value} for key in keys for value in\n values]\n random.shuffle(key_values)\n print(quicksort(key_values, 7))\n",
"step-5": "import random\n\n#quicksort a list of objects based on keys, which can be any of 3 values\n# done in O(n) time in one pass, and O(1) additional space complexity\ndef quicksort(x, pivot_index):\n key1_idx, key2_idx, key3_idx = 0, 0, len(x)\n key1_val, key2_val= 'key1', 'key2'\n\n while key2_idx < key3_idx:\n if x[key2_idx]['key'] == key1_val:\n x[key1_idx], x[key2_idx] = x[key2_idx], x[key1_idx]\n key1_idx, key2_idx = key1_idx + 1, key2_idx + 1\n elif x[key2_idx]['key'] == key2_val:\n key2_idx += 1\n else:\n key3_idx -= 1\n x[key2_idx], x[key3_idx] = x[key3_idx], x[key2_idx]\n\n return x\n\nif __name__ == '__main__':\n keys = ['key1', 'key2', 'key3']\n values = [0, 1, 2, 3, 4]\n\n key_values = [{'key': key, 'value': value} for key in keys for value in values]\n random.shuffle(key_values)\n\n print(quicksort(key_values, 7))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class BinarySearchTreeNode:
    """A single node of a binary search tree."""

    def __init__(self, node_data):
        # Payload plus initially-empty child links.
        self.data = node_data
        self.left = None
        self.right = None


def bst_contains(root: BinarySearchTreeNode, number):
    """Return 1 if *number* is stored in the BST rooted at *root*, else 0.

    Walks the tree iteratively: descend right when the current value is
    smaller than *number*, left when it is larger, stop on an exact match.
    An empty tree (root is None) yields 0.
    """
    node = root
    while node is not None:
        if node.data == number:
            return 1
        # Larger targets live in the right subtree, smaller in the left.
        node = node.right if node.data < number else node.left
    # Fell off the tree without finding the value.
    return 0
|
normal
|
{
"blob_id": "3bdf3a48451b83347a6c9a9851b5b85b608f0b63",
"index": 2826,
"step-1": "<mask token>\n",
"step-2": "class BinarySearchTreeNode:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class BinarySearchTreeNode:\n\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\n\n<mask token>\n",
"step-4": "class BinarySearchTreeNode:\n\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\n\ndef bst_contains(root: BinarySearchTreeNode, number):\n if root is None:\n return 0\n if root.data == number:\n return 1\n elif root.data < number:\n return bst_contains(root.right, number)\n elif root.data > number:\n return bst_contains(root.left, number)\n",
"step-5": "class BinarySearchTreeNode:\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\ndef bst_contains(root: BinarySearchTreeNode, number):\n if root is None:\n return 0\n\n if(root.data == number):\n return 1\n elif(root.data < number):\n #si int es mas grande que el data actual, buscas en derecha\n #-----------return es importantitismo------------\n return bst_contains(root.right, number)\n\n elif(root.data > number):\n #si int es mas pequeno que el data actual, buscas en derecha\n #-----------return es importantitismo------------\n return bst_contains(root.left, number)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):
    """Load the promoter-DCI table for one comparison, or None if absent.

    Reads ``{DCI_dir}/{subdir}/{hm_mark}_{compr_type}{suffix}.csv`` as a
    tab-separated table, using the fifth column (gene id) as the index.
    """
    dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        compr_type, suffix)
    if os.path.isfile(dci_file):
        # NOTE(review): tab-separated despite the .csv extension; the first
        # file line is consumed as the header row before columns are renamed.
        dci_df = pd.read_csv(dci_file, sep='\t', index_col=4)
        dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',
            'strand', 'DCI']
        return dci_df
    else:
        # Not every comparison exists for every sub-directory.
        return None
def scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,
        suffix, dci_thre):
    """Scatter one comparison's per-gene DCI scores against another's.

    compr_type is [x-comparison, y-comparison] (e.g. ['WT_over_Vector',
    'DEL_over_WT']).  Genes whose WT-over-Vector DCI exceeds +/-dci_thre
    are over-plotted in red/blue, a linear fit is drawn, and the fit's
    r/p values are recorded into num_DCI_bins_df (mutated in place).
    The figure is saved under ``{outdir}/{subdir}/``.

    Returns (up_bins, dn_bins): index labels of the highlighted genes,
    or ([], []) when the y-comparison table does not exist.
    """
    compr_x = compr_type[0]
    compr_y = compr_type[1]
    # Skip silently when this comparison was not generated for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        compr_y, suffix)
    if os.path.isfile(test_file):
        # Genes are classified by their DCI in the WT-over-Vector comparison.
        dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,
            'WT_over_Vector', suffix)
        up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre
            ].index
        dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *
            dci_thre].index
        dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)
        dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)
        # All genes in grey; classified genes drawn on top in color.
        plt.figure(figsize=(2.1, 2.1))
        plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=
            'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')
        plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,
            'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
        plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,
            'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))
        # Linear regression over all genes; r/p stored in the summary table.
        x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,
            dci_thre)
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value
        x_sort = np.sort(x)
        plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)
        # NOTE(review): plt.axes() returned the current axes in older
        # matplotlib; newer releases create a new axes here — verify the
        # pinned matplotlib version.
        plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,
            transform=plt.axes().transAxes, ha='right', va='top')
        plt.axhline(y=0, c='k', lw=1)
        plt.axvline(x=0, c='k', lw=1)
        plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,
            handletextpad=0.1, handlelength=1, loc='upper left',
            markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)
        # Human-readable axis labels, e.g. 'WT' over 'Vector'.
        xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[
            compr_x.split('_')[-1]]
        ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[
            compr_y.split('_')[-1]]
        plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)
        plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)
        plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,
            subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),
            bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
        return up_bins, dn_bins
    return [], []
def plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,
        dci_thre, num_DCI_bins_df, flag):
    """Box-plot DCI scores of *selected_bins* across the four comparisons.

    For each comparison whose table exists, the selected genes' rows are
    written to CSV, a one-sample t-test against 0 is run (a red star marks
    p < 0.05), and the statistic/p-value are recorded into num_DCI_bins_df
    (mutated in place).  The figure is saved under ``{outdir}/{subdir}/``.

    NOTE(review): *color* is accepted but unused in this version;
    *flag* ('increased'/'decreased') only names the output files.
    """
    # Proceed only when the baseline comparison exists for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        'WT_over_Vector', suffix)
    if os.path.isfile(test_file):
        box_vals = []
        xticklabels = []
        sig_vals, sig_colors = [], []
        for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',
                'TPR_over_WT']:
            dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)
            if dci_df is not None:
                box_val = dci_df.loc[selected_bins]['DCI'].values
                # Persist the plotted values next to the figure.
                dci_df.loc[selected_bins].to_csv(
                    '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,
                    subdir, hm_mark, flag, suffix, dci_thre, compr_col))
                # One-sample t-test: is the group's mean DCI shifted from 0?
                s, p = stats.ttest_1samp(box_val, 0)
                sig_vals.append('*' if p < 0.05 else '')
                sig_colors.append('b' if s < 0 else 'r')
                box_vals.append(box_val)
                xa, xb = cellType_labels[compr_col.split('_')[0]
                    ], cellType_labels[compr_col.split('_')[-1]]
                xticklabels.append('{} over {}'.format(xa, xb))
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} s'.format(title.split()[2],
                    compr_col)] = '{:.2f}'.format(s)
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} p'.format(title.split()[2],
                    compr_col)] = '{:.2e}'.format(p)
        positions = np.arange(len(box_vals))
        fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))
        g = plt.boxplot(box_vals, positions=positions, widths=0.5,
            patch_artist=True, boxprops=dict(color='k', facecolor='w', fill
            =None, lw=1), medianprops=dict(color='k'), showfliers=False)
        plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',
            fontsize=12)
        # NOTE(review): '.format(hm_mark)' has no placeholder here, so the
        # label is always just 'DCI score'.
        plt.ylabel('DCI score'.format(hm_mark), fontsize=13)
        for ii in positions:
            # Star on the median marks comparisons with p < 0.05.
            plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],
                color='red', s=77)
        plt.axhline(y=0, c='k', lw=1)
        plt.title(title, fontsize=12)
        plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,
            subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',
            pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sns.set(font_scale=1.1)
sns.set_style('whitegrid', {'axes.grid': False})
sns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})
<|reserved_special_token_0|>
def return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):
    """Load the promoter-DCI table for one comparison, or None if absent.

    Reads ``{DCI_dir}/{subdir}/{hm_mark}_{compr_type}{suffix}.csv`` as a
    tab-separated table, using the fifth column (gene id) as the index.
    """
    dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        compr_type, suffix)
    if os.path.isfile(dci_file):
        # NOTE(review): tab-separated despite the .csv extension; the first
        # file line is consumed as the header row before columns are renamed.
        dci_df = pd.read_csv(dci_file, sep='\t', index_col=4)
        dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',
            'strand', 'DCI']
        return dci_df
    else:
        # Not every comparison exists for every sub-directory.
        return None
def scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,
        suffix, dci_thre):
    """Scatter one comparison's per-gene DCI scores against another's.

    compr_type is [x-comparison, y-comparison] (e.g. ['WT_over_Vector',
    'DEL_over_WT']).  Genes whose WT-over-Vector DCI exceeds +/-dci_thre
    are over-plotted in red/blue, a linear fit is drawn, and the fit's
    r/p values are recorded into num_DCI_bins_df (mutated in place).
    The figure is saved under ``{outdir}/{subdir}/``.

    Returns (up_bins, dn_bins): index labels of the highlighted genes,
    or ([], []) when the y-comparison table does not exist.
    """
    compr_x = compr_type[0]
    compr_y = compr_type[1]
    # Skip silently when this comparison was not generated for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        compr_y, suffix)
    if os.path.isfile(test_file):
        # Genes are classified by their DCI in the WT-over-Vector comparison.
        dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,
            'WT_over_Vector', suffix)
        up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre
            ].index
        dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *
            dci_thre].index
        dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)
        dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)
        # All genes in grey; classified genes drawn on top in color.
        plt.figure(figsize=(2.1, 2.1))
        plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=
            'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')
        plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,
            'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
        plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,
            'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))
        # Linear regression over all genes; r/p stored in the summary table.
        x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,
            dci_thre)
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value
        x_sort = np.sort(x)
        plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)
        # NOTE(review): plt.axes() returned the current axes in older
        # matplotlib; newer releases create a new axes here — verify the
        # pinned matplotlib version.
        plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,
            transform=plt.axes().transAxes, ha='right', va='top')
        plt.axhline(y=0, c='k', lw=1)
        plt.axvline(x=0, c='k', lw=1)
        plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,
            handletextpad=0.1, handlelength=1, loc='upper left',
            markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)
        # Human-readable axis labels, e.g. 'WT' over 'Vector'.
        xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[
            compr_x.split('_')[-1]]
        ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[
            compr_y.split('_')[-1]]
        plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)
        plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)
        plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,
            subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),
            bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
        return up_bins, dn_bins
    return [], []
def plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,
        dci_thre, num_DCI_bins_df, flag):
    """Box-plot DCI scores of *selected_bins* across the four comparisons.

    For each comparison whose table exists, the selected genes' rows are
    written to CSV, a one-sample t-test against 0 is run (a red star marks
    p < 0.05), and the statistic/p-value are recorded into num_DCI_bins_df
    (mutated in place).  The figure is saved under ``{outdir}/{subdir}/``.

    NOTE(review): *color* is accepted but unused in this version;
    *flag* ('increased'/'decreased') only names the output files.
    """
    # Proceed only when the baseline comparison exists for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        'WT_over_Vector', suffix)
    if os.path.isfile(test_file):
        box_vals = []
        xticklabels = []
        sig_vals, sig_colors = [], []
        for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',
                'TPR_over_WT']:
            dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)
            if dci_df is not None:
                box_val = dci_df.loc[selected_bins]['DCI'].values
                # Persist the plotted values next to the figure.
                dci_df.loc[selected_bins].to_csv(
                    '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,
                    subdir, hm_mark, flag, suffix, dci_thre, compr_col))
                # One-sample t-test: is the group's mean DCI shifted from 0?
                s, p = stats.ttest_1samp(box_val, 0)
                sig_vals.append('*' if p < 0.05 else '')
                sig_colors.append('b' if s < 0 else 'r')
                box_vals.append(box_val)
                xa, xb = cellType_labels[compr_col.split('_')[0]
                    ], cellType_labels[compr_col.split('_')[-1]]
                xticklabels.append('{} over {}'.format(xa, xb))
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} s'.format(title.split()[2],
                    compr_col)] = '{:.2f}'.format(s)
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} p'.format(title.split()[2],
                    compr_col)] = '{:.2e}'.format(p)
        positions = np.arange(len(box_vals))
        fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))
        g = plt.boxplot(box_vals, positions=positions, widths=0.5,
            patch_artist=True, boxprops=dict(color='k', facecolor='w', fill
            =None, lw=1), medianprops=dict(color='k'), showfliers=False)
        plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',
            fontsize=12)
        # NOTE(review): '.format(hm_mark)' has no placeholder here, so the
        # label is always just 'DCI score'.
        plt.ylabel('DCI score'.format(hm_mark), fontsize=13)
        for ii in positions:
            # Star on the median marks comparisons with p < 0.05.
            plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],
                color='red', s=77)
        plt.axhline(y=0, c='k', lw=1)
        plt.title(title, fontsize=12)
        plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,
            subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',
            pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
<|reserved_special_token_0|>
os.makedirs(outdir, exist_ok=True)
<|reserved_special_token_0|>
# Process the parameter grid; note only subdirs[1:2] (a single run variant)
# and dci_thres[1:] (the stricter cutoff) are exercised here.
for subdir in subdirs[1:2]:
    outdir_tmp = '{}/{}'.format(outdir, subdir)
    os.makedirs(outdir_tmp, exist_ok=True)
    for hm_mark in hm_marks[:]:
        for suffix in suffixes[:]:
            for dci_thre in dci_thres[1:]:
                for compr_type in compr_types[:]:
                    # Scatter compr_type[0] vs compr_type[1]; returns genes
                    # classified up/down by their WT-over-Vector DCI.
                    up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,
                        subdir, hm_mark, compr_type, suffix, dci_thre)
                    # Record counts and draw the box plots only once per grid
                    # point (for the WT/Vector-vs-DEL/WT comparison pair).
                    if compr_type[1] == 'DEL_over_WT':
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,
                            hm_mark, suffix, dci_thre), '# up genes'] = len(
                            up_bins)
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,
                            hm_mark, suffix, dci_thre), '# dn genes'] = len(
                            dn_bins)
                        selected_bins = up_bins
                        color = 'tab:red'
                        title = ('Genes w/ DCI$>{}$ \n in WT over Vector'.
                            format(dci_thre))
                        plot_box_figs(subdir, hm_mark, suffix,
                            selected_bins, color, title, dci_thre,
                            num_DCI_bins_df, 'increased')
                        selected_bins = dn_bins
                        color = 'tab:blue'
                        title = ('Genes w/ DCI$<{}$ \n in WT over Vector'.
                            format(-1 * dci_thre))
                        plot_box_figs(subdir, hm_mark, suffix,
                            selected_bins, color, title, dci_thre,
                            num_DCI_bins_df, 'decreased')
# Write the accumulated per-grid-point summary table.
num_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.rcParams['font.size'] = 11
<|reserved_special_token_0|>
sns.set(font_scale=1.1)
sns.set_style('whitegrid', {'axes.grid': False})
sns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})
matplotlib.rcParams['font.sans-serif'] = ['Arial']
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Arial'
def return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):
    """Load the promoter-DCI table for one comparison, or None if absent.

    Reads ``{DCI_dir}/{subdir}/{hm_mark}_{compr_type}{suffix}.csv`` as a
    tab-separated table, using the fifth column (gene id) as the index.
    """
    dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        compr_type, suffix)
    if os.path.isfile(dci_file):
        # NOTE(review): tab-separated despite the .csv extension; the first
        # file line is consumed as the header row before columns are renamed.
        dci_df = pd.read_csv(dci_file, sep='\t', index_col=4)
        dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',
            'strand', 'DCI']
        return dci_df
    else:
        # Not every comparison exists for every sub-directory.
        return None
def scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,
        suffix, dci_thre):
    """Scatter one comparison's per-gene DCI scores against another's.

    compr_type is [x-comparison, y-comparison] (e.g. ['WT_over_Vector',
    'DEL_over_WT']).  Genes whose WT-over-Vector DCI exceeds +/-dci_thre
    are over-plotted in red/blue, a linear fit is drawn, and the fit's
    r/p values are recorded into num_DCI_bins_df (mutated in place).
    The figure is saved under ``{outdir}/{subdir}/``.

    Returns (up_bins, dn_bins): index labels of the highlighted genes,
    or ([], []) when the y-comparison table does not exist.
    """
    compr_x = compr_type[0]
    compr_y = compr_type[1]
    # Skip silently when this comparison was not generated for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        compr_y, suffix)
    if os.path.isfile(test_file):
        # Genes are classified by their DCI in the WT-over-Vector comparison.
        dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,
            'WT_over_Vector', suffix)
        up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre
            ].index
        dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *
            dci_thre].index
        dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)
        dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)
        # All genes in grey; classified genes drawn on top in color.
        plt.figure(figsize=(2.1, 2.1))
        plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=
            'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')
        plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,
            'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
        plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,
            'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))
        # Linear regression over all genes; r/p stored in the summary table.
        x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,
            dci_thre)
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value
        x_sort = np.sort(x)
        plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)
        # NOTE(review): plt.axes() returned the current axes in older
        # matplotlib; newer releases create a new axes here — verify the
        # pinned matplotlib version.
        plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,
            transform=plt.axes().transAxes, ha='right', va='top')
        plt.axhline(y=0, c='k', lw=1)
        plt.axvline(x=0, c='k', lw=1)
        plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,
            handletextpad=0.1, handlelength=1, loc='upper left',
            markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)
        # Human-readable axis labels, e.g. 'WT' over 'Vector'.
        xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[
            compr_x.split('_')[-1]]
        ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[
            compr_y.split('_')[-1]]
        plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)
        plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)
        plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,
            subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),
            bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
        return up_bins, dn_bins
    return [], []
def plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,
        dci_thre, num_DCI_bins_df, flag):
    """Box-plot DCI scores of *selected_bins* across the four comparisons.

    For each comparison whose table exists, the selected genes' rows are
    written to CSV, a one-sample t-test against 0 is run (a red star marks
    p < 0.05), and the statistic/p-value are recorded into num_DCI_bins_df
    (mutated in place).  The figure is saved under ``{outdir}/{subdir}/``.

    NOTE(review): *color* is accepted but unused in this version;
    *flag* ('increased'/'decreased') only names the output files.
    """
    # Proceed only when the baseline comparison exists for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        'WT_over_Vector', suffix)
    if os.path.isfile(test_file):
        box_vals = []
        xticklabels = []
        sig_vals, sig_colors = [], []
        for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',
                'TPR_over_WT']:
            dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)
            if dci_df is not None:
                box_val = dci_df.loc[selected_bins]['DCI'].values
                # Persist the plotted values next to the figure.
                dci_df.loc[selected_bins].to_csv(
                    '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,
                    subdir, hm_mark, flag, suffix, dci_thre, compr_col))
                # One-sample t-test: is the group's mean DCI shifted from 0?
                s, p = stats.ttest_1samp(box_val, 0)
                sig_vals.append('*' if p < 0.05 else '')
                sig_colors.append('b' if s < 0 else 'r')
                box_vals.append(box_val)
                xa, xb = cellType_labels[compr_col.split('_')[0]
                    ], cellType_labels[compr_col.split('_')[-1]]
                xticklabels.append('{} over {}'.format(xa, xb))
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} s'.format(title.split()[2],
                    compr_col)] = '{:.2f}'.format(s)
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} p'.format(title.split()[2],
                    compr_col)] = '{:.2e}'.format(p)
        positions = np.arange(len(box_vals))
        fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))
        g = plt.boxplot(box_vals, positions=positions, widths=0.5,
            patch_artist=True, boxprops=dict(color='k', facecolor='w', fill
            =None, lw=1), medianprops=dict(color='k'), showfliers=False)
        plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',
            fontsize=12)
        # NOTE(review): '.format(hm_mark)' has no placeholder here, so the
        # label is always just 'DCI score'.
        plt.ylabel('DCI score'.format(hm_mark), fontsize=13)
        for ii in positions:
            # Star on the median marks comparisons with p < 0.05.
            plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],
                color='red', s=77)
        plt.axhline(y=0, c='k', lw=1)
        plt.title(title, fontsize=12)
        plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,
            subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',
            pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
# ---- configuration -------------------------------------------------------
# Display labels for each UTX construct; values contain matplotlib mathtext.
cellType_labels = {'Vector': 'Vector', 'WT': 'WT', 'DEL': '$\\Delta$cIDR',
    'EIF': 'UTX-eIF$_{IDR}$', 'TPR': '$\\Delta$TPR', 'MT2': 'MT2', 'FUS':
    'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir, exist_ok=True)
project_dir = '/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang'
DCI_dir = (
    '{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'
    .format(project_dir))
# Parameter grid: bart3d run variants, comparison pairs, histone marks,
# file suffixes and DCI cutoffs.
subdirs = ['bart3d_dis200k_data_1st_submit', 'bart3d_dis200k_data202008',
    'bart3d_dis500k_data_1st_submit', 'bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector', 'DEL_over_WT'], ['DEL_over_WT',
    'EIF_over_DEL'], ['WT_over_Vector', 'TPR_over_WT']]
hm_marks = ['H3K4me3', 'H3K27ac']
suffixes = ['_promoter_DCI']
dci_thres = [2, 5]
num_DCI_bins_df = pd.DataFrame()
# ---- main loop -----------------------------------------------------------
# Note only subdirs[1:2] (a single run variant) and dci_thres[1:] (the
# stricter cutoff) are exercised here.
for subdir in subdirs[1:2]:
    outdir_tmp = '{}/{}'.format(outdir, subdir)
    os.makedirs(outdir_tmp, exist_ok=True)
    for hm_mark in hm_marks[:]:
        for suffix in suffixes[:]:
            for dci_thre in dci_thres[1:]:
                for compr_type in compr_types[:]:
                    # Scatter compr_type[0] vs compr_type[1]; returns genes
                    # classified up/down by their WT-over-Vector DCI.
                    up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,
                        subdir, hm_mark, compr_type, suffix, dci_thre)
                    # Record counts and draw the box plots only once per grid
                    # point (for the WT/Vector-vs-DEL/WT comparison pair).
                    if compr_type[1] == 'DEL_over_WT':
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,
                            hm_mark, suffix, dci_thre), '# up genes'] = len(
                            up_bins)
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,
                            hm_mark, suffix, dci_thre), '# dn genes'] = len(
                            dn_bins)
                        selected_bins = up_bins
                        color = 'tab:red'
                        title = ('Genes w/ DCI$>{}$ \n in WT over Vector'.
                            format(dci_thre))
                        plot_box_figs(subdir, hm_mark, suffix,
                            selected_bins, color, title, dci_thre,
                            num_DCI_bins_df, 'increased')
                        selected_bins = dn_bins
                        color = 'tab:blue'
                        title = ('Genes w/ DCI$<{}$ \n in WT over Vector'.
                            format(-1 * dci_thre))
                        plot_box_figs(subdir, hm_mark, suffix,
                            selected_bins, color, title, dci_thre,
                            num_DCI_bins_df, 'decreased')
# Write the accumulated per-grid-point summary table.
num_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')
<|reserved_special_token_1|>
import sys, argparse
import os, glob
import numpy as np
import pandas as pd
import re, bisect
from scipy import stats
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size'] = 11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style('whitegrid', {'axes.grid': False})
sns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})
matplotlib.rcParams['font.sans-serif'] = ['Arial']
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Arial'
def return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):
    """Load the promoter-DCI table for one comparison, or None if absent.

    The table is expected at
    ``{DCI_dir}/{subdir}/{hm_mark}_{compr_type}{suffix}.csv``, tab-separated,
    with the gene identifier in the fifth column (used as the index).
    """
    dci_file = '{}/{}/{}_{}{}.csv'.format(
        DCI_dir, subdir, hm_mark, compr_type, suffix)
    if not os.path.isfile(dci_file):
        # Not every comparison was generated for every sub-directory.
        return None
    frame = pd.read_csv(dci_file, sep='\t', index_col=4)
    frame.columns = ['chr', 'start', 'end', 'IfOverlap',
                     'score', 'strand', 'DCI']
    return frame
def scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,
        suffix, dci_thre):
    """Scatter one comparison's per-gene DCI scores against another's.

    compr_type is [x-comparison, y-comparison] (e.g. ['WT_over_Vector',
    'DEL_over_WT']).  Genes whose WT-over-Vector DCI exceeds +/-dci_thre
    are over-plotted in red/blue, a linear fit is drawn, and the fit's
    r/p values are recorded into num_DCI_bins_df (mutated in place).
    The figure is saved under ``{outdir}/{subdir}/``.

    Returns (up_bins, dn_bins): index labels of the highlighted genes,
    or ([], []) when the y-comparison table does not exist.
    """
    compr_x = compr_type[0]
    compr_y = compr_type[1]
    # Skip silently when this comparison was not generated for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        compr_y, suffix)
    if os.path.isfile(test_file):
        # Genes are classified by their DCI in the WT-over-Vector comparison.
        dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,
            'WT_over_Vector', suffix)
        up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre
            ].index
        dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *
            dci_thre].index
        dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)
        dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)
        # All genes in grey; classified genes drawn on top in color.
        plt.figure(figsize=(2.1, 2.1))
        plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=
            'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')
        plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,
            'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
        plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,
            'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=
            'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))
        # Linear regression over all genes; r/p stored in the summary table.
        x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,
            dci_thre)
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value
        num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value
        x_sort = np.sort(x)
        plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)
        # NOTE(review): plt.axes() returned the current axes in older
        # matplotlib; newer releases create a new axes here — verify the
        # pinned matplotlib version.
        plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,
            transform=plt.axes().transAxes, ha='right', va='top')
        plt.axhline(y=0, c='k', lw=1)
        plt.axvline(x=0, c='k', lw=1)
        plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,
            handletextpad=0.1, handlelength=1, loc='upper left',
            markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)
        # Human-readable axis labels, e.g. 'WT' over 'Vector'.
        xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[
            compr_x.split('_')[-1]]
        ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[
            compr_y.split('_')[-1]]
        plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)
        plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)
        plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,
            subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),
            bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
        return up_bins, dn_bins
    return [], []
def plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,
        dci_thre, num_DCI_bins_df, flag):
    """Box-plot DCI scores of *selected_bins* across the four comparisons.

    For each comparison whose table exists, the selected genes' rows are
    written to CSV, a one-sample t-test against 0 is run (a red star marks
    p < 0.05), and the statistic/p-value are recorded into num_DCI_bins_df
    (mutated in place).  The figure is saved under ``{outdir}/{subdir}/``.

    NOTE(review): *color* is accepted but unused in this version;
    *flag* ('increased'/'decreased') only names the output files.
    """
    # Proceed only when the baseline comparison exists for this subdir.
    test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,
        'WT_over_Vector', suffix)
    if os.path.isfile(test_file):
        box_vals = []
        xticklabels = []
        sig_vals, sig_colors = [], []
        for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',
                'TPR_over_WT']:
            dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)
            if dci_df is not None:
                box_val = dci_df.loc[selected_bins]['DCI'].values
                # Persist the plotted values next to the figure.
                dci_df.loc[selected_bins].to_csv(
                    '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,
                    subdir, hm_mark, flag, suffix, dci_thre, compr_col))
                # One-sample t-test: is the group's mean DCI shifted from 0?
                s, p = stats.ttest_1samp(box_val, 0)
                sig_vals.append('*' if p < 0.05 else '')
                sig_colors.append('b' if s < 0 else 'r')
                box_vals.append(box_val)
                xa, xb = cellType_labels[compr_col.split('_')[0]
                    ], cellType_labels[compr_col.split('_')[-1]]
                xticklabels.append('{} over {}'.format(xa, xb))
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} s'.format(title.split()[2],
                    compr_col)] = '{:.2f}'.format(s)
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,
                    suffix, dci_thre), '{} {} p'.format(title.split()[2],
                    compr_col)] = '{:.2e}'.format(p)
        positions = np.arange(len(box_vals))
        fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))
        g = plt.boxplot(box_vals, positions=positions, widths=0.5,
            patch_artist=True, boxprops=dict(color='k', facecolor='w', fill
            =None, lw=1), medianprops=dict(color='k'), showfliers=False)
        plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',
            fontsize=12)
        # NOTE(review): '.format(hm_mark)' has no placeholder here, so the
        # label is always just 'DCI score'.
        plt.ylabel('DCI score'.format(hm_mark), fontsize=13)
        for ii in positions:
            # Star on the median marks comparisons with p < 0.05.
            plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],
                color='red', s=77)
        plt.axhline(y=0, c='k', lw=1)
        plt.title(title, fontsize=12)
        plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,
            subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',
            pad_inches=0.1, dpi=600, transparent=True)
        plt.show()
        plt.close()
# ---- configuration -------------------------------------------------------
# Display labels for each UTX construct; values contain matplotlib mathtext.
cellType_labels = {'Vector': 'Vector', 'WT': 'WT', 'DEL': '$\\Delta$cIDR',
    'EIF': 'UTX-eIF$_{IDR}$', 'TPR': '$\\Delta$TPR', 'MT2': 'MT2', 'FUS':
    'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir, exist_ok=True)
project_dir = '/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang'
DCI_dir = (
    '{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'
    .format(project_dir))
# Parameter grid: bart3d run variants, comparison pairs, histone marks,
# file suffixes and DCI cutoffs.
subdirs = ['bart3d_dis200k_data_1st_submit', 'bart3d_dis200k_data202008',
    'bart3d_dis500k_data_1st_submit', 'bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector', 'DEL_over_WT'], ['DEL_over_WT',
    'EIF_over_DEL'], ['WT_over_Vector', 'TPR_over_WT']]
hm_marks = ['H3K4me3', 'H3K27ac']
suffixes = ['_promoter_DCI']
dci_thres = [2, 5]
num_DCI_bins_df = pd.DataFrame()
# ---- main loop -----------------------------------------------------------
# Note only subdirs[1:2] (a single run variant) and dci_thres[1:] (the
# stricter cutoff) are exercised here.
for subdir in subdirs[1:2]:
    outdir_tmp = '{}/{}'.format(outdir, subdir)
    os.makedirs(outdir_tmp, exist_ok=True)
    for hm_mark in hm_marks[:]:
        for suffix in suffixes[:]:
            for dci_thre in dci_thres[1:]:
                for compr_type in compr_types[:]:
                    # Scatter compr_type[0] vs compr_type[1]; returns genes
                    # classified up/down by their WT-over-Vector DCI.
                    up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,
                        subdir, hm_mark, compr_type, suffix, dci_thre)
                    # Record counts and draw the box plots only once per grid
                    # point (for the WT/Vector-vs-DEL/WT comparison pair).
                    if compr_type[1] == 'DEL_over_WT':
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,
                            hm_mark, suffix, dci_thre), '# up genes'] = len(
                            up_bins)
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,
                            hm_mark, suffix, dci_thre), '# dn genes'] = len(
                            dn_bins)
                        selected_bins = up_bins
                        color = 'tab:red'
                        title = ('Genes w/ DCI$>{}$ \n in WT over Vector'.
                            format(dci_thre))
                        plot_box_figs(subdir, hm_mark, suffix,
                            selected_bins, color, title, dci_thre,
                            num_DCI_bins_df, 'increased')
                        selected_bins = dn_bins
                        color = 'tab:blue'
                        title = ('Genes w/ DCI$<{}$ \n in WT over Vector'.
                            format(-1 * dci_thre))
                        plot_box_figs(subdir, hm_mark, suffix,
                            selected_bins, color, title, dci_thre,
                            num_DCI_bins_df, 'decreased')
# Write the accumulated per-grid-point summary table.
num_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')
<|reserved_special_token_1|>
import sys,argparse
import os,glob
import numpy as np
import pandas as pd
import re,bisect
from scipy import stats
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=11
import seaborn as sns
sns.set(font_scale=1.1)
sns.set_style("whitegrid", {'axes.grid' : False})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams["mathtext.rm"] = "Arial"
# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
# dci_df = pd.read_csv(dci_file,sep='\t',header=None)
# dci_df.columns=['chr','start','end','DCI']
# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]
# return dci_df
def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):
    """Load the promoter-DCI table for one comparison, or None if absent.

    Reads ``{DCI_dir}/{subdir}/{hm_mark}_{compr_type}{suffix}.csv`` as a
    tab-separated table, using the fifth column (gene id) as the index.
    """
    dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)
    if os.path.isfile(dci_file):
        # NOTE(review): tab-separated despite the .csv extension; the first
        # file line is consumed as the header row before columns are renamed.
        dci_df = pd.read_csv(dci_file,sep='\t',index_col=4)
        dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI']
        return dci_df
    else:
        # Not every comparison exists for every sub-directory.
        return None
def scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):
    """Scatter one comparison's per-gene DCI scores against another's.

    compr_type is [x-comparison, y-comparison] (e.g. ['WT_over_Vector',
    'DEL_over_WT']).  Genes whose WT-over-Vector DCI exceeds +/-dci_thre
    are over-plotted in red/blue, a linear fit is drawn, and the fit's
    r/p values are recorded into num_DCI_bins_df (mutated in place).
    The figure is saved under ``{outdir}/{subdir}/``.

    Returns (up_bins, dn_bins): index labels of the highlighted genes,
    or ([], []) when the y-comparison table does not exist.
    """
    compr_x = compr_type[0]
    compr_y = compr_type[1]
    # Skip silently when this comparison was not generated for this subdir.
    test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)
    # print(test_file)
    if os.path.isfile(test_file):
        # Genes are classified by their DCI in the WT-over-Vector comparison.
        dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
        up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index
        dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index
        dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)
        dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)
        # scatter plot
        plt.figure(figsize=(2.1,2.1))
        plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')
        plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))
        plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))
        # save and plot the correlation
        x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']
        slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
        output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)
        num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value
        num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value
        x_sort = np.sort(x)
        plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)
        # NOTE(review): plt.axes() returned the current axes in older
        # matplotlib; newer releases create a new axes here — verify the
        # pinned matplotlib version.
        plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')
        plt.axhline(y=0,c='k',lw=1)
        plt.axvline(x=0,c='k',lw=1)
        # # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))
        plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\
            handlelength=1,loc="upper left",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)
        # Human-readable axis labels, e.g. 'WT' over 'Vector'.
        xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]
        ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]
        plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)
        plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)
        plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\
            bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
        plt.show()
        plt.close()
        return up_bins,dn_bins
    return [],[]
def plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):
    """Box-plot DCI scores of *selected_bins* across the four comparisons.

    For each comparison whose table exists, the selected genes' rows are
    written to CSV, a one-sample t-test against 0 is run (a red star marks
    p < 0.05), and the statistic/p-value are recorded into num_DCI_bins_df
    (mutated in place).  The figure is saved under ``{outdir}/{subdir}/``.

    NOTE(review): *color* is only used by the commented-out scatter overlay
    below; *flag* ('increased'/'decreased') only names the output files.
    """
    # Proceed only when the baseline comparison exists for this subdir.
    test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)
    if os.path.isfile(test_file):
        box_vals = []
        xticklabels = []
        sig_vals,sig_colors = [],[]
        for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:
            dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)
            if dci_df is not None:
                box_val = dci_df.loc[selected_bins]['DCI'].values
                # save the values in box plots
                dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))
                # One-sample t-test: is the group's mean DCI shifted from 0?
                s,p = stats.ttest_1samp(box_val,0)
                sig_vals.append('*' if p<0.05 else '')
                sig_colors.append('b' if s<0 else 'r')
                box_vals.append(box_val)
                xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]]
                xticklabels.append('{} over {}'.format(xa,xb))
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s)
                num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p)
        #print(box_vals)
        positions = np.arange(len(box_vals))
        fig = plt.figure(figsize=(.46*len(box_vals),2.2))
        g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\
            boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\
            medianprops=dict(color='k'),showfliers=False)
        # g = plt.violinplot(box_vals)
        # for position_id in np.arange(len(positions)):
        #     scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))
        #     plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)
        # for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:
        #     mark_pvalue(compr_pos,positions,box_vals)
        plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)
        # NOTE(review): '.format(hm_mark)' has no placeholder here, so the
        # label is always just 'DCI score'.
        plt.ylabel('DCI score'.format(hm_mark),fontsize=13)
        # plt.ylim([-1,2])
        for ii in positions:
            # Star on the median marks comparisons with p < 0.05.
            plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)
            # plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')
        plt.axhline(y=0,c='k',lw=1)
        plt.title(title,fontsize=12)
        # plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc="upper right",frameon=False)
        plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\
            bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)
        plt.show()
        plt.close()
# ==== main()
# NOTE: everything below runs at module level; the plotting helpers above read
# cellType_labels / outdir / DCI_dir as module globals, so these names must
# stay bound here.
# Display labels (with TeX math) for each UTX construct, used in figure text.
cellType_labels= {'Vector':'Vector',\
                  'WT':'WT',\
                  'DEL':'$\Delta$cIDR',\
                  'EIF':'UTX-eIF$_{IDR}$',\
                  'TPR':'$\Delta$TPR',\
                  'MT2':'MT2',\
                  'FUS':'UTX-FUS$_{IDR}$'}
outdir = 'f4_promoter_DCI_scatter'
os.makedirs(outdir,exist_ok=True)
# Input location: local mount vs cluster path (commented alternative below).
# project_dir="/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
project_dir="/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang"
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)
DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)
# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)
# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)
# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)
# Parameter grid: bart3d run sub-directories, (x, y) comparison pairs for the
# scatter plots, histone marks, file-name suffixes, and DCI thresholds.
subdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',
         'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']
compr_types = [['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]
hm_marks = ['H3K4me3','H3K27ac']
suffixes=['_promoter_DCI']
dci_thres = [2,5]
# Summary table filled in place by the plotting helpers (gene counts,
# correlations, t-test statistics); written to CSV at the very end.
num_DCI_bins_df = pd.DataFrame()
for subdir in subdirs[1:2]:  # NOTE: slice deliberately restricts to 'bart3d_dis200k_data202008'
    outdir_tmp='{}/{}'.format(outdir,subdir)
    os.makedirs(outdir_tmp,exist_ok=True)
    for hm_mark in hm_marks[:]:
        for suffix in suffixes[:]:
            for dci_thre in dci_thres[1:]:  # NOTE: slice deliberately restricts to threshold 5
                for compr_type in compr_types[:]:
                    # Scatter of DCI(x) vs DCI(y); returns genes whose
                    # WT-over-Vector DCI exceeds +/- dci_thre.
                    up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre)
                    # the box plot are exactly the same for every compr_type,
                    # so draw/record them only once per (mark, suffix, thre)
                    if compr_type[1]=='DEL_over_WT':
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins)
                        num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins)
                        ##### box plot: once for genes with increased DCI ...
                        selected_bins = up_bins
                        color = 'tab:red'
                        title = 'Genes w/ DCI$>{}$ \n in WT over Vector'.format(dci_thre)
                        plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')
                        ##### ... and once for genes with decreased DCI
                        selected_bins = dn_bins
                        color = 'tab:blue'
                        title = 'Genes w/ DCI$<{}$ \n in WT over Vector'.format(-1*dci_thre)
                        plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')
num_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')
|
flexible
|
{
"blob_id": "4ee47435bff1b0b4a7877c06fb13d13cf53b7fce",
"index": 3910,
"step-1": "<mask token>\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + 
intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n 
num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsns.set(font_scale=1.1)\nsns.set_style('whitegrid', {'axes.grid': False})\nsns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})\n<mask token>\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 
'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = 
cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\n<mask token>\nos.makedirs(outdir, exist_ok=True)\n<mask token>\nfor subdir in subdirs[1:2]:\n outdir_tmp = '{}/{}'.format(outdir, subdir)\n os.makedirs(outdir_tmp, exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,\n subdir, hm_mark, compr_type, suffix, dci_thre)\n if compr_type[1] == 'DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# up genes'] = len(\n up_bins)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# dn genes'] = len(\n dn_bins)\n selected_bins = up_bins\n color = 'tab:red'\n title = ('Genes w/ 
DCI$>{}$ \\n in WT over Vector'.\n format(dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'increased')\n selected_bins = dn_bins\n color = 'tab:blue'\n title = ('Genes w/ DCI$<{}$ \\n in WT over Vector'.\n format(-1 * dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'decreased')\nnum_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')\n",
"step-3": "<mask token>\nmatplotlib.rcParams['font.size'] = 11\n<mask token>\nsns.set(font_scale=1.1)\nsns.set_style('whitegrid', {'axes.grid': False})\nsns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})\nmatplotlib.rcParams['font.sans-serif'] = ['Arial']\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams['mathtext.rm'] = 'Arial'\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n 
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, 
dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\ncellType_labels = {'Vector': 'Vector', 'WT': 'WT', 'DEL': '$\\\\Delta$cIDR',\n 'EIF': 'UTX-eIF$_{IDR}$', 'TPR': '$\\\\Delta$TPR', 'MT2': 'MT2', 'FUS':\n 'UTX-FUS$_{IDR}$'}\noutdir = 'f4_promoter_DCI_scatter'\nos.makedirs(outdir, exist_ok=True)\nproject_dir = '/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang'\nDCI_dir = (\n '{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'\n .format(project_dir))\nsubdirs = ['bart3d_dis200k_data_1st_submit', 'bart3d_dis200k_data202008',\n 'bart3d_dis500k_data_1st_submit', 
'bart3d_dis500k_data202008']\ncompr_types = [['WT_over_Vector', 'DEL_over_WT'], ['DEL_over_WT',\n 'EIF_over_DEL'], ['WT_over_Vector', 'TPR_over_WT']]\nhm_marks = ['H3K4me3', 'H3K27ac']\nsuffixes = ['_promoter_DCI']\ndci_thres = [2, 5]\nnum_DCI_bins_df = pd.DataFrame()\nfor subdir in subdirs[1:2]:\n outdir_tmp = '{}/{}'.format(outdir, subdir)\n os.makedirs(outdir_tmp, exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,\n subdir, hm_mark, compr_type, suffix, dci_thre)\n if compr_type[1] == 'DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# up genes'] = len(\n up_bins)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# dn genes'] = len(\n dn_bins)\n selected_bins = up_bins\n color = 'tab:red'\n title = ('Genes w/ DCI$>{}$ \\n in WT over Vector'.\n format(dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'increased')\n selected_bins = dn_bins\n color = 'tab:blue'\n title = ('Genes w/ DCI$<{}$ \\n in WT over Vector'.\n format(-1 * dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'decreased')\nnum_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')\n",
"step-4": "import sys, argparse\nimport os, glob\nimport numpy as np\nimport pandas as pd\nimport re, bisect\nfrom scipy import stats\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.size'] = 11\nimport seaborn as sns\nsns.set(font_scale=1.1)\nsns.set_style('whitegrid', {'axes.grid': False})\nsns.set_style('ticks', {'ytick.color': 'k', 'axes.edgecolor': 'k'})\nmatplotlib.rcParams['font.sans-serif'] = ['Arial']\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams['mathtext.rm'] = 'Arial'\n\n\ndef return_dci_df(DCI_dir, subdir, hm_mark, compr_type, suffix):\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_type, suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file, sep='\\t', index_col=4)\n dci_df.columns = ['chr', 'start', 'end', 'IfOverlap', 'score',\n 'strand', 'DCI']\n return dci_df\n else:\n return None\n\n\ndef scatter_plot_compr_DCI(num_DCI_bins_df, subdir, hm_mark, compr_type,\n suffix, dci_thre):\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n compr_y, suffix)\n if os.path.isfile(test_file):\n dci_df_wt_over_vector = return_dci_df(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] > dci_thre\n ].index\n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI'] < -1 *\n dci_thre].index\n dci_df_x = return_dci_df(DCI_dir, subdir, hm_mark, compr_x, suffix)\n dci_df_y = return_dci_df(DCI_dir, subdir, hm_mark, compr_y, suffix)\n plt.figure(figsize=(2.1, 2.1))\n plt.scatter(dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI'], c=\n 'tab:grey', s=3, alpha=1, rasterized=True, label='All genes')\n plt.scatter(dci_df_x.loc[up_bins, 'DCI'], dci_df_y.loc[up_bins,\n 'DCI'], c='tab:red', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins, 'DCI'], dci_df_y.loc[dn_bins,\n 
'DCI'], c='tab:blue', s=3, alpha=1, rasterized=True, label=\n 'Genes w/ DCI$<{}$ in WT/Vector'.format(-1 * dci_thre))\n x, y = dci_df_x.loc[:, 'DCI'], dci_df_y.loc[:, 'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n output_prename = '{}_{}_{}_dci{}'.format(subdir, hm_mark, suffix,\n dci_thre)\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename, 'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort, x_sort * slope + intercept, c='k', ls='--', lw=0.8)\n plt.text(0.97, 0.97, '$r={:.2f}$ '.format(r_value), fontsize=10,\n transform=plt.axes().transAxes, ha='right', va='top')\n plt.axhline(y=0, c='k', lw=1)\n plt.axvline(x=0, c='k', lw=1)\n plt.legend(fontsize=10.5, borderaxespad=0.1, labelspacing=0.1,\n handletextpad=0.1, handlelength=1, loc='upper left',\n markerscale=3, bbox_to_anchor=[-0.12, 1.36], frameon=False)\n xa, xb = cellType_labels[compr_x.split('_')[0]], cellType_labels[\n compr_x.split('_')[-1]]\n ya, yb = cellType_labels[compr_y.split('_')[0]], cellType_labels[\n compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa, xb), fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya, yb), fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,\n subdir, hm_mark, compr_x, compr_y, suffix, dci_thre),\n bbox_inches='tight', pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n return up_bins, dn_bins\n return [], []\n\n\ndef plot_box_figs(subdir, hm_mark, suffix, selected_bins, color, title,\n dci_thre, num_DCI_bins_df, flag):\n test_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir, subdir, hm_mark,\n 'WT_over_Vector', suffix)\n if os.path.isfile(test_file):\n box_vals = []\n xticklabels = []\n sig_vals, sig_colors = [], []\n for compr_col in ['WT_over_Vector', 'DEL_over_WT', 'EIF_over_DEL',\n 'TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir, subdir, hm_mark, compr_col, suffix)\n if dci_df is not None:\n 
box_val = dci_df.loc[selected_bins]['DCI'].values\n dci_df.loc[selected_bins].to_csv(\n '{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre, compr_col))\n s, p = stats.ttest_1samp(box_val, 0)\n sig_vals.append('*' if p < 0.05 else '')\n sig_colors.append('b' if s < 0 else 'r')\n box_vals.append(box_val)\n xa, xb = cellType_labels[compr_col.split('_')[0]\n ], cellType_labels[compr_col.split('_')[-1]]\n xticklabels.append('{} over {}'.format(xa, xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} s'.format(title.split()[2],\n compr_col)] = '{:.2f}'.format(s)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir, hm_mark,\n suffix, dci_thre), '{} {} p'.format(title.split()[2],\n compr_col)] = '{:.2e}'.format(p)\n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(0.46 * len(box_vals), 2.2))\n g = plt.boxplot(box_vals, positions=positions, widths=0.5,\n patch_artist=True, boxprops=dict(color='k', facecolor='w', fill\n =None, lw=1), medianprops=dict(color='k'), showfliers=False)\n plt.axes().set_xticklabels(xticklabels, rotation=30, ha='right',\n fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark), fontsize=13)\n for ii in positions:\n plt.scatter(ii, np.median(box_vals[ii]), marker=sig_vals[ii],\n color='red', s=77)\n plt.axhline(y=0, c='k', lw=1)\n plt.title(title, fontsize=12)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,\n subdir, hm_mark, flag, suffix, dci_thre), bbox_inches='tight',\n pad_inches=0.1, dpi=600, transparent=True)\n plt.show()\n plt.close()\n\n\ncellType_labels = {'Vector': 'Vector', 'WT': 'WT', 'DEL': '$\\\\Delta$cIDR',\n 'EIF': 'UTX-eIF$_{IDR}$', 'TPR': '$\\\\Delta$TPR', 'MT2': 'MT2', 'FUS':\n 'UTX-FUS$_{IDR}$'}\noutdir = 'f4_promoter_DCI_scatter'\nos.makedirs(outdir, exist_ok=True)\nproject_dir = '/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang'\nDCI_dir = (\n 
'{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'\n .format(project_dir))\nsubdirs = ['bart3d_dis200k_data_1st_submit', 'bart3d_dis200k_data202008',\n 'bart3d_dis500k_data_1st_submit', 'bart3d_dis500k_data202008']\ncompr_types = [['WT_over_Vector', 'DEL_over_WT'], ['DEL_over_WT',\n 'EIF_over_DEL'], ['WT_over_Vector', 'TPR_over_WT']]\nhm_marks = ['H3K4me3', 'H3K27ac']\nsuffixes = ['_promoter_DCI']\ndci_thres = [2, 5]\nnum_DCI_bins_df = pd.DataFrame()\nfor subdir in subdirs[1:2]:\n outdir_tmp = '{}/{}'.format(outdir, subdir)\n os.makedirs(outdir_tmp, exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins, dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,\n subdir, hm_mark, compr_type, suffix, dci_thre)\n if compr_type[1] == 'DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# up genes'] = len(\n up_bins)\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,\n hm_mark, suffix, dci_thre), '# dn genes'] = len(\n dn_bins)\n selected_bins = up_bins\n color = 'tab:red'\n title = ('Genes w/ DCI$>{}$ \\n in WT over Vector'.\n format(dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'increased')\n selected_bins = dn_bins\n color = 'tab:blue'\n title = ('Genes w/ DCI$<{}$ \\n in WT over Vector'.\n format(-1 * dci_thre))\n plot_box_figs(subdir, hm_mark, suffix,\n selected_bins, color, title, dci_thre,\n num_DCI_bins_df, 'decreased')\nnum_DCI_bins_df.to_csv(outdir + os.sep + 'num_DCI_promoter_summary.csv')\n",
"step-5": "import sys,argparse\nimport os,glob\nimport numpy as np\nimport pandas as pd\nimport re,bisect\nfrom scipy import stats\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.size']=11\nimport seaborn as sns\nsns.set(font_scale=1.1)\nsns.set_style(\"whitegrid\", {'axes.grid' : False})\nsns.set_style(\"ticks\",{'ytick.color': 'k','axes.edgecolor': 'k'})\nmatplotlib.rcParams[\"font.sans-serif\"] = [\"Arial\"]\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams[\"mathtext.rm\"] = \"Arial\"\n\n\n\n# def return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):\n\n# dci_file = '{}/{}/{}_{}{}.bed'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)\n# dci_df = pd.read_csv(dci_file,sep='\\t',header=None)\n# dci_df.columns=['chr','start','end','DCI']\n# dci_df.index = ['_'.join(ii) for ii in dci_df[['chr','start','end']].values.astype(str)]\n# return dci_df\n\ndef return_dci_df(DCI_dir,subdir,hm_mark,compr_type,suffix):\n\n dci_file = '{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_type,suffix)\n if os.path.isfile(dci_file):\n dci_df = pd.read_csv(dci_file,sep='\\t',index_col=4)\n dci_df.columns=['chr','start','end','IfOverlap','score','strand','DCI'] \n return dci_df\n else:\n return None\n \n\ndef scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre):\n\n compr_x = compr_type[0]\n compr_y = compr_type[1]\n \n test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,compr_y,suffix)\n # print(test_file)\n if os.path.isfile(test_file): \n dci_df_wt_over_vector = return_dci_df(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)\n up_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']>dci_thre].index \n dn_bins = dci_df_wt_over_vector[dci_df_wt_over_vector['DCI']<-1*dci_thre].index \n \n dci_df_x = return_dci_df(DCI_dir,subdir,hm_mark,compr_x,suffix)\n dci_df_y = return_dci_df(DCI_dir,subdir,hm_mark,compr_y,suffix)\n\n # scatter plot\n 
plt.figure(figsize=(2.1,2.1))\n plt.scatter(dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI'],c='tab:grey',s=3,alpha=1,rasterized=True,label='All genes')\n plt.scatter(dci_df_x.loc[up_bins,'DCI'],dci_df_y.loc[up_bins,'DCI'],c='tab:red',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$>{}$ in WT/Vector'.format(dci_thre))\n plt.scatter(dci_df_x.loc[dn_bins,'DCI'],dci_df_y.loc[dn_bins,'DCI'],c='tab:blue',s=3,alpha=1,rasterized=True,label='Genes w/ DCI$<{}$ in WT/Vector'.format(-1*dci_thre))\n \n # save and plot the correlation\n x,y = dci_df_x.loc[:,'DCI'],dci_df_y.loc[:,'DCI']\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) \n output_prename = '{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre)\n num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_s'] = r_value\n num_DCI_bins_df.loc[output_prename,'scatter_pearsonr_p'] = p_value\n x_sort = np.sort(x)\n plt.plot(x_sort,x_sort*slope+intercept,c = 'k',ls='--',lw=.8)\n plt.text(.97,.97,'$r={:.2f}$ '.format(r_value),fontsize=10,transform=plt.axes().transAxes,ha='right',va='top')\n \n plt.axhline(y=0,c='k',lw=1)\n plt.axvline(x=0,c='k',lw=1)\n # # plt.title('{} over {}'.format(cellType_labels[treatment],cellType_labels[control]))\n plt.legend(fontsize=10.5,borderaxespad=0.1,labelspacing=.1,handletextpad=0.1,\\\n handlelength=1,loc=\"upper left\",markerscale=3,bbox_to_anchor=[-0.12,1.36],frameon=False)\n xa,xb = cellType_labels[compr_x.split('_')[0]],cellType_labels[compr_x.split('_')[-1]]\n ya,yb = cellType_labels[compr_y.split('_')[0]],cellType_labels[compr_y.split('_')[-1]]\n plt.xlabel('DCI score ({} over {})'.format(xa,xb),fontsize=12)\n plt.ylabel('DCI score ({} over {})'.format(ya,yb),fontsize=12)\n plt.savefig('{}/{}/scatter_{}_{}_vs_{}{}_dci{}.png'.format(outdir,subdir,hm_mark,compr_x,compr_y,suffix,dci_thre),\\\n bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)\n plt.show()\n plt.close()\n return up_bins,dn_bins\n return [],[]\n\n\n\n\ndef 
plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,flag):\n \n test_file='{}/{}/{}_{}{}.csv'.format(DCI_dir,subdir,hm_mark,'WT_over_Vector',suffix)\n \n if os.path.isfile(test_file): \n box_vals = []\n xticklabels = []\n sig_vals,sig_colors = [],[]\n for compr_col in ['WT_over_Vector','DEL_over_WT','EIF_over_DEL','TPR_over_WT']:\n dci_df = return_dci_df(DCI_dir,subdir,hm_mark,compr_col,suffix)\n if dci_df is not None:\n box_val = dci_df.loc[selected_bins]['DCI'].values\n # save the values in box plots\n dci_df.loc[selected_bins].to_csv('{}/{}/box_{}_{}_genes{}_dci{}_{}.csv'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre,compr_col))\n s,p = stats.ttest_1samp(box_val,0)\n sig_vals.append('*' if p<0.05 else '')\n sig_colors.append('b' if s<0 else 'r')\n box_vals.append(box_val)\n xa,xb = cellType_labels[compr_col.split('_')[0]],cellType_labels[compr_col.split('_')[-1]] \n xticklabels.append('{} over {}'.format(xa,xb))\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} s'.format(title.split()[2],compr_col)] = '{:.2f}'.format(s) \n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'{} {} p'.format(title.split()[2],compr_col)] = '{:.2e}'.format(p) \n \n #print(box_vals) \n positions = np.arange(len(box_vals))\n fig = plt.figure(figsize=(.46*len(box_vals),2.2))\n g = plt.boxplot(box_vals,positions=positions,widths = .5,patch_artist=True,\\\n boxprops=dict(color='k',facecolor='w',fill=None,lw=1),\\\n medianprops=dict(color='k'),showfliers=False) \n # g = plt.violinplot(box_vals)\n \n # for position_id in np.arange(len(positions)):\n # scatter_x = np.random.normal(positions[position_id],0.06,len(box_vals[position_id]))\n # plt.scatter(scatter_x,box_vals[position_id],color=color,s=5,zorder=0,alpha=0.6,rasterized=True)\n \n # for compr_pos in [[0,1,'t'],[1,2,'t'],[2,3,'t']]:\n # mark_pvalue(compr_pos,positions,box_vals)\n 
plt.axes().set_xticklabels(xticklabels,rotation=30,ha='right',fontsize=12)\n plt.ylabel('DCI score'.format(hm_mark),fontsize=13)\n # plt.ylim([-1,2])\n for ii in positions:\n plt.scatter(ii,np.median(box_vals[ii]),marker=sig_vals[ii],color='red',s=77)\n # plt.axes().text(ii,0,sig_vals[ii-1],fontsize=28,va='top',ha='center',color='red')\n plt.axhline(y=0,c='k',lw=1)\n plt.title(title,fontsize=12)\n # plt.legend(fontsize=16,borderaxespad=0.2,labelspacing=.2,handletextpad=0.2,handlelength=1,loc=\"upper right\",frameon=False)\n plt.savefig('{}/{}/box_{}_{}_genes{}_dci{}.png'.format(outdir,subdir,hm_mark,flag,suffix,dci_thre),\\\n bbox_inches='tight',pad_inches=0.1,dpi=600,transparent=True)\n plt.show()\n plt.close()\n\n\n\n\n# ==== main() \n\ncellType_labels= {'Vector':'Vector',\\\n 'WT':'WT',\\\n 'DEL':'$\\Delta$cIDR',\\\n 'EIF':'UTX-eIF$_{IDR}$',\\\n 'TPR':'$\\Delta$TPR',\\\n 'MT2':'MT2',\\\n 'FUS':'UTX-FUS$_{IDR}$'}\n\n \noutdir = 'f4_promoter_DCI_scatter'\nos.makedirs(outdir,exist_ok=True)\n\n# project_dir=\"/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang\"\nproject_dir=\"/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang\"\n# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f2_DEG_promoter_DCI_non_normalized/f1_promoter_DCI_rename'.format(project_dir)\nDCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f1_DEG_promoter_DCI/f1_promoter_DCI'.format(project_dir)\n# DCI_dir='{}/f5_hichip/f1_hichip_bart3d_new/f0_run_bart3d_new/bart3d_DCI_rename'.format(project_dir)\n# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/f1_deseq2_out'.format(project_dir)\n# expr_dir='{}/f0_data_process/rna_seq/data_1st_submit_STAR_RSEM_new/f6_deg/fz_deseq2_out_combined'.format(project_dir)\n# deg_df = pd.read_csv('{}/deseq2_combined.csv'.format(expr_dir),index_col=0)\n\n\nsubdirs=['bart3d_dis200k_data_1st_submit','bart3d_dis200k_data202008',\n 'bart3d_dis500k_data_1st_submit','bart3d_dis500k_data202008']\n\ncompr_types = 
[['WT_over_Vector','DEL_over_WT'],['DEL_over_WT','EIF_over_DEL'],['WT_over_Vector','TPR_over_WT']]\nhm_marks = ['H3K4me3','H3K27ac']\nsuffixes=['_promoter_DCI']\ndci_thres = [2,5]\n\n\nnum_DCI_bins_df = pd.DataFrame()\nfor subdir in subdirs[1:2]: \n outdir_tmp='{}/{}'.format(outdir,subdir)\n os.makedirs(outdir_tmp,exist_ok=True)\n for hm_mark in hm_marks[:]:\n for suffix in suffixes[:]:\n for dci_thre in dci_thres[1:]:\n for compr_type in compr_types[:]:\n up_bins,dn_bins = scatter_plot_compr_DCI(num_DCI_bins_df,subdir,hm_mark,compr_type,suffix,dci_thre) \n \n # the box plot are exactly the same\n if compr_type[1]=='DEL_over_WT':\n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# up genes'] = len(up_bins) \n num_DCI_bins_df.loc['{}_{}_{}_dci{}'.format(subdir,hm_mark,suffix,dci_thre),'# dn genes'] = len(dn_bins) \n \n ##### box plot\n selected_bins = up_bins\n color = 'tab:red'\n title = 'Genes w/ DCI$>{}$ \\n in WT over Vector'.format(dci_thre)\n plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'increased')\n \n selected_bins = dn_bins\n color = 'tab:blue'\n title = 'Genes w/ DCI$<{}$ \\n in WT over Vector'.format(-1*dci_thre)\n plot_box_figs(subdir,hm_mark,suffix,selected_bins,color,title,dci_thre,num_DCI_bins_df,'decreased')\n \n\nnum_DCI_bins_df.to_csv(outdir+os.sep+'num_DCI_promoter_summary.csv')\n\n \n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):
    """Build a Ludwig model-definition dict from a dataframe's column dtypes.

    Numeric columns become 'numerical' features with mean imputation and
    z-score normalization; string columns become 'category' features.

    Args:
        df: pandas DataFrame containing the training data.
        target: name of the output (label) column.
        features: names of the input columns.

    Returns:
        dict with 'input_features' and 'output_features' lists in the
        layout Ludwig expects.

    Raises:
        TypeError: if any referenced column is neither numeric nor string.
    """
    input_features, output_features = [], []
    for p in features:
        if pandas.api.types.is_numeric_dtype(df[p]):
            input_features.append({'name': p, 'type': 'numerical',
                'preprocessing': {'missing_value_strategy':
                'fill_with_mean', 'normalization': 'zscore'}})
        elif pandas.api.types.is_string_dtype(df[p]):
            input_features.append({'name': p, 'type': 'category'})
        else:
            raise TypeError(f'column {p} value isnt number or string')
    if pandas.api.types.is_numeric_dtype(df[target]):
        output_features.append({'name': target, 'type': 'numerical',
            'preprocessing': {'missing_value_strategy': 'fill_with_mean',
            'normalization': 'zscore'}})
    # BUG FIX: the original tested df[p] here (the last loop variable),
    # which checks the wrong column and raises NameError when `features`
    # is empty; the output column is `target`.
    elif pandas.api.types.is_string_dtype(df[target]):
        output_features.append({'name': target, 'type': 'category'})
    else:
        raise TypeError(f'column {target} value isnt number or string')
    return {'input_features': input_features, 'output_features':
        output_features}
<|reserved_special_token_1|>
import pandas
def ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):
    """Build a Ludwig model-definition dict from a dataframe's column dtypes.

    Numeric columns become 'numerical' features with mean imputation and
    z-score normalization; string columns become 'category' features.

    Args:
        df: pandas DataFrame containing the training data.
        target: name of the output (label) column.
        features: names of the input columns.

    Returns:
        dict with 'input_features' and 'output_features' lists in the
        layout Ludwig expects.

    Raises:
        TypeError: if any referenced column is neither numeric nor string.
    """
    input_features, output_features = [], []
    for p in features:
        if pandas.api.types.is_numeric_dtype(df[p]):
            input_features.append({'name': p, 'type': 'numerical',
                'preprocessing': {'missing_value_strategy':
                'fill_with_mean', 'normalization': 'zscore'}})
        elif pandas.api.types.is_string_dtype(df[p]):
            input_features.append({'name': p, 'type': 'category'})
        else:
            raise TypeError(f'column {p} value isnt number or string')
    if pandas.api.types.is_numeric_dtype(df[target]):
        output_features.append({'name': target, 'type': 'numerical',
            'preprocessing': {'missing_value_strategy': 'fill_with_mean',
            'normalization': 'zscore'}})
    # BUG FIX: the original tested df[p] here (the last loop variable),
    # which checks the wrong column and raises NameError when `features`
    # is empty; the output column is `target`.
    elif pandas.api.types.is_string_dtype(df[target]):
        output_features.append({'name': target, 'type': 'category'})
    else:
        raise TypeError(f'column {target} value isnt number or string')
    return {'input_features': input_features, 'output_features':
        output_features}
<|reserved_special_token_1|>
import pandas
def ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):
    """Build a Ludwig model-definition dict from a dataframe's column dtypes.

    Numeric columns become 'numerical' features with mean imputation and
    z-score normalization; string columns become 'category' features.

    Args:
        df: pandas DataFrame containing the training data.
        target: name of the output (label) column.
        features: names of the input columns.

    Returns:
        dict with 'input_features' and 'output_features' lists in the
        layout Ludwig expects.

    Raises:
        TypeError: if any referenced column is neither numeric nor string.
    """
    input_features, output_features = [], []
    for p in features:
        if pandas.api.types.is_numeric_dtype(df[p]):
            input_features.append({'name': p, 'type': 'numerical',
                'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})
        elif pandas.api.types.is_string_dtype(df[p]):
            input_features.append({'name': p, 'type': 'category'})
        else:
            raise TypeError(f'column {p} value isnt number or string')
    if pandas.api.types.is_numeric_dtype(df[target]):
        output_features.append({'name': target, 'type': 'numerical',
            'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})
    # BUG FIX: the original tested df[p] here (the last loop variable),
    # which checks the wrong column and raises NameError when `features`
    # is empty; the output column is `target`.
    elif pandas.api.types.is_string_dtype(df[target]):
        output_features.append({'name': target, 'type': 'category'})
    else:
        raise TypeError(f'column {target} value isnt number or string')
    return {
        'input_features' : input_features,
        'output_features': output_features,
    }
|
flexible
|
{
"blob_id": "b7521a604fb49591df814d469f53d35574126fdb",
"index": 7609,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):\n input_features, output_features = [], []\n for p in features:\n if pandas.api.types.is_numeric_dtype(df[p]):\n input_features.append({'name': p, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy':\n 'fill_with_mean', 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n input_features.append({'name': p, 'type': 'category'})\n else:\n raise TypeError(f'column {p} value isnt number or string')\n if pandas.api.types.is_numeric_dtype(df[target]):\n output_features.append({'name': target, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy': 'fill_with_mean',\n 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n output_features.append({'name': target, 'type': 'category'})\n else:\n raise TypeError(f'column {target} value isnt number or string')\n return {'input_features': input_features, 'output_features':\n output_features}\n",
"step-3": "import pandas\n\n\ndef ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):\n input_features, output_features = [], []\n for p in features:\n if pandas.api.types.is_numeric_dtype(df[p]):\n input_features.append({'name': p, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy':\n 'fill_with_mean', 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n input_features.append({'name': p, 'type': 'category'})\n else:\n raise TypeError(f'column {p} value isnt number or string')\n if pandas.api.types.is_numeric_dtype(df[target]):\n output_features.append({'name': target, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy': 'fill_with_mean',\n 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n output_features.append({'name': target, 'type': 'category'})\n else:\n raise TypeError(f'column {target} value isnt number or string')\n return {'input_features': input_features, 'output_features':\n output_features}\n",
"step-4": "import pandas\n\ndef ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):\n input_features, output_features = [], []\n for p in features:\n if (pandas.api.types.is_numeric_dtype(df[p])):\n input_features.append({'name': p, 'type': 'numerical', \n 'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})\n elif (pandas.api.types.is_string_dtype(df[p])):\n input_features.append({'name': p, 'type': 'category'})\n else:\n raise TypeError(f'column {p} value isnt number or string')\n \n if (pandas.api.types.is_numeric_dtype(df[target])):\n output_features.append({'name': target, 'type': 'numerical', \n 'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})\n elif (pandas.api.types.is_string_dtype(df[p])):\n output_features.append({'name': target, 'type': 'category'})\n else:\n raise TypeError(f'column {target} value isnt number or string')\n \n return {\n 'input_features' : input_features,\n 'output_features': output_features,\n }\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
config_prefix = '<'
config_suported_types = ['PNG', 'GIF', 'JPEG']
config_pattern = '^[A-Za-z0-9_]*$'
config_max_storage = int(1000000000.0)
config_max_name_length = 20
config_message_by_line = 2
config_max_message_length = 2000
config_max_emote_length = 8 * int(1000000.0)
config_pong = """
,;;;!!!!!;;.
:!!!!!!!!!!!!!!;
:!!!!!!!!!!!!!!!!!;
;!!!!!!!!!!!!!!!!!!!;
;!!!!! P O N G !!!!!!!
;!!!!!!!!!!!!!!!!!!!!'
;!!!!!!!!!!!!!!!!!!!'
:!!!!!!!!!!!!!!!!'
,!!!!!!!!!!!!!''
,;!!!'''''''
.!!!!'
!!!!`
`'
"""
<|reserved_special_token_1|>
config_prefix = "<"
config_suported_types = ["PNG", "GIF", "JPEG"]
config_pattern = "^[A-Za-z0-9_]*$"
config_max_storage = int(1E9)
config_max_name_length = 20
config_message_by_line = 2
config_max_message_length = 2000
config_max_emote_length = 8*int(1E6)
config_pong = """
,;;;!!!!!;;.
:!!!!!!!!!!!!!!;
:!!!!!!!!!!!!!!!!!;
;!!!!!!!!!!!!!!!!!!!;
;!!!!! P O N G !!!!!!!
;!!!!!!!!!!!!!!!!!!!!'
;!!!!!!!!!!!!!!!!!!!'
:!!!!!!!!!!!!!!!!'
,!!!!!!!!!!!!!''
,;!!!'''''''
.!!!!'
!!!!`
`'
"""
|
flexible
|
{
"blob_id": "dc2deb7d4c9cc126a6d80435fe9dbc16d6ac8941",
"index": 9397,
"step-1": "<mask token>\n",
"step-2": "config_prefix = '<'\nconfig_suported_types = ['PNG', 'GIF', 'JPEG']\nconfig_pattern = '^[A-Za-z0-9_]*$'\nconfig_max_storage = int(1000000000.0)\nconfig_max_name_length = 20\nconfig_message_by_line = 2\nconfig_max_message_length = 2000\nconfig_max_emote_length = 8 * int(1000000.0)\nconfig_pong = \"\"\"\n ,;;;!!!!!;;.\n :!!!!!!!!!!!!!!;\n :!!!!!!!!!!!!!!!!!;\n ;!!!!!!!!!!!!!!!!!!!;\n ;!!!!! P O N G !!!!!!!\n ;!!!!!!!!!!!!!!!!!!!!'\n ;!!!!!!!!!!!!!!!!!!!'\n :!!!!!!!!!!!!!!!!'\n ,!!!!!!!!!!!!!''\n ,;!!!'''''''\n.!!!!'\n!!!!`\n`'\n\"\"\"\n",
"step-3": "config_prefix = \"<\"\nconfig_suported_types = [\"PNG\", \"GIF\", \"JPEG\"]\nconfig_pattern = \"^[A-Za-z0-9_]*$\"\nconfig_max_storage = int(1E9)\nconfig_max_name_length = 20\nconfig_message_by_line = 2\nconfig_max_message_length = 2000\nconfig_max_emote_length = 8*int(1E6)\nconfig_pong = \"\"\"\n ,;;;!!!!!;;.\n :!!!!!!!!!!!!!!;\n :!!!!!!!!!!!!!!!!!;\n ;!!!!!!!!!!!!!!!!!!!;\n ;!!!!! P O N G !!!!!!!\n ;!!!!!!!!!!!!!!!!!!!!'\n ;!!!!!!!!!!!!!!!!!!!'\n :!!!!!!!!!!!!!!!!'\n ,!!!!!!!!!!!!!''\n ,;!!!'''''''\n.!!!!'\n!!!!`\n`'\n\"\"\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_covid():
    """Download the OWID COVID-19 dataset and return world daily new cases.

    Returns:
        dict with 'target' (the label column name, 'new_cases') and
        'dataset' (DataFrame indexed by date, columns ['new_cases', 'month']).

    NOTE(review): performs a full network download on every call —
    consider caching the CSV locally.
    """
    covid = pd.read_csv(
        'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'
        )
    target = 'new_cases'
    date = 'date'
    # Keep only the global aggregate rows and the two columns we need.
    dataset = covid[covid['location'] == 'World'].copy()[[target, date]]
    dataset[date] = pd.to_datetime(dataset[date])
    # Index by date and expose the month as an explicit feature column.
    dataset.index = dataset[date]
    dataset['month'] = dataset['date'].dt.month
    dataset = dataset.drop(columns=['date'])
    return {'target': target, 'dataset': dataset}
<|reserved_special_token_1|>
import pandas as pd
def load_covid():
    """Download the OWID COVID-19 dataset and return world daily new cases.

    Fetches owid-covid-data.csv over HTTP, keeps only the 'World'
    aggregate rows, indexes them by date and adds a 'month' column.

    Returns:
        dict with 'target' (the label column name, 'new_cases') and
        'dataset' (DataFrame indexed by date, columns ['new_cases', 'month']).
    """
    raw = pd.read_csv(
        'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'
        )
    target_col = 'new_cases'
    date_col = 'date'
    # Keep only the global aggregate and the two columns of interest.
    world = raw[raw['location'] == 'World'].copy()[[target_col, date_col]]
    world[date_col] = pd.to_datetime(world[date_col])
    world.index = world[date_col]
    # Month number as an explicit feature, then drop the raw date column.
    world['month'] = world[date_col].dt.month
    world = world.drop(columns=[date_col])
    return {'target': target_col, 'dataset': world}
<|reserved_special_token_1|>
import pandas as pd
def load_covid():
    """Download the OWID COVID-19 dataset and return world daily new cases.

    Returns:
        dict with 'target' (the label column name, 'new_cases') and
        'dataset' (DataFrame indexed by date, columns ['new_cases', 'month']).

    NOTE(review): performs a full network download on every call —
    consider caching the CSV locally.
    """
    covid = pd.read_csv("https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
    target = 'new_cases'
    date = 'date'
    # Keep only the global aggregate rows and the two columns we need.
    dataset = covid[(covid['location'] == 'World')].copy()[[target, date]]
    dataset[date] = pd.to_datetime(dataset[date])
    # Index by date and expose the month as an explicit feature column.
    dataset.index = dataset[date]
    dataset['month'] = dataset['date'].dt.month
    dataset = dataset.drop(columns=['date'])
    return {
        'target': target,
        'dataset': dataset,
    }
|
flexible
|
{
"blob_id": "e19529dce407da0f1e21f6a3696efcefac9ed040",
"index": 8500,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_covid():\n covid = pd.read_csv(\n 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'\n )\n target = 'new_cases'\n date = 'date'\n dataset = covid[covid['location'] == 'World'].copy()[[target, date]]\n dataset[date] = pd.to_datetime(dataset[date])\n dataset.index = dataset[date]\n dataset['month'] = dataset['date'].dt.month\n dataset = dataset.drop(columns=['date'])\n return {'target': target, 'dataset': dataset}\n",
"step-3": "import pandas as pd\n\n\ndef load_covid():\n covid = pd.read_csv(\n 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv'\n )\n target = 'new_cases'\n date = 'date'\n dataset = covid[covid['location'] == 'World'].copy()[[target, date]]\n dataset[date] = pd.to_datetime(dataset[date])\n dataset.index = dataset[date]\n dataset['month'] = dataset['date'].dt.month\n dataset = dataset.drop(columns=['date'])\n return {'target': target, 'dataset': dataset}\n",
"step-4": "import pandas as pd\n\n\ndef load_covid():\n covid = pd.read_csv(\"https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv\")\n\n target = 'new_cases'\n date = 'date'\n\n dataset = covid[(covid['location'] == 'World')].copy()[[target, date]]\n dataset[date] = pd.to_datetime(dataset[date])\n dataset.index = dataset[date]\n\n dataset['month'] = dataset['date'].dt.month\n dataset = dataset.drop(columns=['date'])\n\n return {\n 'target': target,\n 'dataset': dataset,\n }\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def test_form(request):
    # Render the (empty) e-mail entry form page.
    print('test 함수 실행하자 ')
    return render(request, 'emaillist/test_form.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_form(request):
    # Render the (empty) e-mail entry form page.
    print('test 함수 실행하자 ')
    return render(request, 'emaillist/test_form.html')
def add(request):
    # Handle the e-mail form POST: build an Emaillist row from the
    # submitted fields, persist it, and redirect back to the list page.
    emaillist = Emaillist()
    emaillist.first_name = request.POST['fn']
    emaillist.last_name = request.POST['ln']
    emaillist.email = request.POST['email']
    emaillist.save()
    return HttpResponseRedirect('/emaillist')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_index(request):
    # Render the e-mail list page, newest entries first.
    print('test_index 함수 실행하자 ')
    emaillist_list = Emaillist.objects.all().order_by('-id')
    data = {'emaillist_list': emaillist_list}
    return render(request, 'emaillist/test_index.html', data)
def test_form(request):
    # Render the (empty) e-mail entry form page.
    print('test 함수 실행하자 ')
    return render(request, 'emaillist/test_form.html')
def add(request):
    # Handle the e-mail form POST: build an Emaillist row from the
    # submitted fields, persist it, and redirect back to the list page.
    emaillist = Emaillist()
    emaillist.first_name = request.POST['fn']
    emaillist.last_name = request.POST['ln']
    emaillist.email = request.POST['email']
    emaillist.save()
    return HttpResponseRedirect('/emaillist')
<|reserved_special_token_1|>
from django.shortcuts import render
from emaillist.models import Emaillist
from django.http import HttpResponseRedirect
def test_index(request):
    # Render the e-mail list page, newest entries first.
    print('test_index 함수 실행하자 ')
    emaillist_list = Emaillist.objects.all().order_by('-id')
    data = {'emaillist_list': emaillist_list}
    return render(request, 'emaillist/test_index.html', data)
def test_form(request):
    # Render the (empty) e-mail entry form page.
    print('test 함수 실행하자 ')
    return render(request, 'emaillist/test_form.html')
def add(request):
    # Handle the e-mail form POST: build an Emaillist row from the
    # submitted fields, persist it, and redirect back to the list page.
    emaillist = Emaillist()
    emaillist.first_name = request.POST['fn']
    emaillist.last_name = request.POST['ln']
    emaillist.email = request.POST['email']
    emaillist.save()
    return HttpResponseRedirect('/emaillist')
<|reserved_special_token_1|>
from django.shortcuts import render
# from emaillist.models import Emaillist
from emaillist.models import Emaillist
from django.http import HttpResponseRedirect
# Create your views here.
# def index(request):
# emaillist_list = Emaillist.objects.all().order_by('-id') # db에서 objects 전체를 불러와서 변수에 저장
# data = {'emaillist_list':emaillist_list} # 딕션너리 형식으로 데이터에 저장
# return render(request, 'emaillist/index.html', data) # render 라는 임시변수에 url(request)에서 불러온 값으로 emillist/index.html 형식으로 data값을 출력한다.
def test_index(request):
    """Render the e-mail list page, newest entries first."""
    print("test_index 함수 실행하자 ")
    emaillist_list = Emaillist.objects.all().order_by('-id') # fetch every record from the DB into a variable
    data = {'emaillist_list':emaillist_list} # pass to the template as a dict
    return render(request, 'emaillist/test_index.html', data)
# def form(request):
# return render(request, 'emaillist/form.html')
def test_form(request):
    """Render the (empty) e-mail entry form page."""
    print("test 함수 실행하자 ")
    return render(request, 'emaillist/test_form.html')
def add(request):
    """Create an Emaillist row from the submitted form and redirect to the list."""
    emaillist = Emaillist()
    emaillist.first_name = request.POST['fn'] # value from the form's first-name input ('fn'), stored in the emaillist table's first_name column
    emaillist.last_name = request.POST['ln'] # value from the form's last-name input ('ln'), stored in the last_name column
    emaillist.email = request.POST['email'] # value from the form's email input, stored in the email column

    emaillist.save() # persist the record to the DB

    return HttpResponseRedirect('/emaillist') # on success, go back to the list page
#
# def add2(request):
# emaillist2 = Emaillist2()
# emaillist2.first_name = request.POST['fn']
# emaillist2.last_name = request.POST['ln']
# emaillist2.email = request.POST['email']
#
# emaillist2.save()
#
# return HttpResponseRedirect('/emaillist')
|
flexible
|
{
"blob_id": "5220ad793788927e94caf7d6a42df11292851c67",
"index": 2734,
"step-1": "<mask token>\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn']\n emaillist.last_name = request.POST['ln']\n emaillist.email = request.POST['email']\n emaillist.save()\n return HttpResponseRedirect('/emaillist')\n",
"step-3": "<mask token>\n\n\ndef test_index(request):\n print('test_index 함수 실행하자 ')\n emaillist_list = Emaillist.objects.all().order_by('-id')\n data = {'emaillist_list': emaillist_list}\n return render(request, 'emaillist/test_index.html', data)\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn']\n emaillist.last_name = request.POST['ln']\n emaillist.email = request.POST['email']\n emaillist.save()\n return HttpResponseRedirect('/emaillist')\n",
"step-4": "from django.shortcuts import render\nfrom emaillist.models import Emaillist\nfrom django.http import HttpResponseRedirect\n\n\ndef test_index(request):\n print('test_index 함수 실행하자 ')\n emaillist_list = Emaillist.objects.all().order_by('-id')\n data = {'emaillist_list': emaillist_list}\n return render(request, 'emaillist/test_index.html', data)\n\n\ndef test_form(request):\n print('test 함수 실행하자 ')\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn']\n emaillist.last_name = request.POST['ln']\n emaillist.email = request.POST['email']\n emaillist.save()\n return HttpResponseRedirect('/emaillist')\n",
"step-5": "from django.shortcuts import render\n# from emaillist.models import Emaillist\nfrom emaillist.models import Emaillist\nfrom django.http import HttpResponseRedirect\n\n# Create your views here.\n\n# def index(request):\n# emaillist_list = Emaillist.objects.all().order_by('-id') # db에서 objects 전체를 불러와서 변수에 저장\n# data = {'emaillist_list':emaillist_list} # 딕션너리 형식으로 데이터에 저장\n# return render(request, 'emaillist/index.html', data) # render 라는 임시변수에 url(request)에서 불러온 값으로 emillist/index.html 형식으로 data값을 출력한다.\n\n\ndef test_index(request):\n print(\"test_index 함수 실행하자 \")\n emaillist_list = Emaillist.objects.all().order_by('-id') # db에서 objects 전체를 불러와서 변수에 저장\n data = {'emaillist_list':emaillist_list} # 딕션너리 형식으로 데이터에 저장\n return render(request, 'emaillist/test_index.html', data)\n\n# def form(request):\n# return render(request, 'emaillist/form.html')\n\ndef test_form(request):\n print(\"test 함수 실행하자 \")\n return render(request, 'emaillist/test_form.html')\n\n\ndef add(request):\n emaillist = Emaillist()\n emaillist.first_name = request.POST['fn'] # 웹에 first_name부분에 작성한 값 (index.html에서 input으로 받은 password) 을 가져와서 데이터베이스(emailist)의 first_name column에 저장\n emaillist.last_name = request.POST['ln'] # 웹에 last_name부분에 작성한 값 (index.html에서 input으로 받은 password) 을 가져와서 데이터베이스(emailist)의 last_name column에 저장\n emaillist.email = request.POST['email'] # 웹에 email부분에 작성한 값 (index.html에서 input으로 받은 password) 을 가져와서 데이터베이스(emailist)의 email column에 저장\n\n emaillist.save() # 저장된 내역을 DB에 저장\n\n return HttpResponseRedirect('/emaillist') # 저장완료되면 기존 리스트 페이지로 이동\n#\n# def add2(request):\n# emaillist2 = Emaillist2()\n# emaillist2.first_name = request.POST['fn']\n# emaillist2.last_name = request.POST['ln']\n# emaillist2.email = request.POST['email']\n#\n# emaillist2.save()\n#\n# return HttpResponseRedirect('/emaillist')",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def send_rdd(rdd):
    # Pull the (word, count) records back to the driver and publish each
    # one, stringified, to the Kafka output topic.
    # NOTE(review): collect() assumes the per-batch result is small.
    out_list = rdd.collect()
    for word in out_list:
        producer.send('had2020011-out', value=str(word))
<|reserved_special_token_0|>
def aggregator(values, old):
    """updateStateByKey callback: fold this batch's counts into the
    running per-key total (state is None before the first batch)."""
    running = sum(values)
    if old:
        running += old
    return running
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_rdd(rdd):
    # Pull the (word, count) records back to the driver and publish each
    # one, stringified, to the Kafka output topic.
    # NOTE(review): collect() assumes the per-batch result is small.
    out_list = rdd.collect()
    for word in out_list:
        producer.send('had2020011-out', value=str(word))
<|reserved_special_token_0|>
def aggregator(values, old):
    # State-update function for updateStateByKey; a missing previous
    # state (None) contributes zero.
    previous_total = old or 0
    return sum(values) + previous_total
<|reserved_special_token_0|>
# updateStateByKey requires checkpointing; write to a fresh timestamped dir.
ssc.checkpoint('./checkpoint{}'.format(time.strftime('%Y_%m_%d_%H_%M_%s',
    time.gmtime())))
ssc.start()
# Block the driver until the streaming context is stopped externally.
ssc.awaitTermination()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Local Spark context with 4 worker threads; 10-second micro-batches.
sc = SparkContext(master='local[4]')
ssc = StreamingContext(sc, batchDuration=10)
# Kafka producer for publishing results (values UTF-8 encoded).
producer = KafkaProducer(bootstrap_servers=['mipt-node06.atp-fivt.org:9092'
    ], value_serializer=lambda x: x.encode('utf-8'))
# Direct (receiver-less) stream from the input topic.
dstream = KafkaUtils.createDirectStream(ssc, topics=['had2020011-topic'],
    kafkaParams={'metadata.broker.list': 'mipt-node06.atp-fivt.org:9092'})
<|reserved_special_token_0|>
keywords = ['lol', 'kek'] if len(sys.argv) <= 1 else sys.argv[1:]
remove = dict.fromkeys(map(ord, '\n ' + string.punctuation))
def send_rdd(rdd):
    # Pull the (word, count) records back to the driver and publish each
    # one, stringified, to the Kafka output topic.
    # NOTE(review): collect() assumes the per-batch result is small.
    out_list = rdd.collect()
    for word in out_list:
        producer.send('had2020011-out', value=str(word))
initialized = False
def aggregator(values, old):
    """updateStateByKey callback: fold this batch's counts into the
    running per-key total (state is None before the first batch)."""
    running = sum(values)
    if old:
        running += old
    return running
# Seed every tracked keyword with a zero count.
initState = sc.parallelize(list(zip(keywords, [0] * len(keywords))))
# Tokenize each message value, strip punctuation/newlines, keep only the
# tracked keywords, count them over a sliding 60s window, fold into the
# persistent per-keyword state, then publish each batch downstream.
result = dstream.flatMap(lambda pair: pair[1].split(' ')).map(lambda word:
    word.translate(remove)).filter(lambda word: word in keywords).map(lambda
    word: (word.lower(), 1)).reduceByKeyAndWindow(lambda x, y: x + y, lambda
    x, y: x - y, 60, 60).updateStateByKey(aggregator, initialRDD=initState
    ).foreachRDD(lambda rdd: send_rdd(rdd))
# Checkpointing is required by updateStateByKey; fresh timestamped dir.
ssc.checkpoint('./checkpoint{}'.format(time.strftime('%Y_%m_%d_%H_%M_%s',
    time.gmtime())))
ssc.start()
ssc.awaitTermination()
<|reserved_special_token_1|>
from pyspark import SparkContext, RDD
from pyspark.sql import SparkSession, DataFrame
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
import string
from kafka import KafkaProducer
import time
import pyspark
# Local Spark context with 4 worker threads; 10-second micro-batches.
sc = SparkContext(master='local[4]')
ssc = StreamingContext(sc, batchDuration=10)
# Kafka producer for publishing results (values UTF-8 encoded).
producer = KafkaProducer(bootstrap_servers=['mipt-node06.atp-fivt.org:9092'
    ], value_serializer=lambda x: x.encode('utf-8'))
# Direct (receiver-less) stream from the input topic.
dstream = KafkaUtils.createDirectStream(ssc, topics=['had2020011-topic'],
    kafkaParams={'metadata.broker.list': 'mipt-node06.atp-fivt.org:9092'})
# NOTE(review): mid-file import — move to the top with the others.
import sys
# Keywords to track come from argv, defaulting to two dummies.
keywords = ['lol', 'kek'] if len(sys.argv) <= 1 else sys.argv[1:]
# Translation table that deletes newlines, spaces and punctuation.
remove = dict.fromkeys(map(ord, '\n ' + string.punctuation))
def send_rdd(rdd):
    # Pull the (word, count) records back to the driver and publish each
    # one, stringified, to the Kafka output topic.
    # NOTE(review): collect() assumes the per-batch result is small.
    out_list = rdd.collect()
    for word in out_list:
        producer.send('had2020011-out', value=str(word))
initialized = False
def aggregator(values, old):
    # State-update function for updateStateByKey; a missing previous
    # state (None) contributes zero.
    previous_total = old or 0
    return sum(values) + previous_total
# Seed every tracked keyword with a zero count.
initState = sc.parallelize(list(zip(keywords, [0] * len(keywords))))
# Tokenize, strip punctuation, keep tracked keywords, count over a
# sliding 60s window, fold into persistent state, publish each batch.
result = dstream.flatMap(lambda pair: pair[1].split(' ')).map(lambda word:
    word.translate(remove)).filter(lambda word: word in keywords).map(lambda
    word: (word.lower(), 1)).reduceByKeyAndWindow(lambda x, y: x + y, lambda
    x, y: x - y, 60, 60).updateStateByKey(aggregator, initialRDD=initState
    ).foreachRDD(lambda rdd: send_rdd(rdd))
# Checkpointing is required by updateStateByKey; fresh timestamped dir.
ssc.checkpoint('./checkpoint{}'.format(time.strftime('%Y_%m_%d_%H_%M_%s',
    time.gmtime())))
ssc.start()
ssc.awaitTermination()
<|reserved_special_token_1|>
from pyspark import SparkContext, RDD
from pyspark.sql import SparkSession, DataFrame
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
import string
from kafka import KafkaProducer
import time
import pyspark
# Local Spark context with 4 worker threads; 10-second micro-batches.
sc = SparkContext(master='local[4]')
ssc = StreamingContext(sc, batchDuration=10)
# Dead alternative left by the author: structured-streaming Kafka sink.
# producer = df \
#     .selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)") \
#     .writeStream \
#     .format("kafka") \
#     .option("kafka.bootstrap.servers", "host1:port1,host2:port2") \
#     .option("topic", "topic1") \
#     .start()
# kafka-python producer used instead; values are UTF-8 encoded.
producer = KafkaProducer(bootstrap_servers=['mipt-node06.atp-fivt.org:9092'],
                         value_serializer=lambda x:
                         x.encode('utf-8'))
# Direct (receiver-less) stream from the input topic.
dstream = KafkaUtils.createDirectStream(
    ssc, topics=['had2020011-topic'],
    kafkaParams = {'metadata.broker.list': 'mipt-node06.atp-fivt.org:9092'}
)
# NOTE(review): mid-file import — move to the top with the others.
import sys
# Keywords to track come from argv, defaulting to two dummies.
keywords = ['lol', 'kek'] if len(sys.argv) <= 1 else sys.argv[1:]
# Translation table that deletes newlines, spaces and punctuation.
remove = dict.fromkeys(map(ord, '\n ' + string.punctuation))
def send_rdd(rdd):
    # Pull the (word, count) records back to the driver and publish each
    # one, stringified, to the Kafka output topic.
    # NOTE(review): collect() assumes the per-batch result is small.
    out_list = rdd.collect()
    for word in out_list:
        producer.send('had2020011-out', value=str(word))
initialized = False
def aggregator(values, old):
    """updateStateByKey callback: fold this batch's counts into the
    running per-key total (state is None before the first batch)."""
    running = sum(values)
    if old:
        running += old
    return running
# Seed every tracked keyword with a zero count.
initState = sc.parallelize(list(zip(keywords, [0] * len(keywords))))
# Tokenize each message value, strip punctuation, keep tracked keywords,
# count over a sliding 60s window, fold into persistent per-keyword
# state, and publish each batch to the output topic.
result = dstream \
    .flatMap(lambda pair: pair[1].split(" ")) \
    .map(lambda word: word.translate(remove)) \
    .filter(lambda word: word in keywords) \
    .map(lambda word: (word.lower(), 1)) \
    .reduceByKeyAndWindow(lambda x, y: x + y, lambda x, y: x - y, 60, 60) \
    .updateStateByKey(aggregator, initialRDD=initState) \
    .foreachRDD(lambda rdd : send_rdd(rdd))
# Checkpointing is required by updateStateByKey; fresh timestamped dir.
ssc.checkpoint('./checkpoint{}'.format(time.strftime("%Y_%m_%d_%H_%M_%s", time.gmtime())))
ssc.start()
ssc.awaitTermination()
|
flexible
|
{
"blob_id": "12fdeae0ae1618139b20176846e7df5b82f7aa01",
"index": 8274,
"step-1": "<mask token>\n\n\ndef send_rdd(rdd):\n out_list = rdd.collect()\n for word in out_list:\n producer.send('had2020011-out', value=str(word))\n\n\n<mask token>\n\n\ndef aggregator(values, old):\n return (old or 0) + sum(values)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_rdd(rdd):\n out_list = rdd.collect()\n for word in out_list:\n producer.send('had2020011-out', value=str(word))\n\n\n<mask token>\n\n\ndef aggregator(values, old):\n return (old or 0) + sum(values)\n\n\n<mask token>\nssc.checkpoint('./checkpoint{}'.format(time.strftime('%Y_%m_%d_%H_%M_%s',\n time.gmtime())))\nssc.start()\nssc.awaitTermination()\n",
"step-3": "<mask token>\nsc = SparkContext(master='local[4]')\nssc = StreamingContext(sc, batchDuration=10)\nproducer = KafkaProducer(bootstrap_servers=['mipt-node06.atp-fivt.org:9092'\n ], value_serializer=lambda x: x.encode('utf-8'))\ndstream = KafkaUtils.createDirectStream(ssc, topics=['had2020011-topic'],\n kafkaParams={'metadata.broker.list': 'mipt-node06.atp-fivt.org:9092'})\n<mask token>\nkeywords = ['lol', 'kek'] if len(sys.argv) <= 1 else sys.argv[1:]\nremove = dict.fromkeys(map(ord, '\\n ' + string.punctuation))\n\n\ndef send_rdd(rdd):\n out_list = rdd.collect()\n for word in out_list:\n producer.send('had2020011-out', value=str(word))\n\n\ninitialized = False\n\n\ndef aggregator(values, old):\n return (old or 0) + sum(values)\n\n\ninitState = sc.parallelize(list(zip(keywords, [0] * len(keywords))))\nresult = dstream.flatMap(lambda pair: pair[1].split(' ')).map(lambda word:\n word.translate(remove)).filter(lambda word: word in keywords).map(lambda\n word: (word.lower(), 1)).reduceByKeyAndWindow(lambda x, y: x + y, lambda\n x, y: x - y, 60, 60).updateStateByKey(aggregator, initialRDD=initState\n ).foreachRDD(lambda rdd: send_rdd(rdd))\nssc.checkpoint('./checkpoint{}'.format(time.strftime('%Y_%m_%d_%H_%M_%s',\n time.gmtime())))\nssc.start()\nssc.awaitTermination()\n",
"step-4": "from pyspark import SparkContext, RDD\nfrom pyspark.sql import SparkSession, DataFrame\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nimport string\nfrom kafka import KafkaProducer\nimport time\nimport pyspark\nsc = SparkContext(master='local[4]')\nssc = StreamingContext(sc, batchDuration=10)\nproducer = KafkaProducer(bootstrap_servers=['mipt-node06.atp-fivt.org:9092'\n ], value_serializer=lambda x: x.encode('utf-8'))\ndstream = KafkaUtils.createDirectStream(ssc, topics=['had2020011-topic'],\n kafkaParams={'metadata.broker.list': 'mipt-node06.atp-fivt.org:9092'})\nimport sys\nkeywords = ['lol', 'kek'] if len(sys.argv) <= 1 else sys.argv[1:]\nremove = dict.fromkeys(map(ord, '\\n ' + string.punctuation))\n\n\ndef send_rdd(rdd):\n out_list = rdd.collect()\n for word in out_list:\n producer.send('had2020011-out', value=str(word))\n\n\ninitialized = False\n\n\ndef aggregator(values, old):\n return (old or 0) + sum(values)\n\n\ninitState = sc.parallelize(list(zip(keywords, [0] * len(keywords))))\nresult = dstream.flatMap(lambda pair: pair[1].split(' ')).map(lambda word:\n word.translate(remove)).filter(lambda word: word in keywords).map(lambda\n word: (word.lower(), 1)).reduceByKeyAndWindow(lambda x, y: x + y, lambda\n x, y: x - y, 60, 60).updateStateByKey(aggregator, initialRDD=initState\n ).foreachRDD(lambda rdd: send_rdd(rdd))\nssc.checkpoint('./checkpoint{}'.format(time.strftime('%Y_%m_%d_%H_%M_%s',\n time.gmtime())))\nssc.start()\nssc.awaitTermination()\n",
"step-5": "from pyspark import SparkContext, RDD\nfrom pyspark.sql import SparkSession, DataFrame\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nimport string\nfrom kafka import KafkaProducer\nimport time\nimport pyspark\n\n\nsc = SparkContext(master='local[4]')\nssc = StreamingContext(sc, batchDuration=10)\n\n# producer = df \\\n# .selectExpr(\"CAST(key AS STRING)\", \"CAST(value AS STRING)\") \\\n# .writeStream \\\n# .format(\"kafka\") \\\n# .option(\"kafka.bootstrap.servers\", \"host1:port1,host2:port2\") \\\n# .option(\"topic\", \"topic1\") \\\n# .start()\n\nproducer = KafkaProducer(bootstrap_servers=['mipt-node06.atp-fivt.org:9092'],\n value_serializer=lambda x:\n x.encode('utf-8'))\n\n\ndstream = KafkaUtils.createDirectStream(\n ssc, topics=['had2020011-topic'],\n kafkaParams = {'metadata.broker.list': 'mipt-node06.atp-fivt.org:9092'}\n)\n\nimport sys\nkeywords = ['lol', 'kek'] if len(sys.argv) <= 1 else sys.argv[1:]\nremove = dict.fromkeys(map(ord, '\\n ' + string.punctuation))\n\ndef send_rdd(rdd):\n out_list = rdd.collect()\n for word in out_list:\n producer.send('had2020011-out', value=str(word))\n\ninitialized = False\n\ndef aggregator(values, old):\n return (old or 0) + sum(values)\n\ninitState = sc.parallelize(list(zip(keywords, [0] * len(keywords))))\n\nresult = dstream \\\n .flatMap(lambda pair: pair[1].split(\" \")) \\\n .map(lambda word: word.translate(remove)) \\\n .filter(lambda word: word in keywords) \\\n .map(lambda word: (word.lower(), 1)) \\\n .reduceByKeyAndWindow(lambda x, y: x + y, lambda x, y: x - y, 60, 60) \\\n .updateStateByKey(aggregator, initialRDD=initState) \\\n .foreachRDD(lambda rdd : send_rdd(rdd))\n # \\\n\n\n\nssc.checkpoint('./checkpoint{}'.format(time.strftime(\"%Y_%m_%d_%H_%M_%s\", time.gmtime())))\nssc.start()\nssc.awaitTermination()\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from datareader import *
import matplotlib.pyplot as plt
from plotting import *
from misc import *
import leastSquares as lsModel
import masim as mAvgSim
import numpy as np
import pandas as pd
import statistics as stat
from datetime import datetime as dt
from time import mktime
def main():
    """Entry point: run the daily buy/sell signal check over the stock list.

    The large block of commented-out experiments that used to live here
    (ad-hoc scraping, single-ticker backtests, list-wide tests) was removed
    as dead code; scrape_data() and test_stock_list() cover those paths.
    """
    daily_signal_checker('china_stocks.csv', location='chineseStocks/')
def update_portfolio(portfolio='portfolio.csv'):
    """Load the portfolio CSV and return it.

    Fixes the original body, which read the local name ``portfolio`` before
    it was ever assigned (``portfolio = pd.read_csv(portfolio)``), raising
    UnboundLocalError on every call.  The path is now a parameter with a
    default, keeping the zero-argument call signature backward compatible.

    Args:
        portfolio: path to the portfolio CSV file.

    Returns:
        pandas.DataFrame with the portfolio contents.
    """
    return pd.read_csv(portfolio)
def daily_signal_checker(stocks, location):
    """For every company code in *stocks*, fetch cached history plus today's
    open price and print the moving-average buy/sell signal."""
    window = 6  # look-back horizon (days) for the moving-average model
    listing = pd.read_csv(stocks)
    for company in listing['Code']:
        source = backtest_database(company, '2019-09-16', '2020-11-18', 1)
        history = source.read_csv(location=location)
        today_open = float(source.get_today_open())
        print(today_open)
        # Append today's open so the simulator can predict on it.
        history = history.append({'Open': today_open}, ignore_index=True)
        model = mAvgSim.movingAverageSim(history)
        decision = model.produce_buy_sell(ndays=window)
        print("Company:", company,
              "Signals:", decision)
def scrape_data(stock_list, location, start, end):
    """Download and cache a CSV of historical prices for every code in *stock_list*."""
    for ticker in stock_list['Code']:
        print("Got Code:", ticker)
        backtest_database(ticker, start, end, 1).create_csv(location=location)
def test_stock_list(stock_list, location, ndays):
    """Backtest every company in *stock_list* and print aggregate P&L stats.

    Args:
        stock_list: DataFrame with a 'Code' column of ticker symbols.
        location: directory holding the cached per-ticker CSV files.
        ndays: moving-average window passed to the simulator.
    """
    # Collect one dict per traded company and build the DataFrame once at
    # the end: the original appended to a DataFrame inside the loop, which
    # is quadratic and uses DataFrame.append (removed in pandas 2.0).
    rows = []
    for code in stock_list['Code']:
        print(code)
        df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1).read_csv(location=location)
        sim = mAvgSim.movingAverageSim(df_stock)
        net, num_trades, test_error = sim.run_simulation(ndays=ndays)
        if num_trades == 0:
            continue  # nothing traded -> excluded from the statistics
        rows.append({
            'Company': code,
            'No. Trades': num_trades,
            'Net return': net,
            'Test Error': test_error
        })
    returns = pd.DataFrame(rows, columns=['Company', 'No. Trades', 'Net return', 'Test Error'])
    print("Mean Test Error = ", np.mean(returns['Test Error']))
    net_profit = np.sum(returns['Net return'])
    companies_traded = len(returns)
    # NOTE(review): stat.mean/stdev raise StatisticsError for 0 or 1 traded
    # companies, exactly as the original did — behavior preserved.
    mean = stat.mean(returns['Net return'])
    std = stat.stdev(returns['Net return'])
    print("Net Profit =", net_profit,
          '\n Total number of companies traded =', companies_traded,
          '\n Mean Profit =', mean,
          '\n Standard Deviation', std)
    print(returns)
# Run the daily signal check only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "8d5e652fda3fb172e6faab4153bca8f78c114cd1",
"index": 7973,
"step-1": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\n<mask token>\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\n<mask token>\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\ndef update_portfolio():\n portfolio = pd.read_csv(portfolio)\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from datareader import *\nimport matplotlib.pyplot as plt\nfrom plotting import *\nfrom misc import *\nimport leastSquares as lsModel\nimport masim as mAvgSim\nimport numpy as np\nimport pandas as pd\nimport statistics as stat\nfrom datetime import datetime as dt\nfrom time import mktime\n\n\ndef main():\n daily_signal_checker('china_stocks.csv', location='chineseStocks/')\n\n\ndef update_portfolio():\n portfolio = pd.read_csv(portfolio)\n\n\ndef daily_signal_checker(stocks, location):\n ndays = 6\n stock_list = pd.read_csv(stocks)\n for code in stock_list['Code']:\n tmp = backtest_database(code, '2019-09-16', '2020-11-18', 1)\n df_stock = tmp.read_csv(location=location)\n open_price = float(tmp.get_today_open())\n print(open_price)\n df_stock = df_stock.append({'Open': open_price}, ignore_index=True)\n sim = mAvgSim.movingAverageSim(df_stock)\n signals = sim.produce_buy_sell(ndays=ndays)\n print('Company:', code, 'Signals:', signals)\n\n\ndef scrape_data(stock_list, location, start, end):\n for code in stock_list['Code']:\n print('Got Code:', code)\n tmp = backtest_database(code, start, end, 1)\n tmp.create_csv(location=location)\n\n\ndef test_stock_list(stock_list, location, ndays):\n returns = pd.DataFrame(columns=['Company', 'No. Trades', 'Net return',\n 'Test Error'])\n for code in stock_list['Code']:\n print(code)\n df_stock = backtest_database(code, '2019-09-16', '2020-02-17', 1\n ).read_csv(location=location)\n sim = mAvgSim.movingAverageSim(df_stock)\n net, num_trades, test_error = sim.run_simulation(ndays=ndays)\n if num_trades == 0:\n continue\n returns = returns.append({'Company': code, 'No. 
Trades': num_trades,\n 'Net return': net, 'Test Error': test_error}, ignore_index=True)\n print('Mean Test Error = ', np.mean(returns['Test Error']))\n net_profit = np.sum(returns['Net return'])\n companies_traded = len(returns)\n mean = stat.mean(returns['Net return'])\n std = stat.stdev(returns['Net return'])\n print('Net Profit =', net_profit,\n '\\n Total number of companies traded =', companies_traded,\n '\\n Mean Profit =', mean, \"\"\"\n Standard Deviation\"\"\", std)\n print(returns)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from datareader import *\nimport matplotlib.pyplot as plt\nfrom plotting import *\nfrom misc import *\nimport leastSquares as lsModel\nimport masim as mAvgSim\nimport numpy as np\nimport pandas as pd\nimport statistics as stat\nfrom datetime import datetime as dt\nfrom time import mktime\n\ndef main():\n\t# scrape_data(pd.read_csv('china_stocks.csv'),location='chineseStocks/',\n\t# \t\t\t\t\t\tstart='2019-09-16',end='2020-11-12')\n\t# cypt_scrape = backtest_database('LINK-USD','2019-09-16','2020-11-12',1)\n\t# cypt_scrape.create_csv('/Users/jimmylin/Desktop/Quant_Trading/Trading/')\n\t# df_stock = pd.read_csv('603131.csv')\n\t# df_cypt = pd.read_csv('LINK-USD.csv')\n\t# df_stock = backtest_database('603993.SS','2019-09-16','2020-11-17',1).read_csv(location='chineseStocks/')\n\t# sim = mAvgSim.movingAverageSim(df_stock)\n\t# sim = mAvgSim.movingAverageSim(df_cypt)\n\t# net,num_trades,test_error = sim.run_simulation(ndays=15)\n\t# sim.plot_graph()\n\t# test_stock_list(stock_list=pd.read_csv('china_stocks.csv'),location='chineseStocks/',ndays=4)\n\tdaily_signal_checker('china_stocks.csv',location='chineseStocks/')\n\t# update_open_close('china_stocks.csv',location='chineseStocks/')\n\t# tmp = backtest_database('300261.SZ','2019-09-16','2020-02-16',1)\n\t# df_stock = tmp.read_csv('chineseStocks/')\n\t# open_price = tmp.get_today_open()\n\t# df_stock = df_stock.append({'Open' : open_price},ignore_index=True)\n\t# sim = mAvgSim.movingAverageSim(df_stock)\n\t# sim.run_simulation(ndays=5)\n\t# signals = sim.produce_buy_sell(ndays=1)\n\t# print(signals)\n\ndef update_portfolio():\n\tportfolio = pd.read_csv(portfolio)\n\ndef daily_signal_checker(stocks,location):\n\tndays=6\n\t# Get updated stock prices (whole csv)\n\t# scrape_data(pd.read_csv(stocks),location='chineseStocks/',\n\t# \t\t\t\t\t\tstart='2019-09-16',end='2020-11-24')\n\t# Run through stock list to get opens and predict\n\tstock_list = pd.read_csv(stocks)\n\tfor code in stock_list['Code']:\n\t\ttmp = 
backtest_database(code,'2019-09-16','2020-11-18',1)\n\t\tdf_stock = tmp.read_csv(location=location)\n\t\topen_price = float(tmp.get_today_open())\n\t\t# print(code)\n\t\tprint(open_price)\n\t\tdf_stock = df_stock.append({'Open' : open_price},ignore_index=True)\n\t\tsim = mAvgSim.movingAverageSim(df_stock)\n\t\tsignals = sim.produce_buy_sell(ndays=ndays)\n\t\tprint(\"Company:\",code,\n\t\t\t\"Signals:\",signals)\n\ndef scrape_data(stock_list,location,start,end):\n\tfor code in stock_list['Code']:\n\t\tprint(\"Got Code:\",code)\n\t\ttmp = backtest_database(code,start,end,1)\n\t\ttmp.create_csv(location=location)\n\ndef test_stock_list(stock_list,location,ndays):\n\treturns = pd.DataFrame(columns=['Company','No. Trades','Net return','Test Error'])\n\tfor code in stock_list['Code']:\n\t\tprint(code)\n\t\tdf_stock = backtest_database(code,'2019-09-16','2020-02-17',1).read_csv(location=location)\n\t\tsim = mAvgSim.movingAverageSim(df_stock)\n\t\tnet,num_trades,test_error = sim.run_simulation(ndays=ndays)\n\t\tif num_trades == 0:\n\t\t\tcontinue\n\t\treturns = returns.append({\n\t\t\t'Company' : code,\n\t\t\t'No. Trades' : num_trades,\n\t\t\t'Net return' : net,\n\t\t\t'Test Error' : test_error\n\t\t},ignore_index=True)\n\t\t# print('Company:',code,'\\n Number of Trades',num_trades,'\\n Net % return',net)\n\tprint(\"Mean Test Error = \", np.mean(returns['Test Error']))\n\tnet_profit = np.sum(returns['Net return'])\n\tcompanies_traded = len(returns)\n\tmean = stat.mean(returns['Net return'])\n\tstd = stat.stdev(returns['Net return'])\n\tprint(\"Net Profit =\",net_profit,\n\t\t'\\n Total number of companies traded =',companies_traded,\n\t\t'\\n Mean Profit =',mean,\n\t\t'\\n Standard Deviation',std)\n\tprint(returns)\n\n\nif __name__ == \"__main__\":\n\tmain()\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('input.txt', 'r') as f:
data = f.read()
<|reserved_special_token_0|>
print(res)
<|reserved_special_token_0|>
for line in res:
newHold.append(tuple(int(i) for i in line.split(', ')))
print(newHold)
<|reserved_special_token_0|>
for i, tup in enumerate(newHold):
x = tup[0]
y = tup[1]
if mapper[y][x] == 0:
mapper[y][x] = i
<|reserved_special_token_0|>
for num, top in enumerate(newHold):
first = list(newHold[num])
for i in range(0, rows):
for j in range(0, cols):
if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j
] == 0:
mapper[i][j] = distance.cityblock(first, [i, j])
elif mapper[i][j] == distance.cityblock(first, [i, j]):
mapper[i][j] = -1000
print(num)
plt.imshow(mapper, cmap='viridis')
plt.show()
plt.imshow(mapper, cmap='viridis')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('input.txt', 'r') as f:
data = f.read()
res = [i for i in data.splitlines()]
print(res)
newHold = []
for line in res:
newHold.append(tuple(int(i) for i in line.split(', ')))
print(newHold)
mapper = np.zeros((400, 400))
for i, tup in enumerate(newHold):
x = tup[0]
y = tup[1]
if mapper[y][x] == 0:
mapper[y][x] = i
rows = mapper.shape[0]
cols = mapper.shape[1]
for num, top in enumerate(newHold):
first = list(newHold[num])
for i in range(0, rows):
for j in range(0, cols):
if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j
] == 0:
mapper[i][j] = distance.cityblock(first, [i, j])
elif mapper[i][j] == distance.cityblock(first, [i, j]):
mapper[i][j] = -1000
print(num)
plt.imshow(mapper, cmap='viridis')
plt.show()
plt.imshow(mapper, cmap='viridis')
plt.show()
<|reserved_special_token_1|>
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance
with open('input.txt', 'r') as f:
data = f.read()
res = [i for i in data.splitlines()]
print(res)
newHold = []
for line in res:
newHold.append(tuple(int(i) for i in line.split(', ')))
print(newHold)
mapper = np.zeros((400, 400))
for i, tup in enumerate(newHold):
x = tup[0]
y = tup[1]
if mapper[y][x] == 0:
mapper[y][x] = i
rows = mapper.shape[0]
cols = mapper.shape[1]
for num, top in enumerate(newHold):
first = list(newHold[num])
for i in range(0, rows):
for j in range(0, cols):
if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j
] == 0:
mapper[i][j] = distance.cityblock(first, [i, j])
elif mapper[i][j] == distance.cityblock(first, [i, j]):
mapper[i][j] = -1000
print(num)
plt.imshow(mapper, cmap='viridis')
plt.show()
plt.imshow(mapper, cmap='viridis')
plt.show()
<|reserved_special_token_1|>
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance

# Read one "(x, y)" coordinate pair per input line.
with open('input.txt', 'r') as f:
    data = f.read()
res = [i for i in data.splitlines()]
print(res)
newHold = []
for line in res:
    newHold.append(tuple(int(i) for i in line.split(', ')))
print(newHold)

# 400x400 grid; each cell ends up holding the Manhattan distance to its
# nearest seed point (ties are marked with -1000).
mapper = np.zeros((400, 400))
# Seed the grid: cell (y, x) gets the index of the point that sits there.
for i, tup in enumerate(newHold):
    x = tup[0]
    y = tup[1]
    if mapper[y][x] == 0:
        mapper[y][x] = i
rows = mapper.shape[0]
cols = mapper.shape[1]
for num, point in enumerate(newHold):
    first = list(point)
    for i in range(0, rows):
        for j in range(0, cols):
            # Hoisted into a local: the original recomputed
            # distance.cityblock up to twice per cell.
            dist = distance.cityblock(first, [i, j])
            if mapper[i][j] > dist or mapper[i][j] == 0:
                mapper[i][j] = dist
            elif mapper[i][j] == dist:
                mapper[i][j] = -1000
    print(num)
    # Intermediate visualization after each point (blocks until closed).
    plt.imshow(mapper, cmap='viridis')
    plt.show()
plt.imshow(mapper, cmap='viridis')
plt.show()
|
flexible
|
{
"blob_id": "47476fbb78ca8ce14d30bf226795bbd85b5bae45",
"index": 6939,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('input.txt', 'r') as f:\n data = f.read()\n<mask token>\nprint(res)\n<mask token>\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\n<mask token>\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\n<mask token>\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n",
"step-3": "<mask token>\nwith open('input.txt', 'r') as f:\n data = f.read()\nres = [i for i in data.splitlines()]\nprint(res)\nnewHold = []\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\nmapper = np.zeros((400, 400))\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\nrows = mapper.shape[0]\ncols = mapper.shape[1]\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n",
"step-4": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\nwith open('input.txt', 'r') as f:\n data = f.read()\nres = [i for i in data.splitlines()]\nprint(res)\nnewHold = []\nfor line in res:\n newHold.append(tuple(int(i) for i in line.split(', ')))\nprint(newHold)\nmapper = np.zeros((400, 400))\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\nrows = mapper.shape[0]\ncols = mapper.shape[1]\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if mapper[i][j] > distance.cityblock(first, [i, j]) or mapper[i][j\n ] == 0:\n mapper[i][j] = distance.cityblock(first, [i, j])\n elif mapper[i][j] == distance.cityblock(first, [i, j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap='viridis')\n plt.show()\nplt.imshow(mapper, cmap='viridis')\nplt.show()\n",
"step-5": "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import distance\n\nwith open('input.txt', 'r') as f:\n data = f.read()\n\nres = [i for i in data.splitlines()]\nprint(res)\n\nnewHold = []\nfor line in res:\n newHold.append((tuple(int(i) for i in line.split(', '))))\nprint(newHold)\nmapper = np.zeros((400,400))\n\n#plt.scatter(*zip(*newHold))\n#plt.show()\n\nfor i, tup in enumerate(newHold):\n x = tup[0]\n y = tup[1]\n if mapper[y][x] == 0:\n mapper[y][x] = i\n\nrows = mapper.shape[0]\ncols = mapper.shape[1]\n\nfor num, top in enumerate(newHold):\n first = list(newHold[num])\n for i in range(0, rows):\n for j in range(0, cols):\n if ((mapper[i][j] > distance.cityblock(first, [i,j])) or (mapper[i][j] == 0)):\n mapper[i][j] = distance.cityblock(first, [i,j])\n elif mapper[i][j] == distance.cityblock(first, [i,j]):\n mapper[i][j] = -1000\n print(num)\n plt.imshow(mapper, cmap=\"viridis\")\n plt.show()\n\nplt.imshow(mapper, cmap=\"viridis\")\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Compare 1-D analytical sphere solution to 1-D numerical and 3-D Comsol solutions
for transient heat conduction in solid sphere with constant k and Cp.
Assumptions:
Convection boundary condition at surface.
Symmetry about the center of the solid.
    Heat transfer via radiation assumed to be negligible.
Particle does not shrink or expand in size during pyrolysis.
Reference: Wood Handbook 2010
Requirements: Python 3, NumPy, SciPy, Matplotlib, funcHeatCond, funcTheta, funcOther
"""
import numpy as np
import matplotlib.pyplot as py
from funcHeatCond import hc3
from funcTheta import theta
from funcOther import vol, Tvol
# Parameters
# -----------------------------------------------------------------------------
d = 0.001    # diameter of sphere, m
Gb = 0.54    # basic specific gravity, Wood Handbook Table 4-7, (-)
cp = 1800    # heat capacity, J/kg*K
k = 0.12     # thermal conductivity, W/mK
x = 0        # moisture content, % (not used in the calculations below)
h = 350      # convective heat transfer coefficient at the surface, W/m^2*K
Ti = 293     # initial (uniform) particle temperature, K
Tinf = 773   # ambient temperature, K
# 1D Numerical Solution for Transient Heat Conduction in Solid Sphere
# -----------------------------------------------------------------------------
# number of nodes from center of particle (m=0) to surface (m)
m = 1000
# time vector from 0 to max time
tmax = 4.0    # max time, s
dt = 0.01     # time step, s
nt = tmax/dt  # number of time steps (NOTE(review): computed but never used)
t = np.arange(0, tmax+dt, dt)   # time vector, s
# intraparticle temperature array [T] in Kelvin
# row = time step, column = node point from 0 (center) to m (surface)
# The literal 2 presumably selects spherical geometry, matching b=2 used
# for theta() below — confirm against funcHeatCond.hc3.
T = hc3(d, cp, k, Gb, h, Ti, Tinf, 2, m, t)
Tavg = [np.mean(row) for row in T]   # plain nodal mean (not volume-weighted)
# volume average temperature at each time step
v = vol(d, m)     # per-node volumes from funcOther.vol
Tv = Tvol(T, v)   # volume-weighted average temperature, K
# 1D Analytical Solution for Transient Heat Conduction in Solid Sphere
# -----------------------------------------------------------------------------
ro = d/2      # radius of sphere (a.k.a outer radius), m
rs = ro/ro    # dimensionless surface radius, (-)
rc = 1e-12/ro # dimensionless center radius (~0, avoids the r=0 singularity), (-)
z = np.arange(0, 1250, 0.1)   # range to evaluate the zeta, Bi equation
z[0] = 1e-12                  # prevent divide by zero warning
rho = Gb*1000        # density, kg/m^3
alpha = k/(rho*cp)   # thermal diffusivity biomass, m^2/s
Bi = (h*ro)/k        # Biot number, (-)
Fo = (alpha * t) / (ro**2)    # Fourier number, (-)
# surface temperature where ro for outer surface, b=2 for sphere
thetaRo = theta(rs, 2, z, Bi, Fo)   # dimensionless temperature profile
T_o = Tinf + thetaRo*(Ti-Tinf)      # convert theta to temperature in Kelvin, K
# center temperature where r for center, b=2 for sphere
thetaR = theta(rc, 2, z, Bi, Fo)    # dimensionless temperature profile
T_r = Tinf + thetaR*(Ti-Tinf)       # convert theta to temperature in Kelvin, K
# 3D Solid Sphere Temperature Data from Comsol
# -----------------------------------------------------------------------------
sphere = 'comsol/3d-sphere-temps.txt'
t_sphere, Tv_sphere, Tc_sphere, Ts_sphere = np.loadtxt(sphere, skiprows=5, unpack=True)
# Plot Results
# -----------------------------------------------------------------------------
py.ion()          # interactive mode: figures display without blocking
py.close('all')   # start from a clean slate if rerun in the same session
def despine():
    """Remove the top/right spines and all tick marks from the current axes."""
    ax = py.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    # Booleans instead of the legacy 'on'/'off' strings, which were
    # deprecated and then removed from tick_params in Matplotlib 3.x.
    py.tick_params(axis='both', bottom=False, top=False, left=False, right=False)
# Figure 1: 1-D analytical (open markers) vs 1-D numerical (solid lines),
# surface (Ts) and center (Tc) temperatures; dashed line marks Tinf.
py.figure(1)
py.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')
py.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')
py.plot(t, T[:, -1], 'r-', lw=2, label='Ts_num')
py.plot(t, T[:, 0], 'b-', lw=2, label='Tc_num')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('1-D Analytical and 1-D Numerical')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()

# Figure 2: 1-D analytical vs 3-D Comsol surface/center temperatures.
py.figure(2)
py.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')
py.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')
py.plot(t_sphere, Ts_sphere, 'r-', lw=2, label='Ts_3d')
py.plot(t_sphere, Tc_sphere, 'b-', lw=2, label='Tc_3d')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('1-D Analytical and 3-D Comsol')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()

# Figure 3: 1-D numerical vs 3-D Comsol, including volume-averaged temps.
py.figure(3)
py.plot(t_sphere, Ts_sphere, 'o', mec='r', mew=2, mfc='none', label='Ts_3d' )
py.plot(t_sphere, Tc_sphere, 's', mec='b', mew=2, mfc='none', label='Tc_3d')
py.plot(t_sphere, Tv_sphere, '^', mec='g', mew=2, mfc='none', label='Tv_3d')
py.plot(t, T[:, -1], 'r-', lw=2, label='Ts_1d')
py.plot(t, T[:, 0], 'b-', lw=2, label='Tc_1d')
py.plot(t, Tv, 'g-', lw=2, label='Tv_1d')
#py.plot(t, Tavg, 'y-', lw=2, label='Tavg_1d')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('1-D Numerical and 3-D Comsol')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
|
normal
|
{
"blob_id": "15ca54aff4c688733c9c514ba5856e6bf29a3292",
"index": 8345,
"step-1": "<mask token>\n\n\ndef despine():\n ax = py.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n py.tick_params(axis='both', bottom='off', top='off', left='off', right=\n 'off')\n\n\n<mask token>\n",
"step-2": "<mask token>\npy.ion()\npy.close('all')\n\n\ndef despine():\n ax = py.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n py.tick_params(axis='both', bottom='off', top='off', left='off', right=\n 'off')\n\n\npy.figure(1)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_num')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_num')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Analytical and 1-D Numerical')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\npy.figure(2)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t_sphere, Ts_sphere, 'r-', lw=2, label='Ts_3d')\npy.plot(t_sphere, Tc_sphere, 'b-', lw=2, label='Tc_3d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Analytical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\npy.figure(3)\npy.plot(t_sphere, Ts_sphere, 'o', mec='r', mew=2, mfc='none', label='Ts_3d')\npy.plot(t_sphere, Tc_sphere, 's', mec='b', mew=2, mfc='none', label='Tc_3d')\npy.plot(t_sphere, Tv_sphere, '^', mec='g', mew=2, mfc='none', label='Tv_3d')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_1d')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_1d')\npy.plot(t, Tv, 'g-', lw=2, label='Tv_1d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Numerical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n",
"step-3": "<mask token>\nd = 0.001\nGb = 0.54\ncp = 1800\nk = 0.12\nx = 0\nh = 350\nTi = 293\nTinf = 773\nm = 1000\ntmax = 4.0\ndt = 0.01\nnt = tmax / dt\nt = np.arange(0, tmax + dt, dt)\nT = hc3(d, cp, k, Gb, h, Ti, Tinf, 2, m, t)\nTavg = [np.mean(row) for row in T]\nv = vol(d, m)\nTv = Tvol(T, v)\nro = d / 2\nrs = ro / ro\nrc = 1e-12 / ro\nz = np.arange(0, 1250, 0.1)\nz[0] = 1e-12\nrho = Gb * 1000\nalpha = k / (rho * cp)\nBi = h * ro / k\nFo = alpha * t / ro ** 2\nthetaRo = theta(rs, 2, z, Bi, Fo)\nT_o = Tinf + thetaRo * (Ti - Tinf)\nthetaR = theta(rc, 2, z, Bi, Fo)\nT_r = Tinf + thetaR * (Ti - Tinf)\nsphere = 'comsol/3d-sphere-temps.txt'\nt_sphere, Tv_sphere, Tc_sphere, Ts_sphere = np.loadtxt(sphere, skiprows=5,\n unpack=True)\npy.ion()\npy.close('all')\n\n\ndef despine():\n ax = py.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n py.tick_params(axis='both', bottom='off', top='off', left='off', right=\n 'off')\n\n\npy.figure(1)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_num')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_num')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Analytical and 1-D Numerical')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\npy.figure(2)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t_sphere, Ts_sphere, 'r-', lw=2, label='Ts_3d')\npy.plot(t_sphere, Tc_sphere, 'b-', lw=2, label='Tc_3d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Analytical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, 
frameon=False)\npy.grid()\ndespine()\npy.figure(3)\npy.plot(t_sphere, Ts_sphere, 'o', mec='r', mew=2, mfc='none', label='Ts_3d')\npy.plot(t_sphere, Tc_sphere, 's', mec='b', mew=2, mfc='none', label='Tc_3d')\npy.plot(t_sphere, Tv_sphere, '^', mec='g', mew=2, mfc='none', label='Tv_3d')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_1d')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_1d')\npy.plot(t, Tv, 'g-', lw=2, label='Tv_1d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Numerical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as py\nfrom funcHeatCond import hc3\nfrom funcTheta import theta\nfrom funcOther import vol, Tvol\nd = 0.001\nGb = 0.54\ncp = 1800\nk = 0.12\nx = 0\nh = 350\nTi = 293\nTinf = 773\nm = 1000\ntmax = 4.0\ndt = 0.01\nnt = tmax / dt\nt = np.arange(0, tmax + dt, dt)\nT = hc3(d, cp, k, Gb, h, Ti, Tinf, 2, m, t)\nTavg = [np.mean(row) for row in T]\nv = vol(d, m)\nTv = Tvol(T, v)\nro = d / 2\nrs = ro / ro\nrc = 1e-12 / ro\nz = np.arange(0, 1250, 0.1)\nz[0] = 1e-12\nrho = Gb * 1000\nalpha = k / (rho * cp)\nBi = h * ro / k\nFo = alpha * t / ro ** 2\nthetaRo = theta(rs, 2, z, Bi, Fo)\nT_o = Tinf + thetaRo * (Ti - Tinf)\nthetaR = theta(rc, 2, z, Bi, Fo)\nT_r = Tinf + thetaR * (Ti - Tinf)\nsphere = 'comsol/3d-sphere-temps.txt'\nt_sphere, Tv_sphere, Tc_sphere, Ts_sphere = np.loadtxt(sphere, skiprows=5,\n unpack=True)\npy.ion()\npy.close('all')\n\n\ndef despine():\n ax = py.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n py.tick_params(axis='both', bottom='off', top='off', left='off', right=\n 'off')\n\n\npy.figure(1)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_num')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_num')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Analytical and 1-D Numerical')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\npy.figure(2)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t_sphere, Ts_sphere, 'r-', lw=2, label='Ts_3d')\npy.plot(t_sphere, Tc_sphere, 'b-', lw=2, label='Tc_3d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D 
Analytical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\npy.figure(3)\npy.plot(t_sphere, Ts_sphere, 'o', mec='r', mew=2, mfc='none', label='Ts_3d')\npy.plot(t_sphere, Tc_sphere, 's', mec='b', mew=2, mfc='none', label='Tc_3d')\npy.plot(t_sphere, Tv_sphere, '^', mec='g', mew=2, mfc='none', label='Tv_3d')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_1d')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_1d')\npy.plot(t, Tv, 'g-', lw=2, label='Tv_1d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Numerical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n",
"step-5": "\"\"\"\nCompare 1-D analytical sphere solution to 1-D numerical and 3-D Comsol solutions\nfor transient heat conduction in solid sphere with constant k and Cp.\n\nAssumptions:\nConvection boundary condition at surface.\nSymmetry about the center of the solid.\nHeat transfer via radiation assumed to be negligable.\nParticle does not shrink or expand in size during pyrolysis.\n\nReference: Wood Handbook 2010\nRequirements: Python 3, NumPy, SciPy, Matplotlib, funcHeatCond, funcTheta, funcOther\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as py\nfrom funcHeatCond import hc3\nfrom funcTheta import theta\nfrom funcOther import vol, Tvol\n\n# Parameters\n# -----------------------------------------------------------------------------\n\nd = 0.001 # diameter of sphere, m\nGb = 0.54 # basic specific gravity, Wood Handbook Table 4-7, (-)\ncp = 1800 # heat capacity, J/kg*K\nk = 0.12 # thermal conductivity, W/mK\nx = 0 # moisture content, %\nh = 350 # heat transfer coefficient, W/m^2*K\nTi = 293 # initial particle temp, K\nTinf = 773 # ambient temp, K\n\n# 1D Numerical Solution for Transient Heat Conduction in Solid Sphere\n# -----------------------------------------------------------------------------\n\n# number of nodes from center of particle (m=0) to surface (m)\nm = 1000\n\n# time vector from 0 to max time\ntmax = 4.0 # max time, s\ndt = 0.01 # time step, s\nnt = tmax/dt # number of time steps\nt = np.arange(0, tmax+dt, dt) # time vector, s\n\n# intraparticle temperature array [T] in Kelvin\n# row = time step, column = node point from 0 (center) to m (surface)\nT = hc3(d, cp, k, Gb, h, Ti, Tinf, 2, m, t)\nTavg = [np.mean(row) for row in T]\n\n# volume average temperature at each time step\nv = vol(d, m)\nTv = Tvol(T, v)\n\n# 1D Analytical Solution for Transient Heat Conduction in Solid Sphere\n# -----------------------------------------------------------------------------\n\nro = d/2 # radius of sphere (a.k.a outer radius), m\nrs = ro/ro # 
dimensionless surface radius, (-)\nrc = 1e-12/ro # dimensionless center radius, (-)\n\nz = np.arange(0, 1250, 0.1) # range to evaluate the zeta, Bi equation\nz[0] = 1e-12 # prevent divide by zero warning\n\nrho = Gb*1000 # density, kg/m^3\nalpha = k/(rho*cp) # thermal diffusivity biomass, m^2/s\nBi = (h*ro)/k # Biot number, (-)\nFo = (alpha * t) / (ro**2) # Fourier number, (-)\n\n# surface temperature where ro for outer surface, b=2 for sphere\nthetaRo = theta(rs, 2, z, Bi, Fo) # dimensionless temperature profile\nT_o = Tinf + thetaRo*(Ti-Tinf) # convert theta to temperature in Kelvin, K\n\n# center temperature where r for center, b=2 for sphere\nthetaR = theta(rc, 2, z, Bi, Fo) # dimensionless temperature profile\nT_r = Tinf + thetaR*(Ti-Tinf) # convert theta to temperature in Kelvin, K\n\n# 3D Solid Sphere Temperature Data from Comsol\n# -----------------------------------------------------------------------------\n\nsphere = 'comsol/3d-sphere-temps.txt'\nt_sphere, Tv_sphere, Tc_sphere, Ts_sphere = np.loadtxt(sphere, skiprows=5, unpack=True)\n\n# Plot Results\n# -----------------------------------------------------------------------------\n\npy.ion()\npy.close('all')\n\ndef despine():\n ax = py.gca()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n py.tick_params(axis='both', bottom='off', top='off', left='off', right='off')\n\npy.figure(1)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_num')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_num')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Analytical and 1-D Numerical')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n\npy.figure(2)\npy.plot(t, T_o, 'o', mec='b', mew=2, mfc='none', markevery=5, 
label='Ts_ana')\npy.plot(t, T_r, 's', mec='g', mew=2, mfc='none', markevery=5, label='Tc_ana')\npy.plot(t_sphere, Ts_sphere, 'r-', lw=2, label='Ts_3d')\npy.plot(t_sphere, Tc_sphere, 'b-', lw=2, label='Tc_3d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Analytical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n\npy.figure(3)\npy.plot(t_sphere, Ts_sphere, 'o', mec='r', mew=2, mfc='none', label='Ts_3d' )\npy.plot(t_sphere, Tc_sphere, 's', mec='b', mew=2, mfc='none', label='Tc_3d')\npy.plot(t_sphere, Tv_sphere, '^', mec='g', mew=2, mfc='none', label='Tv_3d')\npy.plot(t, T[:, -1], 'r-', lw=2, label='Ts_1d')\npy.plot(t, T[:, 0], 'b-', lw=2, label='Tc_1d')\npy.plot(t, Tv, 'g-', lw=2, label='Tv_1d')\n#py.plot(t, Tavg, 'y-', lw=2, label='Tavg_1d')\npy.axhline(Tinf, c='k', ls='--')\npy.ylim(250, 800)\npy.xlim(0, tmax)\npy.title('1-D Numerical and 3-D Comsol')\npy.ylabel('Temperature (K)')\npy.xlabel('Time (s)')\npy.legend(loc='best', numpoints=1, frameon=False)\npy.grid()\ndespine()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from util import AutomataError
from automata import NFA
from base import Node
from copy import copy, deepcopy
from os.path import commonprefix
DEBUG = False  # set True to print intermediate automata during conversion
LAMBDA = u'\u03bb'  # 'λ' — rendered for empty-string (lambda) branches in the regex
PHI = u'\u00d8'  # 'Ø' — rendered when the accepted language is empty
def copyDeltas(src):
    """Return a two-level copy of a delta table {state: {symbol: dests}}.

    Both dict levels are rebuilt and every destination collection is
    shallow-copied, so mutating the result never touches `src`.
    """
    return {state: {symbol: copy(dests) for symbol, dests in table.items()}
            for state, table in src.items()}
def replaceNode(nfa, old, new):
    """Rewire every transition touching `old` to use `new` instead.

    Outgoing deltas of `old` are re-added under `new`, then every
    destination set mentioning `old` is patched to point at `new`.
    Mutates `nfa` in place.
    """
    if DEBUG:
        print('R_Start(%s, %s) ---' % (old, new), nfa)
    # Move the outgoing transition table of `old` onto `new`.
    if old in nfa._deltas:
        for symbol, dests in nfa._deltas[old].items():
            nfa.addDelta(new, symbol, dests)
        del nfa._deltas[old]
    if DEBUG:
        print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)
    # Retarget incoming edges.  Walk a snapshot because the live table is
    # mutated as we go.
    snapshot = copyDeltas(nfa._deltas)
    for source in snapshot:
        for symbol in snapshot[source]:
            if old in snapshot[source][symbol]:
                nfa._deltas[source][symbol].remove(old)
                nfa._deltas[source][symbol].add(new)
    if DEBUG:
        print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)
def commonsuffix(seq):
    """Return the longest common suffix of the strings in `seq`.

    Implemented by reversing each string, taking the common prefix, and
    reversing the result back.  Returns '' for an empty sequence.
    """
    # str[::-1] replaces the original char-by-char concatenation loop,
    # which was quadratic per string.
    reversed_seq = [s[::-1] for s in seq]
    return commonprefix(reversed_seq)[::-1]
class NetworkNFA(NFA):
    """An NFA variant whose transition labels may be regex fragments.

    Working graph for state elimination in nfa2regex(): edge labels grow
    from single symbols into regular expressions built with '(', ')',
    '+' and '*'.  Multi-character charset symbols are brace-wrapped so
    the per-character label validation still accepts them.
    """

    def __init__(self, nfa):
        # Only a plain NFA may seed a NetworkNFA; internal state is
        # (shallow-)copied so the source automaton stays untouched.
        if type(nfa) is not NFA:
            raise AutomataError('Can create a NetworkNFA only from an NFA.')
        if all([len(i) == 1 for i in nfa.charset]):
            self._charset = copy(nfa._charset)
        else:
            # Wrap multi-char symbols as '{sym}' so set(label) membership
            # checks in addDelta/remDelta can validate them per character.
            self._charset = set(['{%s}' % i for i in nfa._charset])
        self._nodes = copy(nfa._nodes)
        self._deltas = copyDeltas(nfa._deltas)
        self._start = nfa._start
        self._terminals = copy(nfa._terminals)

    def addDelta(self, node, input, dest):
        """Add transition(s) D(node, input) -> dest.

        `input` may be a regex fragment; every character must belong to
        the charset or be one of '()+*'.  `dest` is a Node or a set of
        Nodes; new destinations are unioned into any existing entry.
        Raises AutomataError on bad symbols or argument types.
        """
        if set(input) - (self._charset.union(set('()+*'))):
            raise AutomataError('%s contains symbols not in charset.' % input)

        if type(node) is Node:
            if type(dest) is set and all([type(i) is Node for i in dest]):
                # An empty destination set is silently ignored.
                if len(dest):
                    if node in self._deltas:
                        if input in self._deltas[node]:
                            self._deltas[node][input] = self._deltas[node][input].union(
                                dest)
                        else:
                            self._deltas[node][input] = dest
                    else:
                        self._deltas[node] = {input: dest}
            elif type(dest) is Node:
                if node in self._deltas:
                    if input in self._deltas[node]:
                        self._deltas[node][input].add(dest)
                    else:
                        self._deltas[node][input] = set([dest])
                else:
                    self._deltas[node] = {input: set([dest])}
            else:
                raise AutomataError(
                    'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)
        else:
            raise AutomataError(
                'Delta source must be Node, not %s.' % type(node).__name__)

    def remDelta(self, node, input):
        """Remove the transition entry D(node, input), if present.

        Empties out the node's whole table when its last entry is
        removed.  Raises AutomataError on bad symbols or source type.
        """
        if set(input) - (self._charset.union(set('()+*'))):
            raise AutomataError('%s contains symbols not in charset.' % input)
        if type(node) is Node:
            if node in self._deltas and input in self._deltas[node]:
                self._deltas[node].pop(input)
                # Drop the node's table entirely once it has no entries left.
                if len(self._deltas[node]) == 0:
                    del self._deltas[node]
        else:
            raise AutomataError(
                'Delta source must be a Node, not %s' % type(node).__name__)

    def isValid(self):
        """Return True when the automaton's structure is consistent.

        Checks: at least one node; start and all terminals are known
        nodes; every delta source is a known node; every delta label
        uses only charset symbols plus '()+*'.
        """
        if len(self._nodes) == 0:
            return False
        if self._start not in self._nodes:
            return False
        for i in self._terminals:
            if i not in self._nodes:
                return False
        if not set(self._deltas.keys()).issubset(self._nodes):
            return False
        for key in self._deltas:
            for char in self._deltas[key]:
                if set(char) - (self._charset.union(set('()+*'))):
                    return False
        return True

    def apply(self, input, start):
        # Labels are regex fragments here, so direct string application
        # (as in the base NFA) is meaningless — forbid it.
        raise AutomataError('NetworkNFA does not allow direct application.')

    def __repr__(self):
        """Human-readable dump of charset, nodes, terminals, start and deltas."""
        ret = '<NetworkNFA>\n'
        ret += ' Charset: {%s}\n' % ','.join(filter(None, self._charset))
        ret += ' Nodes: {%s}\n' % ','.join([i.label for i in self._nodes])
        ret += 'Terminals: {%s}\n' % ','.join(
            [i.label for i in self._terminals])
        ret += ' Start: %s\n' % (self._start and self._start.label)
        ret += ' Delta: '
        if len(self._deltas):
            for qFrom in self._deltas:
                for input in self._deltas[qFrom]:
                    ret += 'D(%s, %s) -> {%s}\n ' % (qFrom.label, input or 'lambda', ','.join(
                        [i.label for i in self._deltas[qFrom][input]]))
            ret = ret.rstrip() + '\n'
        else:
            ret += 'None\n'
        ret += ' Valid: %s\n' % ('Yes' if self.isValid() else 'No')
        ret += '</NetworkNFA>'
        return ret
def nfa2regex(nfa):
    """Convert a valid NFA into an equivalent regular expression string.

    Uses the state-elimination method: wrap the automaton with dummy
    start ('qs') and end ('qf') states, collapse parallel edges into
    unions, then remove the interior states one by one, splicing each
    removed state's (loop)* regex into the surviving transitions.
    Lambda branches render as LAMBDA; an empty language renders as PHI.

    Raises AutomataError if `nfa` is not in a valid state.
    """
    from functools import reduce  # reduce is not a builtin in Python 3

    if not nfa.isValid():
        raise AutomataError(
            'NFA must be in a valid state to be converted to a regex.')

    network = NetworkNFA(nfa)
    if DEBUG:
        print('START', network)

    # Add dummy start and end nodes so the original start/terminal states
    # can be eliminated like any other state.
    start = Node('qs')
    network.addNode(start)
    network.addDelta(start, '', network.start)
    network.start = start
    end = Node('qf')
    network.addNode(end)
    # Iterate over a copy: remTerminal() mutates network.terminals, and
    # removing from a sequence while iterating it skips elements.
    for i in copy(network.terminals):
        network.addDelta(i, '', end)
        network.remTerminal(i)
    network.addTerminal(end)
    if DEBUG:
        print('Dummies added: ', network)

    # Collapse parallel edges between each ordered node pair into a
    # single union edge: labels a,b become '(a+b)'.
    for src in network.nodes:
        delta_temp = network.getDelta(src)
        for dest in network.nodes:
            chars = []
            for input in delta_temp:
                if input and dest in delta_temp[input]:
                    chars.append(input)
            if len(chars):
                for c in chars:
                    delta_temp[c].remove(dest)
                    if len(delta_temp[c]) == 0:
                        del delta_temp[c]
                if len(chars) > 1:
                    chars = '(' + '+'.join(chars) + ')'
                else:
                    chars = '+'.join(chars)
                network.addDelta(src, chars, dest)
    if DEBUG:
        print('Collapsed: ', network)

    # Pliable nodes: everything except the dummy start and terminals —
    # these are the states to be eliminated.
    pliableNodes = list(network.nodes)
    pliableNodes.remove(network.start)
    for n in network.terminals:
        pliableNodes.remove(n)

    # Build a distance-from-terminal table so the nodes farthest from
    # the final state get eliminated first.
    nodeFinalDist = {}
    maxDist = len(network.nodes) ** len(network.nodes)  # effectively infinity
    for n in network.nodes:
        nodeFinalDist[n] = maxDist
    nodeFinalDist[network.terminals[0]] = 0

    toProcess = list(network.nodes)
    toProcess.remove(network.terminals[0])
    while len(toProcess):
        # Snapshot with list(): we remove from toProcess inside the loop,
        # and mutating a list while iterating it skips elements.
        for node in list(toProcess):
            dests = network.getDelta(node).values()
            if len(dests) == 0:
                dests = set([])
            else:
                dests = reduce(set.union, network.getDelta(node).values())
            if len(dests) == 0:
                toProcess.remove(node)
            else:
                minDist = min([nodeFinalDist[i] for i in dests])
                if minDist != maxDist:
                    nodeFinalDist[node] = minDist + 1
                    toProcess.remove(node)

    # Farthest-from-terminal first.
    pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)
    if DEBUG:
        print('Pliables: ', pliableNodes)

    for node in pliableNodes:
        # Detach the node but keep its outgoing transition table.
        network.remNode(node)
        delta = copy(network.getDelta(node))

        # Fold this node's self-loops into a single starred regex.
        loops = []
        for input in delta:
            if node in delta[input]:
                if len(input):
                    loops.append(input)
        loopRegex = '+'.join(loops)
        if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):
            loopRegex = '(' + loopRegex + ')*'
        elif len(loopRegex) >= 1:
            loopRegex = loopRegex + '*'

        # Strip the self-loop entries from the outgoing table.
        for input in copy(delta):
            if delta[input] == set([node]):
                del delta[input]
            elif node in delta[input]:
                delta[input].remove(node)

        # Lambda-closure equivalence: if this node and a lambda-neighbour
        # point at each other via lambda, merge them instead of splicing.
        if '' in delta and (len(delta) != 1 or len(delta['']) != 1):
            eligible = []
            for dest in delta['']:
                delta_temp = network.getDelta(dest)
                if '' in delta_temp and node in delta_temp['']:
                    eligible.append(dest)
            if len(eligible):
                replaceNode(network, node, eligible[0])
                continue

        # Drop the node's own delta table.
        try:
            del network._deltas[node]
        except KeyError:  # No deltas remaining, had only loops
            continue
        if DEBUG:
            print('Working on connections: ', node, delta)

        # Splice every in-edge through this node onto its out-edges:
        # src --a--> node --b--> dest  becomes  src --a(loop)*b--> dest.
        deltas_temp = copyDeltas(network._deltas)
        for src in deltas_temp:
            for input in deltas_temp[src]:
                tempDeltaDest = network.getDelta(src)[input]
                if node in tempDeltaDest:
                    tempDeltaDest.remove(node)
                    if len(tempDeltaDest) == 0:
                        network.remDelta(src, input)
                    for input2 in delta:
                        for dest in delta[input2]:
                            # Skip a lambda self-edge, which adds nothing.
                            if not (src == dest and (input + loopRegex + input2) == ''):
                                network.addDelta(
                                    src, input + loopRegex + input2, dest)
                                if DEBUG:
                                    print('New Delta:', src, input,
                                          loopRegex, input2, dest, network)

    # Only start -> end edges remain.  list() is required: dict.keys()
    # is a view in Python 3 and supports neither indexing nor sort().
    branches = list(network.getDelta(network.start).keys())
    if len(branches) == 1:
        regex = branches[0]
    else:
        # Factor out any common prefix/suffix, then union the middles.
        prefix = commonprefix(branches)
        suffix = commonsuffix(branches)
        branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]
                    for i in branches]
        branches.sort(key=len)
        if len(prefix) or len(suffix):
            regex = prefix + \
                '(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix
        else:
            regex = '+'.join([i or LAMBDA for i in branches]) or PHI

    return regex
|
normal
|
{
"blob_id": "2fe20f28fc7bba6b8188f5068e2b3c8b87c15edc",
"index": 94,
"step-1": "<mask token>\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\n<mask token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' 
% type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\n<mask token>\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: 
%s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: 
%s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, 
network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n 
branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n",
"step-4": "from util import AutomataError\nfrom automata import NFA\nfrom base import Node\nfrom copy import copy, deepcopy\nfrom os.path import commonprefix\nDEBUG = False\nLAMBDA = u'λ'\nPHI = u'Ø'\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n if all([(len(i) == 1) for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set([('{%s}' % i) for i in nfa._charset])\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n if type(node) is Node:\n if type(dest) is set and all([(type(i) is Node) for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][\n input].union(dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.'\n % type(dest).__name__)\n else:\n raise AutomataError('Delta source must be Node, not %s.' % type\n (node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - self._charset.union(set('()+*')):\n raise AutomataError('%s contains symbols not in charset.' % input)\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError('Delta source must be a Node, not %s' %\n type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n for i in self._terminals:\n if i not in self._nodes:\n return False\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - self._charset.union(set('()+*')):\n return False\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join([i.label for i in self.\n _terminals])\n ret += ' Start: 
%s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, \n input or 'lambda', ','.join([i.label for i in self.\n _deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n network = NetworkNFA(nfa)\n if DEBUG:\n print('START', network)\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n nodeFinalDist = {}\n maxDist = len(network.nodes) ** len(network.nodes)\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, 
network.getDelta(node).values())\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n for node in pliableNodes:\n network.remNode(node)\n delta = copy(network.getDelta(node))\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1\n ] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n try:\n del network._deltas[node]\n except KeyError:\n continue\n if DEBUG:\n print('Working on connections: ', node, delta)\n deltas_temp = copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and input + loopRegex +\n input2 == ''):\n network.addDelta(src, input + loopRegex +\n input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n 
branches = [(i[len(prefix):-len(suffix)] if len(suffix) else i[len(\n prefix):]) for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + '(' + '+'.join([(i or LAMBDA) for i in branches]\n ) + ')' + suffix\n else:\n regex = '+'.join([(i or LAMBDA) for i in branches]) or PHI\n return regex\n",
"step-5": "from util import AutomataError\nfrom automata import NFA\nfrom base import Node\nfrom copy import copy, deepcopy\nfrom os.path import commonprefix\n\nDEBUG = False\n\nLAMBDA = u'\\u03bb'\nPHI = u'\\u00d8'\n\n\ndef copyDeltas(src):\n out = dict()\n for k in src:\n out[k] = dict()\n for k2 in src[k]:\n out[k][k2] = copy(src[k][k2])\n\n return out\n\n\ndef replaceNode(nfa, old, new):\n if DEBUG:\n print('R_Start(%s, %s) ---' % (old, new), nfa)\n if old in nfa._deltas:\n for input in nfa._deltas[old]:\n nfa.addDelta(new, input, nfa._deltas[old][input])\n del nfa._deltas[old]\n if DEBUG:\n print('R_SwitchedSource(%s, %s) ---' % (old, new), nfa)\n\n deltas_temp = copyDeltas(nfa._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n if old in deltas_temp[src][input]:\n nfa._deltas[src][input].remove(old)\n nfa._deltas[src][input].add(new)\n if DEBUG:\n print('R_SwitchedDest(%s, %s) ---' % (old, new), nfa)\n\n\ndef commonsuffix(seq):\n def reverse(s):\n out = ''\n for c in reversed(s):\n out += c\n return out\n\n seq = [reverse(i) for i in seq]\n return reverse(commonprefix(seq))\n\n\nclass NetworkNFA(NFA):\n def __init__(self, nfa):\n if type(nfa) is not NFA:\n raise AutomataError('Can create a NetworkNFA only from an NFA.')\n\n if all([len(i) == 1 for i in nfa.charset]):\n self._charset = copy(nfa._charset)\n else:\n self._charset = set(['{%s}' % i for i in nfa._charset])\n\n self._nodes = copy(nfa._nodes)\n self._deltas = copyDeltas(nfa._deltas)\n self._start = nfa._start\n self._terminals = copy(nfa._terminals)\n\n def addDelta(self, node, input, dest):\n if set(input) - (self._charset.union(set('()+*'))):\n raise AutomataError('%s contains symbols not in charset.' 
% input)\n\n if type(node) is Node:\n if type(dest) is set and all([type(i) is Node for i in dest]):\n if len(dest):\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input] = self._deltas[node][input].union(\n dest)\n else:\n self._deltas[node][input] = dest\n else:\n self._deltas[node] = {input: dest}\n elif type(dest) is Node:\n if node in self._deltas:\n if input in self._deltas[node]:\n self._deltas[node][input].add(dest)\n else:\n self._deltas[node][input] = set([dest])\n else:\n self._deltas[node] = {input: set([dest])}\n else:\n raise AutomataError(\n 'Delta destination must be a Node or a set of nodes, not %s.' % type(dest).__name__)\n else:\n raise AutomataError(\n 'Delta source must be Node, not %s.' % type(node).__name__)\n\n def remDelta(self, node, input):\n if set(input) - (self._charset.union(set('()+*'))):\n raise AutomataError('%s contains symbols not in charset.' % input)\n\n if type(node) is Node:\n if node in self._deltas and input in self._deltas[node]:\n self._deltas[node].pop(input)\n if len(self._deltas[node]) == 0:\n del self._deltas[node]\n else:\n raise AutomataError(\n 'Delta source must be a Node, not %s' % type(node).__name__)\n\n def isValid(self):\n if len(self._nodes) == 0:\n return False\n if self._start not in self._nodes:\n return False\n\n for i in self._terminals:\n if i not in self._nodes:\n return False\n\n if not set(self._deltas.keys()).issubset(self._nodes):\n return False\n\n for key in self._deltas:\n for char in self._deltas[key]:\n if set(char) - (self._charset.union(set('()+*'))):\n return False\n\n return True\n\n def apply(self, input, start):\n raise AutomataError('NetworkNFA does not allow direct application.')\n\n def __repr__(self):\n ret = '<NetworkNFA>\\n'\n ret += ' Charset: {%s}\\n' % ','.join(filter(None, self._charset))\n ret += ' Nodes: {%s}\\n' % ','.join([i.label for i in self._nodes])\n ret += 'Terminals: {%s}\\n' % ','.join(\n [i.label for i in self._terminals])\n ret 
+= ' Start: %s\\n' % (self._start and self._start.label)\n ret += ' Delta: '\n if len(self._deltas):\n for qFrom in self._deltas:\n for input in self._deltas[qFrom]:\n ret += 'D(%s, %s) -> {%s}\\n ' % (qFrom.label, input or 'lambda', ','.join(\n [i.label for i in self._deltas[qFrom][input]]))\n ret = ret.rstrip() + '\\n'\n else:\n ret += 'None\\n'\n ret += ' Valid: %s\\n' % ('Yes' if self.isValid() else 'No')\n ret += '</NetworkNFA>'\n\n return ret\n\n\ndef nfa2regex(nfa):\n if not nfa.isValid():\n raise AutomataError(\n 'NFA must be in a valid state to be converted to a regex.')\n\n network = NetworkNFA(nfa)\n\n if DEBUG:\n print('START', network)\n\n# Take care of multi-terminals\n# if len(network.terminals) > 1:\n## end = Node('qf')\n# network.addNode(end)\n# for i in copy(network.terminals):\n## network.addDelta(i, '', end)\n# network.remTerminal(i)\n# network.addTerminal(end)\n\n # Add a dummy start and end nodes\n start = Node('qs')\n network.addNode(start)\n network.addDelta(start, '', network.start)\n network.start = start\n\n end = Node('qf')\n network.addNode(end)\n for i in network.terminals:\n network.addDelta(i, '', end)\n network.remTerminal(i)\n network.addTerminal(end)\n if DEBUG:\n print('Dummies added: ', network)\n\n # Collapse connections\n for src in network.nodes:\n delta_temp = network.getDelta(src)\n for dest in network.nodes:\n chars = []\n for input in delta_temp:\n if input and dest in delta_temp[input]:\n chars.append(input)\n\n if len(chars):\n for c in chars:\n delta_temp[c].remove(dest)\n if len(delta_temp[c]) == 0:\n del delta_temp[c]\n\n if len(chars) > 1:\n chars = '(' + '+'.join(chars) + ')'\n else:\n chars = '+'.join(chars)\n network.addDelta(src, chars, dest)\n if DEBUG:\n print('Collapsed: ', network)\n\n # Collect pliable nodes\n pliableNodes = list(network.nodes)\n pliableNodes.remove(network.start)\n for n in network.terminals:\n pliableNodes.remove(n)\n\n # Build a distance-from-terminal table\n nodeFinalDist = {}\n maxDist 
= len(network.nodes) ** len(network.nodes) # Lazy\n for n in network.nodes:\n nodeFinalDist[n] = maxDist\n\n nodeFinalDist[network.terminals[0]] = 0\n toProcess = list(network.nodes)\n toProcess.remove(network.terminals[0])\n\n while len(toProcess):\n for node in toProcess:\n dests = network.getDelta(node).values()\n if len(dests) == 0:\n dests = set([])\n else:\n dests = reduce(set.union, network.getDelta(node).values())\n\n if len(dests) == 0:\n toProcess.remove(node)\n else:\n minDist = min([nodeFinalDist[i] for i in dests])\n if minDist != maxDist:\n nodeFinalDist[node] = minDist + 1\n toProcess.remove(node)\n\n # Sort pliable nodes by distance from terminal\n pliableNodes.sort(key=lambda x: nodeFinalDist[x], reverse=True)\n if DEBUG:\n print('Pliables: ', pliableNodes)\n\n for node in pliableNodes:\n # Remove Node\n network.remNode(node)\n\n # Save delta\n delta = copy(network.getDelta(node))\n\n # Convert loops to regex\n loops = []\n for input in delta:\n if node in delta[input]:\n if len(input):\n loops.append(input)\n loopRegex = '+'.join(loops)\n if len(loopRegex) > 1 and not (loopRegex[0] == '(' and loopRegex[-1] == ')'):\n loopRegex = '(' + loopRegex + ')*'\n elif len(loopRegex) >= 1:\n loopRegex = loopRegex + '*'\n\n # Remove loops\n for input in copy(delta):\n if delta[input] == set([node]):\n del delta[input]\n elif node in delta[input]:\n delta[input].remove(node)\n\n # Search lambda-closure equivalence\n if '' in delta and (len(delta) != 1 or len(delta['']) != 1):\n eligible = []\n for dest in delta['']:\n delta_temp = network.getDelta(dest)\n if '' in delta_temp and node in delta_temp['']:\n eligible.append(dest)\n\n if len(eligible):\n replaceNode(network, node, eligible[0])\n continue\n\n # Remove delta\n try:\n del network._deltas[node]\n except KeyError: # No deltas remaining, had only loops\n continue\n\n if DEBUG:\n print('Working on connections: ', node, delta)\n # Check all possible connections through this node\n deltas_temp = 
copyDeltas(network._deltas)\n for src in deltas_temp:\n for input in deltas_temp[src]:\n tempDeltaDest = network.getDelta(src)[input]\n if node in tempDeltaDest:\n tempDeltaDest.remove(node)\n if len(tempDeltaDest) == 0:\n network.remDelta(src, input)\n\n for input2 in delta:\n for dest in delta[input2]:\n if not (src == dest and (input + loopRegex + input2) == ''):\n network.addDelta(\n src, input + loopRegex + input2, dest)\n if DEBUG:\n print('New Delta:', src, input,\n loopRegex, input2, dest, network)\n\n # Extract common prefix/suffix\n branches = network.getDelta(network.start).keys()\n if len(branches) == 1:\n regex = branches[0]\n else:\n prefix = commonprefix(branches)\n suffix = commonsuffix(branches)\n branches = [i[len(prefix):-len(suffix)] if len(suffix) else i[len(prefix):]\n for i in branches]\n branches.sort(key=len)\n if len(prefix) or len(suffix):\n regex = prefix + \\\n '(' + '+'.join([i or LAMBDA for i in branches]) + ')' + suffix\n else:\n regex = '+'.join([i or LAMBDA for i in branches]) or PHI\n\n return regex\n",
"step-ids": [
8,
9,
11,
13,
14
]
}
|
[
8,
9,
11,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for key in default_registry:
env_name = key
if key[0].isdigit():
env_name = key.replace('3', 'Three')
if not env_name.isidentifier():
logger.warning(
f'Environment id {env_name} can not be registered since it isnot a valid identifier name.'
)
continue
locals()[env_name] = PettingZooEnvFactory(key)
<|reserved_special_token_1|>
from mlagents_envs.registry import default_registry
from mlagents_envs.envs.pettingzoo_env_factory import logger, PettingZooEnvFactory
for key in default_registry:
env_name = key
if key[0].isdigit():
env_name = key.replace('3', 'Three')
if not env_name.isidentifier():
logger.warning(
f'Environment id {env_name} can not be registered since it isnot a valid identifier name.'
)
continue
locals()[env_name] = PettingZooEnvFactory(key)
<|reserved_special_token_1|>
from mlagents_envs.registry import default_registry
from mlagents_envs.envs.pettingzoo_env_factory import logger, PettingZooEnvFactory
# Register each environment in default_registry as a PettingZooEnv
for key in default_registry:
env_name = key
if key[0].isdigit():
env_name = key.replace("3", "Three")
if not env_name.isidentifier():
logger.warning(
f"Environment id {env_name} can not be registered since it is"
f"not a valid identifier name."
)
continue
locals()[env_name] = PettingZooEnvFactory(key)
|
flexible
|
{
"blob_id": "3bec28561c306a46c43dafc8bdc2e01f2ea06180",
"index": 9491,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key in default_registry:\n env_name = key\n if key[0].isdigit():\n env_name = key.replace('3', 'Three')\n if not env_name.isidentifier():\n logger.warning(\n f'Environment id {env_name} can not be registered since it isnot a valid identifier name.'\n )\n continue\n locals()[env_name] = PettingZooEnvFactory(key)\n",
"step-3": "from mlagents_envs.registry import default_registry\nfrom mlagents_envs.envs.pettingzoo_env_factory import logger, PettingZooEnvFactory\nfor key in default_registry:\n env_name = key\n if key[0].isdigit():\n env_name = key.replace('3', 'Three')\n if not env_name.isidentifier():\n logger.warning(\n f'Environment id {env_name} can not be registered since it isnot a valid identifier name.'\n )\n continue\n locals()[env_name] = PettingZooEnvFactory(key)\n",
"step-4": "from mlagents_envs.registry import default_registry\nfrom mlagents_envs.envs.pettingzoo_env_factory import logger, PettingZooEnvFactory\n\n# Register each environment in default_registry as a PettingZooEnv\nfor key in default_registry:\n env_name = key\n if key[0].isdigit():\n env_name = key.replace(\"3\", \"Three\")\n if not env_name.isidentifier():\n logger.warning(\n f\"Environment id {env_name} can not be registered since it is\"\n f\"not a valid identifier name.\"\n )\n continue\n locals()[env_name] = PettingZooEnvFactory(key)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Test cases for ldaptor.protocols.ldap.delta
"""
from twisted.trial import unittest
from ldaptor import delta, entry, attributeset, inmemory
from ldaptor.protocols.ldap import ldapsyntax, distinguishedname, ldaperrors
class TestModifications(unittest.TestCase):
    """
    Applying Add/Delete/Replace modifications to an LDAP entry.
    """

    def setUp(self):
        # Build a fresh entry for every test so attribute mutations made by
        # one test cannot leak into another.
        attributes = {
            "objectClass": ["person"],
            "cn": ["foo", "thud"],
            "sn": ["bar"],
            "more": ["junk"],
        }
        self.foo = ldapsyntax.LDAPEntry(
            None, dn="cn=foo,dc=example,dc=com", attributes=attributes
        )

    def testAddOld(self):
        """Adding to an existing attribute appends the new values."""
        delta.Add("cn", ["quux"]).patch(self.foo)
        self.assertFalse("stuff" in self.foo)
        self.assertEqual(self.foo["cn"], ["foo", "thud", "quux"])

    def testAddNew(self):
        """Adding a previously absent attribute creates it."""
        delta.Add("stuff", ["val1", "val2"]).patch(self.foo)
        self.assertEqual(self.foo["stuff"], ["val1", "val2"])
        self.assertEqual(self.foo["cn"], ["foo", "thud"])

    def testDelete(self):
        """Deleting one value leaves the remaining values intact."""
        delta.Delete("cn", ["thud"]).patch(self.foo)
        self.assertFalse("stuff" in self.foo)
        self.assertEqual(self.foo["cn"], ["foo"])

    def testDeleteAll(self):
        """Deleting with no value list removes the whole attribute."""
        delta.Delete("more").patch(self.foo)
        self.assertFalse("stuff" in self.foo)
        self.assertEqual(self.foo["cn"], ["foo", "thud"])

    def testDelete_FailOnNonExistingAttributeType_All(self):
        """Deleting a missing attribute type raises KeyError."""
        self.assertRaises(
            KeyError, delta.Delete("notexist", []).patch, self.foo
        )

    def testDelete_FailOnNonExistingAttributeType_OneValue(self):
        """Deleting a value of a missing attribute type raises KeyError."""
        self.assertRaises(
            KeyError, delta.Delete("notexist", ["a"]).patch, self.foo
        )

    def testDelete_FailOnNonExistingAttributeValue(self):
        """Deleting a value the attribute does not hold raises LookupError."""
        self.assertRaises(
            LookupError, delta.Delete("cn", ["notexist"]).patch, self.foo
        )

    def testReplace_Add(self):
        """Replace on a missing attribute behaves like an add."""
        delta.Replace("stuff", ["val1", "val2"]).patch(self.foo)
        self.assertEqual(self.foo["stuff"], ["val1", "val2"])
        self.assertEqual(self.foo["sn"], ["bar"])
        self.assertEqual(self.foo["more"], ["junk"])

    def testReplace_Modify(self):
        """Replace on an existing attribute overwrites its values."""
        delta.Replace("sn", ["baz"]).patch(self.foo)
        self.assertFalse("stuff" in self.foo)
        self.assertEqual(self.foo["sn"], ["baz"])
        self.assertEqual(self.foo["more"], ["junk"])

    def testReplace_Delete_Existing(self):
        """Replace with an empty value list removes the attribute."""
        delta.Replace("more", []).patch(self.foo)
        self.assertFalse("stuff" in self.foo)
        self.assertEqual(self.foo["sn"], ["bar"])
        self.assertFalse("more" in self.foo)

    def testReplace_Delete_NonExisting(self):
        """Replace-with-empty on a missing attribute leaves the entry alone."""
        delta.Replace("nonExisting", []).patch(self.foo)
        self.assertFalse("stuff" in self.foo)
        self.assertEqual(self.foo["sn"], ["bar"])
        self.assertEqual(self.foo["more"], ["junk"])
class TestModificationOpLDIF(unittest.TestCase):
    """
    LDIF serialization of the individual modification operations.
    """

    def testAdd(self):
        """An add modification lists the attribute and each new value."""
        op = delta.Add("foo", ["bar", "baz"])
        self.assertEqual(op.asLDIF(), b"add: foo\nfoo: bar\nfoo: baz\n-\n")

    def testDelete(self):
        """A delete modification lists the values to remove."""
        op = delta.Delete("foo", ["bar", "baz"])
        self.assertEqual(op.asLDIF(), b"delete: foo\nfoo: bar\nfoo: baz\n-\n")

    def testDeleteAll(self):
        """A delete without values drops the whole attribute."""
        op = delta.Delete("foo")
        self.assertEqual(op.asLDIF(), b"delete: foo\n-\n")

    def testReplace(self):
        """A replace modification lists the replacement values."""
        op = delta.Replace("foo", ["bar", "baz"])
        self.assertEqual(op.asLDIF(), b"replace: foo\nfoo: bar\nfoo: baz\n-\n")

    def testReplaceAll(self):
        """A replace without values clears the attribute."""
        op = delta.Replace("thud")
        self.assertEqual(op.asLDIF(), b"replace: thud\n-\n")

    def testAddBase64(self):
        """
        LDIF attribute representation is base64 encoded
        if attribute value contains nonprintable characters
        or starts with reserved characters
        """
        op = delta.Add("attr", [":value1", "value\n\r2"])
        self.assertEqual(
            op.asLDIF(),
            b"add: attr\nattr:: OnZhbHVlMQ==\nattr:: dmFsdWUKDTI=\n-\n",
        )
class OperationTestCase(unittest.TestCase):
    """
    Shared helpers for test cases that apply operations to an LDAP tree.
    """

    def getRoot(self):
        """
        Build and return a fresh in-memory root entry for
        dc=example,dc=com.
        """
        root_dn = distinguishedname.DistinguishedName("dc=example,dc=com")
        return inmemory.ReadOnlyInMemoryLDAPEntry(dn=root_dn)
class TestAddOpLDIF(OperationTestCase):
    """
    Unit tests for `AddOp`: LDIF output, equality/hashing, patching, repr.
    """

    def testAsLDIF(self):
        """
        It will return the LDIF representation of the operation.
        """
        sut = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="dc=example,dc=com",
                attributes={
                    "foo": ["bar", "baz"],
                    "quux": ["thud"],
                },
            )
        )

        result = sut.asLDIF()
        # Attributes are emitted sorted by name, after the changetype line.
        self.assertEqual(
            b"""dn: dc=example,dc=com
changetype: add
foo: bar
foo: baz
quux: thud
""",
            result,
        )

    def testAddOpEqualitySameEntry(self):
        """
        Objects are equal when the have the same LDAP entry.
        """
        first_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertEqual(first, second)

    def testAddOpInequalityDifferentEntry(self):
        """
        Objects are not equal when the have different LDAP entries.
        """
        # Same DN, different attribute values.
        first_entry = entry.BaseLDAPEntry(
            dn="ou=First Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=First Team, dc=example,dc=com",
            attributes={"foo": ["other", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertNotEqual(first, second)

    def testAddOpInequalityNoEntryObject(self):
        """
        Objects is not equal with random objects.
        """
        team_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        sut = delta.AddOp(team_entry)

        # A plain dict with the same attribute data must not compare equal.
        self.assertNotEqual(sut, {"foo": ["same", "attributes"]})

    def testAddOpHashSimilar(self):
        """
        Objects which are equal have the same hash.
        """
        first_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["same", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertEqual(hash(first), hash(second))

    def testAddOpHashDifferent(self):
        """
        Objects which are not equal have different hash.
        """
        first_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["one", "attributes"]},
        )
        second_entry = entry.BaseLDAPEntry(
            dn="ou=Duplicate Team, dc=example,dc=com",
            attributes={"foo": ["other", "attributes"]},
        )

        first = delta.AddOp(first_entry)
        second = delta.AddOp(second_entry)

        self.assertNotEqual(hash(first), hash(second))

    def testAddOp_DNExists(self):
        """
        It fails to perform the `add` operation for an existing entry.
        """
        root = self.getRoot()
        # Pre-create the child so the AddOp below collides with it.
        root.addChild(
            rdn="ou=Existing Team",
            attributes={
                "objectClass": ["a", "b"],
                "ou": ["HR"],
            },
        )

        hr_entry = entry.BaseLDAPEntry(
            dn="ou=Existing Team, dc=example,dc=com",
            attributes={"foo": ["dont", "care"]},
        )
        sut = delta.AddOp(hr_entry)

        deferred = sut.patch(root)

        failure = self.failureResultOf(deferred)
        self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)

    def testRepr(self):
        """
        Getting string representation
        """
        sut = delta.AddOp(
            entry.BaseLDAPEntry(
                dn="dc=example,dc=com",
                attributes={
                    "bar": ["foo"],
                    "foo": ["bar"],
                },
            )
        )

        # repr embeds the entry's own repr with attributes sorted by name.
        self.assertEqual(
            repr(sut),
            "AddOp(BaseLDAPEntry('dc=example,dc=com', "
            "{'bar': ['foo'], 'foo': ['bar']}))",
        )
class TestDeleteOpLDIF(OperationTestCase):
    """
    Unit tests for DeleteOp: LDIF output, equality/hashing, patching, repr.
    """

    def testAsLDIF(self):
        """
        It return the LDIF representation of the delete operation.
        """
        sut = delta.DeleteOp("dc=example,dc=com")

        result = sut.asLDIF()
        self.assertEqual(
            b"""dn: dc=example,dc=com
changetype: delete
""",
            result,
        )

    def testDeleteOpEqualitySameDN(self):
        """
        Objects are equal when the have the same DN.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertEqual(first, second)

    def testDeleteOpEqualityEqualDN(self):
        """
        DeleteOp objects are equal if their DNs are equal,
        whether built from a DistinguishedName, an entry, or a string.
        """
        first_dn = distinguishedname.DistinguishedName(
            stringValue="ou=Team,dc=example,dc=com"
        )
        first = delta.DeleteOp(first_dn)

        second_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example, dc=com")
        second = delta.DeleteOp(second_entry)

        third = delta.DeleteOp("ou=Team, dc=example,dc=com")

        self.assertEqual(first, second)
        self.assertEqual(first, third)

    def testDeleteOpInequalityDifferentEntry(self):
        """
        DeleteOp objects are not equal when the have different LDAP entries.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Cowboys, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertNotEqual(first, second)

    def testDeleteOpInequalityNoEntryObject(self):
        """
        DeleteOp objects is not equal with random objects.
        """
        team_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        sut = delta.DeleteOp(team_entry)

        # A bare DN string must not compare equal to the operation object.
        self.assertNotEqual(sut, "ou=Team, dc=example,dc=com")

    def testDeleteOpHashSimilar(self):
        """
        Objects which are equal have the same hash.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertEqual(hash(first), hash(second))

    def testDeleteOpHashDifferent(self):
        """
        Objects which are not equal have different hash.
        """
        first_entry = entry.BaseLDAPEntry(dn="ou=Team, dc=example,dc=com")
        second_entry = entry.BaseLDAPEntry(dn="ou=Cowboys, dc=example,dc=com")

        first = delta.DeleteOp(first_entry)
        second = delta.DeleteOp(second_entry)

        self.assertNotEqual(hash(first), hash(second))

    def testDeleteOp_DNNotFound(self):
        """
        If fail to delete when the RDN does not exists.
        """
        root = self.getRoot()
        sut = delta.DeleteOp("cn=nope,dc=example,dc=com")

        deferred = sut.patch(root)

        failure = self.failureResultOf(deferred)
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testDeleteOpInvalidDN(self):
        """
        Invalid type of DN raises AssertionError
        """
        self.assertRaises(AssertionError, delta.DeleteOp, 0)

    def testRepr(self):
        """
        Getting string representation
        """
        sut = delta.DeleteOp("dc=example,dc=com")

        self.assertEqual(repr(sut), "DeleteOp('dc=example,dc=com')")
class TestModifyOp(OperationTestCase):
    """
    Unit tests for ModifyOp.
    """

    def testAsLDIF(self):
        """
        asLDIF() renders each contained modification in order, separated
        by "-" lines, and terminates the record with a blank line.
        """
        operation = delta.ModifyOp(
            "cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com",
            [
                delta.Add(
                    "postaladdress",
                    ["123 Anystreet $ Sunnyvale, CA $ 94086"],
                ),
                delta.Delete("description"),
                delta.Replace(
                    "telephonenumber",
                    ["+1 408 555 1234", "+1 408 555 5678"],
                ),
                delta.Delete("facsimiletelephonenumber", ["+1 408 555 9876"]),
            ],
        )
        # Build the expected LDIF line by line; the trailing empty
        # element produces the record-terminating blank line.
        expected = b"".join(
            line + b"\n"
            for line in [
                b"dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com",
                b"changetype: modify",
                b"add: postaladdress",
                b"postaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086",
                b"-",
                b"delete: description",
                b"-",
                b"replace: telephonenumber",
                b"telephonenumber: +1 408 555 1234",
                b"telephonenumber: +1 408 555 5678",
                b"-",
                b"delete: facsimiletelephonenumber",
                b"facsimiletelephonenumber: +1 408 555 9876",
                b"-",
                b"",
            ]
        )
        self.assertEqual(expected, operation.asLDIF())

    def testInequalityDiffertnDN(self):
        """
        Modify operations with the same modifications but different DNs
        are not equal.
        """
        modifications = [delta.Delete("description")]
        john = delta.ModifyOp("cn=john,dc=example,dc=com", modifications)
        doe = delta.ModifyOp("cn=doe,dc=example,dc=com", modifications)
        self.assertNotEqual(john, doe)

    def testInequalityDifferentModifications(self):
        """
        Modify operations on the same DN with different modifications are
        not equal.
        """
        dn = "cn=john,dc=example,dc=com"
        adding = delta.ModifyOp(dn, [delta.Add("description")])
        deleting = delta.ModifyOp(dn, [delta.Delete("description")])
        self.assertNotEqual(adding, deleting)

    def testInequalityNotModifyOP(self):
        """
        A ModifyOp never compares equal to an object of another type.
        """
        operation = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        self.assertNotEqual("cn=john,dc=example,dc=com", operation)

    def testInequalityDiffertnOperations(self):
        """
        Modify operations differing in both DN and modifications are not
        equal.
        """
        first = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        second = delta.ModifyOp(
            "cn=doe,dc=example,dc=com", [delta.Delete("homeDirectory")]
        )
        self.assertNotEqual(first, second)

    def testHashEquality(self):
        """
        ModifyOp is hashable and equal operations share a hash value.
        """
        first = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        second = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        self.assertEqual(first, second)
        self.assertEqual(
            first.asLDIF(),
            second.asLDIF(),
            "LDIF equality is a precondition for valid hash values",
        )
        self.assertEqual(hash(first), hash(second))

    def testHashInequality(self):
        """
        Operations with different modifications have different hash
        values.
        """
        first = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        second = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("homeDirectory")]
        )
        self.assertNotEqual(first.asLDIF(), second.asLDIF())
        self.assertNotEqual(hash(first), hash(second))

    def testModifyOp_DNNotFound(self):
        """
        Modifying a DN that is not present in the tree fails with
        LDAPNoSuchObject.
        """
        operation = delta.ModifyOp(
            "cn=nope,dc=example,dc=com",
            [delta.Add("foo", ["bar"])],
        )
        failure = self.failureResultOf(operation.patch(self.getRoot()))
        self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)

    def testRepr(self):
        """
        repr() of a ModifyOp shows the DN and the modification list.
        """
        operation = delta.ModifyOp(
            "cn=john,dc=example,dc=com", [delta.Delete("description")]
        )
        expected = (
            "ModifyOp(dn='cn=john,dc=example,dc=com', "
            "modifications=[Delete('description', [])])"
        )
        self.assertEqual(repr(operation), expected)
class TestModificationComparison(unittest.TestCase):
    """
    Equality semantics of the modification primitives (Add/Delete) against
    each other and against non-modification values.
    """

    def testEquality_Add_True(self):
        """Two Add modifications with the same key and values are equal."""
        self.assertEqual(
            delta.Add("k", ["b", "c", "d"]),
            delta.Add("k", ["b", "c", "d"]),
        )

    def testEquality_AddVsDelete_False(self):
        """An Add is never equal to a Delete, even with identical data."""
        self.assertNotEqual(
            delta.Add("k", ["b", "c", "d"]),
            delta.Delete("k", ["b", "c", "d"]),
        )

    def testEquality_AttributeSet_False(self):
        """A modification is not equal to a plain LDAPAttributeSet."""
        self.assertNotEqual(
            delta.Add("k", ["b", "c", "d"]),
            attributeset.LDAPAttributeSet("k", ["b", "c", "d"]),
        )

    def testEquality_List_False(self):
        """A modification is not equal to a bare list of its values."""
        self.assertNotEqual(delta.Add("k", ["b", "c", "d"]), ["b", "c", "d"])
|
normal
|
{
"blob_id": "8054ccb07d0130b75927a4bb9b712ce3d564b8fe",
"index": 4702,
"step-1": "<mask token>\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n <mask token>\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, 
second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': 
['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, 
dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n 
b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n 
self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-2": "<mask token>\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n <mask token>\n\n def testDelete(self):\n m = delta.Delete('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'delete: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n <mask token>\n\n def testReplace(self):\n m = delta.Replace('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'replace: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n \"\"\"\n m = delta.Add('attr', [':value1', 'value\\n\\r2'])\n self.assertEqual(m.asLDIF(),\n b'add: attr\\nattr:: OnZhbHVlMQ==\\nattr:: dmFsdWUKDTI=\\n-\\n')\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = 
delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n 
root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': ['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the 
have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass 
TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n 
\"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = 
delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-3": "<mask token>\n\n\nclass TestModifications(unittest.TestCase):\n\n def setUp(self):\n self.foo = ldapsyntax.LDAPEntry(None, dn='cn=foo,dc=example,dc=com',\n attributes={'objectClass': ['person'], 'cn': ['foo', 'thud'],\n 'sn': ['bar'], 'more': ['junk']})\n <mask token>\n\n def testAddNew(self):\n mod = delta.Add('stuff', ['val1', 'val2'])\n mod.patch(self.foo)\n self.assertEqual(self.foo['stuff'], ['val1', 'val2'])\n self.assertEqual(self.foo['cn'], ['foo', 'thud'])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def testReplace_Delete_NonExisting(self):\n mod = delta.Replace('nonExisting', [])\n mod.patch(self.foo)\n self.assertFalse('stuff' in self.foo)\n self.assertEqual(self.foo['sn'], ['bar'])\n self.assertEqual(self.foo['more'], ['junk'])\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n\n def testAdd(self):\n m = delta.Add('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'add: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDelete(self):\n m = delta.Delete('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'delete: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDeleteAll(self):\n m = delta.Delete('foo')\n self.assertEqual(m.asLDIF(), b'delete: foo\\n-\\n')\n\n def testReplace(self):\n m = delta.Replace('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'replace: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n \"\"\"\n m = delta.Add('attr', [':value1', 'value\\n\\r2'])\n self.assertEqual(m.asLDIF(),\n b'add: attr\\nattr:: OnZhbHVlMQ==\\nattr:: dmFsdWUKDTI=\\n-\\n')\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n 
def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n 
Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': ['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': ['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n 
result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def 
testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def 
testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = 
delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-4": "<mask token>\n\n\nclass TestModifications(unittest.TestCase):\n\n def setUp(self):\n self.foo = ldapsyntax.LDAPEntry(None, dn='cn=foo,dc=example,dc=com',\n attributes={'objectClass': ['person'], 'cn': ['foo', 'thud'],\n 'sn': ['bar'], 'more': ['junk']})\n <mask token>\n\n def testAddNew(self):\n mod = delta.Add('stuff', ['val1', 'val2'])\n mod.patch(self.foo)\n self.assertEqual(self.foo['stuff'], ['val1', 'val2'])\n self.assertEqual(self.foo['cn'], ['foo', 'thud'])\n <mask token>\n <mask token>\n\n def testDelete_FailOnNonExistingAttributeType_All(self):\n mod = delta.Delete('notexist', [])\n self.assertRaises(KeyError, mod.patch, self.foo)\n <mask token>\n\n def testDelete_FailOnNonExistingAttributeValue(self):\n mod = delta.Delete('cn', ['notexist'])\n self.assertRaises(LookupError, mod.patch, self.foo)\n <mask token>\n <mask token>\n <mask token>\n\n def testReplace_Delete_NonExisting(self):\n mod = delta.Replace('nonExisting', [])\n mod.patch(self.foo)\n self.assertFalse('stuff' in self.foo)\n self.assertEqual(self.foo['sn'], ['bar'])\n self.assertEqual(self.foo['more'], ['junk'])\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n\n def testAdd(self):\n m = delta.Add('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'add: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDelete(self):\n m = delta.Delete('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'delete: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testDeleteAll(self):\n m = delta.Delete('foo')\n self.assertEqual(m.asLDIF(), b'delete: foo\\n-\\n')\n\n def testReplace(self):\n m = delta.Replace('foo', ['bar', 'baz'])\n self.assertEqual(m.asLDIF(), b'replace: foo\\nfoo: bar\\nfoo: baz\\n-\\n')\n\n def testReplaceAll(self):\n m = delta.Replace('thud')\n self.assertEqual(m.asLDIF(), b'replace: thud\\n-\\n')\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n 
\"\"\"\n m = delta.Add('attr', [':value1', 'value\\n\\r2'])\n self.assertEqual(m.asLDIF(),\n b'add: attr\\nattr:: OnZhbHVlMQ==\\nattr:: dmFsdWUKDTI=\\n-\\n')\n\n\nclass OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(dn=distinguishedname.\n DistinguishedName('dc=example,dc=com'))\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'foo': ['bar', 'baz'], 'quux': ['thud']}))\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: dc=example,dc=com\\nchangetype: add\\nfoo: bar\\nfoo: baz\\nquux: thud\\n\\n'\n , result)\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['same',\n 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=First Team, dc=example,dc=com', attributes={'foo': ['other',\n 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n 
\"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n sut = delta.AddOp(team_entry)\n self.assertNotEqual(sut, {'foo': ['same', 'attributes']})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'same', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'one', 'attributes']})\n second_entry = entry.BaseLDAPEntry(dn=\n 'ou=Duplicate Team, dc=example,dc=com', attributes={'foo': [\n 'other', 'attributes']})\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(rdn='ou=Existing Team', attributes={'objectClass': [\n 'a', 'b'], 'ou': ['HR']})\n hr_entry = entry.BaseLDAPEntry(dn=\n 'ou=Existing Team, dc=example,dc=com', attributes={'foo': [\n 'dont', 'care']})\n sut = delta.AddOp(hr_entry)\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(entry.BaseLDAPEntry(dn='dc=example,dc=com',\n attributes={'bar': ['foo'], 'foo': ['bar']}))\n self.assertEqual(repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', {'bar': 
['foo'], 'foo': ['bar']}))\"\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n result = sut.asLDIF()\n self.assertEqual(b'dn: dc=example,dc=com\\nchangetype: delete\\n\\n',\n result)\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(stringValue=\n 'ou=Team,dc=example,dc=com')\n first = delta.DeleteOp(first_dn)\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example, dc=com')\n second = delta.DeleteOp(second_entry)\n third = delta.DeleteOp('ou=Team, dc=example,dc=com')\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n sut = delta.DeleteOp(team_entry)\n self.assertNotEqual(sut, 'ou=Team, dc=example,dc=com')\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n 
first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn='ou=Team, dc=example,dc=com')\n second_entry = entry.BaseLDAPEntry(dn='ou=Cowboys, dc=example,dc=com')\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp('cn=nope,dc=example,dc=com')\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp('dc=example,dc=com')\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp('cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com',\n [delta.Add('postaladdress', [\n '123 Anystreet $ Sunnyvale, CA $ 94086']), delta.Delete(\n 'description'), delta.Replace('telephonenumber', [\n '+1 408 555 1234', '+1 408 555 5678']), delta.Delete(\n 'facsimiletelephonenumber', ['+1 408 555 9876'])])\n result = sut.asLDIF()\n self.assertEqual(\n b'dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\\nchangetype: modify\\nadd: postaladdress\\npostaladdress: 123 Anystreet $ 
Sunnyvale, CA $ 94086\\n-\\ndelete: description\\n-\\nreplace: telephonenumber\\ntelephonenumber: +1 408 555 1234\\ntelephonenumber: +1 408 555 5678\\n-\\ndelete: facsimiletelephonenumber\\nfacsimiletelephonenumber: +1 408 555 9876\\n-\\n\\n'\n , result)\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Add(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertNotEqual('cn=john,dc=example,dc=com', sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=doe,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(first, second)\n self.assertEqual(first.asLDIF(), second.asLDIF(),\n 'LDIF equality is a precondition for valid hash values')\n 
self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n second = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'homeDirectory')])\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp('cn=nope,dc=example,dc=com', [delta.Add('foo',\n ['bar'])])\n deferred = sut.patch(root)\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp('cn=john,dc=example,dc=com', [delta.Delete(\n 'description')])\n self.assertEqual(repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', modifications=[Delete('description', [])])\"\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n\n def testEquality_Add_True(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Add('k', ['b', 'c', 'd'])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = delta.Delete('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])\n self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add('k', ['b', 'c', 'd'])\n b = ['b', 'c', 'd']\n self.assertNotEqual(a, b)\n",
"step-5": "\"\"\"\nTest cases for ldaptor.protocols.ldap.delta\n\"\"\"\n\nfrom twisted.trial import unittest\nfrom ldaptor import delta, entry, attributeset, inmemory\nfrom ldaptor.protocols.ldap import ldapsyntax, distinguishedname, ldaperrors\n\n\nclass TestModifications(unittest.TestCase):\n def setUp(self):\n self.foo = ldapsyntax.LDAPEntry(\n None,\n dn=\"cn=foo,dc=example,dc=com\",\n attributes={\n \"objectClass\": [\"person\"],\n \"cn\": [\"foo\", \"thud\"],\n \"sn\": [\"bar\"],\n \"more\": [\"junk\"],\n },\n )\n\n def testAddOld(self):\n mod = delta.Add(\"cn\", [\"quux\"])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"cn\"], [\"foo\", \"thud\", \"quux\"])\n\n def testAddNew(self):\n mod = delta.Add(\"stuff\", [\"val1\", \"val2\"])\n mod.patch(self.foo)\n\n self.assertEqual(self.foo[\"stuff\"], [\"val1\", \"val2\"])\n self.assertEqual(self.foo[\"cn\"], [\"foo\", \"thud\"])\n\n def testDelete(self):\n mod = delta.Delete(\"cn\", [\"thud\"])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"cn\"], [\"foo\"])\n\n def testDeleteAll(self):\n mod = delta.Delete(\"more\")\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"cn\"], [\"foo\", \"thud\"])\n\n def testDelete_FailOnNonExistingAttributeType_All(self):\n mod = delta.Delete(\"notexist\", [])\n self.assertRaises(KeyError, mod.patch, self.foo)\n\n def testDelete_FailOnNonExistingAttributeType_OneValue(self):\n mod = delta.Delete(\"notexist\", [\"a\"])\n self.assertRaises(KeyError, mod.patch, self.foo)\n\n def testDelete_FailOnNonExistingAttributeValue(self):\n mod = delta.Delete(\"cn\", [\"notexist\"])\n self.assertRaises(LookupError, mod.patch, self.foo)\n\n def testReplace_Add(self):\n mod = delta.Replace(\"stuff\", [\"val1\", \"val2\"])\n mod.patch(self.foo)\n\n self.assertEqual(self.foo[\"stuff\"], [\"val1\", \"val2\"])\n self.assertEqual(self.foo[\"sn\"], [\"bar\"])\n 
self.assertEqual(self.foo[\"more\"], [\"junk\"])\n\n def testReplace_Modify(self):\n mod = delta.Replace(\"sn\", [\"baz\"])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"sn\"], [\"baz\"])\n self.assertEqual(self.foo[\"more\"], [\"junk\"])\n\n def testReplace_Delete_Existing(self):\n mod = delta.Replace(\"more\", [])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"sn\"], [\"bar\"])\n self.assertFalse(\"more\" in self.foo)\n\n def testReplace_Delete_NonExisting(self):\n mod = delta.Replace(\"nonExisting\", [])\n mod.patch(self.foo)\n\n self.assertFalse(\"stuff\" in self.foo)\n self.assertEqual(self.foo[\"sn\"], [\"bar\"])\n self.assertEqual(self.foo[\"more\"], [\"junk\"])\n\n\nclass TestModificationOpLDIF(unittest.TestCase):\n def testAdd(self):\n m = delta.Add(\"foo\", [\"bar\", \"baz\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nadd: foo\nfoo: bar\nfoo: baz\n-\n\"\"\",\n )\n\n def testDelete(self):\n m = delta.Delete(\"foo\", [\"bar\", \"baz\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\ndelete: foo\nfoo: bar\nfoo: baz\n-\n\"\"\",\n )\n\n def testDeleteAll(self):\n m = delta.Delete(\"foo\")\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\ndelete: foo\n-\n\"\"\",\n )\n\n def testReplace(self):\n m = delta.Replace(\"foo\", [\"bar\", \"baz\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nreplace: foo\nfoo: bar\nfoo: baz\n-\n\"\"\",\n )\n\n def testReplaceAll(self):\n m = delta.Replace(\"thud\")\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nreplace: thud\n-\n\"\"\",\n )\n\n def testAddBase64(self):\n \"\"\"\n LDIF attribute representation is base64 encoded\n if attribute value contains nonprintable characters\n or starts with reserved characters\n \"\"\"\n m = delta.Add(\"attr\", [\":value1\", \"value\\n\\r2\"])\n self.assertEqual(\n m.asLDIF(),\n b\"\"\"\\\nadd: attr\nattr:: OnZhbHVlMQ==\nattr:: dmFsdWUKDTI=\n-\n\"\"\",\n )\n\n\nclass 
OperationTestCase(unittest.TestCase):\n \"\"\"\n Test case for operations on a LDAP tree.\n \"\"\"\n\n def getRoot(self):\n \"\"\"\n Returns a new LDAP root for dc=example,dc=com.\n \"\"\"\n return inmemory.ReadOnlyInMemoryLDAPEntry(\n dn=distinguishedname.DistinguishedName(\"dc=example,dc=com\")\n )\n\n\nclass TestAddOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for `AddOp`.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return the LDIF representation of the operation.\n \"\"\"\n sut = delta.AddOp(\n entry.BaseLDAPEntry(\n dn=\"dc=example,dc=com\",\n attributes={\n \"foo\": [\"bar\", \"baz\"],\n \"quux\": [\"thud\"],\n },\n )\n )\n\n result = sut.asLDIF()\n\n self.assertEqual(\n b\"\"\"dn: dc=example,dc=com\nchangetype: add\nfoo: bar\nfoo: baz\nquux: thud\n\n\"\"\",\n result,\n )\n\n def testAddOpEqualitySameEntry(self):\n \"\"\"\n Objects are equal when the have the same LDAP entry.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertEqual(first, second)\n\n def testAddOpInequalityDifferentEntry(self):\n \"\"\"\n Objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=First Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=First Team, dc=example,dc=com\",\n attributes={\"foo\": [\"other\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertNotEqual(first, second)\n\n def testAddOpInequalityNoEntryObject(self):\n \"\"\"\n Objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, 
dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n sut = delta.AddOp(team_entry)\n\n self.assertNotEqual(sut, {\"foo\": [\"same\", \"attributes\"]})\n\n def testAddOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertEqual(hash(first), hash(second))\n\n def testAddOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"one\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"other\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertNotEqual(hash(first), hash(second))\n\n def testAddOp_DNExists(self):\n \"\"\"\n It fails to perform the `add` operation for an existing entry.\n \"\"\"\n root = self.getRoot()\n root.addChild(\n rdn=\"ou=Existing Team\",\n attributes={\n \"objectClass\": [\"a\", \"b\"],\n \"ou\": [\"HR\"],\n },\n )\n\n hr_entry = entry.BaseLDAPEntry(\n dn=\"ou=Existing Team, dc=example,dc=com\",\n attributes={\"foo\": [\"dont\", \"care\"]},\n )\n sut = delta.AddOp(hr_entry)\n\n deferred = sut.patch(root)\n\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPEntryAlreadyExists)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.AddOp(\n entry.BaseLDAPEntry(\n dn=\"dc=example,dc=com\",\n attributes={\n \"bar\": [\"foo\"],\n \"foo\": [\"bar\"],\n },\n )\n )\n\n 
self.assertEqual(\n repr(sut),\n \"AddOp(BaseLDAPEntry('dc=example,dc=com', \"\n \"{'bar': ['foo'], 'foo': ['bar']}))\",\n )\n\n\nclass TestDeleteOpLDIF(OperationTestCase):\n \"\"\"\n Unit tests for DeleteOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It return the LDIF representation of the delete operation.\n \"\"\"\n sut = delta.DeleteOp(\"dc=example,dc=com\")\n\n result = sut.asLDIF()\n self.assertEqual(\n b\"\"\"dn: dc=example,dc=com\nchangetype: delete\n\n\"\"\",\n result,\n )\n\n def testDeleteOpEqualitySameDN(self):\n \"\"\"\n Objects are equal when the have the same DN.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertEqual(first, second)\n\n def testDeleteOpEqualityEqualDN(self):\n \"\"\"\n DeleteOp objects are equal if their DNs are equal.\n \"\"\"\n first_dn = distinguishedname.DistinguishedName(\n stringValue=\"ou=Team,dc=example,dc=com\"\n )\n first = delta.DeleteOp(first_dn)\n\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example, dc=com\")\n second = delta.DeleteOp(second_entry)\n\n third = delta.DeleteOp(\"ou=Team, dc=example,dc=com\")\n\n self.assertEqual(first, second)\n self.assertEqual(first, third)\n\n def testDeleteOpInequalityDifferentEntry(self):\n \"\"\"\n DeleteOp objects are not equal when the have different LDAP entries.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Cowboys, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertNotEqual(first, second)\n\n def testDeleteOpInequalityNoEntryObject(self):\n \"\"\"\n DeleteOp objects is not equal with random objects.\n \"\"\"\n team_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n\n sut = delta.DeleteOp(team_entry)\n\n 
self.assertNotEqual(sut, \"ou=Team, dc=example,dc=com\")\n\n def testDeleteOpHashSimilar(self):\n \"\"\"\n Objects which are equal have the same hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertEqual(hash(first), hash(second))\n\n def testDeleteOpHashDifferent(self):\n \"\"\"\n Objects which are not equal have different hash.\n \"\"\"\n first_entry = entry.BaseLDAPEntry(dn=\"ou=Team, dc=example,dc=com\")\n second_entry = entry.BaseLDAPEntry(dn=\"ou=Cowboys, dc=example,dc=com\")\n\n first = delta.DeleteOp(first_entry)\n second = delta.DeleteOp(second_entry)\n\n self.assertNotEqual(hash(first), hash(second))\n\n def testDeleteOp_DNNotFound(self):\n \"\"\"\n If fail to delete when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.DeleteOp(\"cn=nope,dc=example,dc=com\")\n\n deferred = sut.patch(root)\n\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testDeleteOpInvalidDN(self):\n \"\"\"\n Invalid type of DN raises AssertionError\n \"\"\"\n self.assertRaises(AssertionError, delta.DeleteOp, 0)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.DeleteOp(\"dc=example,dc=com\")\n\n self.assertEqual(repr(sut), \"DeleteOp('dc=example,dc=com')\")\n\n\nclass TestModifyOp(OperationTestCase):\n \"\"\"\n Unit tests for ModifyOp.\n \"\"\"\n\n def testAsLDIF(self):\n \"\"\"\n It will return a LDIF representation of the contained operations.\n \"\"\"\n sut = delta.ModifyOp(\n \"cn=Paula Jensen, ou=Dev Ops, dc=airius, dc=com\",\n [\n delta.Add(\n \"postaladdress\",\n [\"123 Anystreet $ Sunnyvale, CA $ 94086\"],\n ),\n delta.Delete(\"description\"),\n delta.Replace(\n \"telephonenumber\",\n [\"+1 408 555 1234\", \"+1 408 555 5678\"],\n ),\n 
delta.Delete(\"facsimiletelephonenumber\", [\"+1 408 555 9876\"]),\n ],\n )\n\n result = sut.asLDIF()\n\n self.assertEqual(\n b\"\"\"dn: cn=Paula Jensen,ou=Dev Ops,dc=airius,dc=com\nchangetype: modify\nadd: postaladdress\npostaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086\n-\ndelete: description\n-\nreplace: telephonenumber\ntelephonenumber: +1 408 555 1234\ntelephonenumber: +1 408 555 5678\n-\ndelete: facsimiletelephonenumber\nfacsimiletelephonenumber: +1 408 555 9876\n-\n\n\"\"\",\n result,\n )\n\n def testInequalityDiffertnDN(self):\n \"\"\"\n Modify operations for different DN are not equal.\n \"\"\"\n first = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n second = delta.ModifyOp(\n \"cn=doe,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n self.assertNotEqual(first, second)\n\n def testInequalityDifferentModifications(self):\n \"\"\"\n Modify operations with different modifications are not equal\n \"\"\"\n first = delta.ModifyOp(\"cn=john,dc=example,dc=com\", [delta.Add(\"description\")])\n\n second = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n self.assertNotEqual(first, second)\n\n def testInequalityNotModifyOP(self):\n \"\"\"\n Modify operations are not equal with other object types.\n \"\"\"\n sut = delta.ModifyOp(\"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")])\n\n self.assertNotEqual(\"cn=john,dc=example,dc=com\", sut)\n\n def testInequalityDiffertnOperations(self):\n \"\"\"\n Modify operations for same DN but different operations are not equal.\n \"\"\"\n first = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n second = delta.ModifyOp(\n \"cn=doe,dc=example,dc=com\", [delta.Delete(\"homeDirectory\")]\n )\n\n self.assertNotEqual(first, second)\n\n def testHashEquality(self):\n \"\"\"\n Modify operations can be hashed and equal objects have the same\n hash.\n \"\"\"\n first = delta.ModifyOp(\n 
\"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n second = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n self.assertEqual(first, second)\n self.assertEqual(\n first.asLDIF(),\n second.asLDIF(),\n \"LDIF equality is a precondition for valid hash values\",\n )\n self.assertEqual(hash(first), hash(second))\n\n def testHashInequality(self):\n \"\"\"\n Different modify operations have different hash values.\n \"\"\"\n first = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")]\n )\n\n second = delta.ModifyOp(\n \"cn=john,dc=example,dc=com\", [delta.Delete(\"homeDirectory\")]\n )\n\n self.assertNotEqual(first.asLDIF(), second.asLDIF())\n self.assertNotEqual(hash(first), hash(second))\n\n def testModifyOp_DNNotFound(self):\n \"\"\"\n If fail to modify when the RDN does not exists.\n \"\"\"\n root = self.getRoot()\n sut = delta.ModifyOp(\n \"cn=nope,dc=example,dc=com\",\n [delta.Add(\"foo\", [\"bar\"])],\n )\n\n deferred = sut.patch(root)\n\n failure = self.failureResultOf(deferred)\n self.assertIsInstance(failure.value, ldaperrors.LDAPNoSuchObject)\n\n def testRepr(self):\n \"\"\"\n Getting string representation\n \"\"\"\n sut = delta.ModifyOp(\"cn=john,dc=example,dc=com\", [delta.Delete(\"description\")])\n\n self.assertEqual(\n repr(sut),\n \"ModifyOp(dn='cn=john,dc=example,dc=com', \"\n \"modifications=[Delete('description', [])])\",\n )\n\n\nclass TestModificationComparison(unittest.TestCase):\n def testEquality_Add_True(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n self.assertEqual(a, b)\n\n def testEquality_AddVsDelete_False(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = delta.Delete(\"k\", [\"b\", \"c\", \"d\"])\n self.assertNotEqual(a, b)\n\n def testEquality_AttributeSet_False(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = attributeset.LDAPAttributeSet(\"k\", [\"b\", \"c\", \"d\"])\n 
self.assertNotEqual(a, b)\n\n def testEquality_List_False(self):\n a = delta.Add(\"k\", [\"b\", \"c\", \"d\"])\n b = [\"b\", \"c\", \"d\"]\n self.assertNotEqual(a, b)\n",
"step-ids": [
43,
46,
52,
54,
63
]
}
|
[
43,
46,
52,
54,
63
] |
from rest_framework import serializers
from books.models import Genres, Format, Book, Review, ExtraTableForPrice
class GenresSerializer(serializers.ModelSerializer):
    """Read-only view of a genre exposing just its title (nested into BookSerializer output)."""

    class Meta:
        model = Genres
        fields = ('title', )
class PriceSerializer(serializers.ModelSerializer):
    """Serializes a (format, price) row; used for the per-format price list in BookSerializer."""

    class Meta:
        model = ExtraTableForPrice
        fields = ('formats', 'price', )
class FormatSerializer(serializers.ModelSerializer):
    """Exposes only the title of a book format."""

    class Meta:
        model = Format
        fields = ('title', )
class BookSerializer(serializers.ModelSerializer):
    """
    Full serializer for Book.

    Writes accept every model field; reads enrich the payload with nested
    genre/review/price data and the number of related orders.
    """

    class Meta:
        model = Book
        fields = '__all__'

    def create(self, validated_data):
        # Many-to-many values cannot go through objects.create(); pull the
        # formats out first and attach them once the row exists.
        format_values = validated_data.pop('format', [])
        book = Book.objects.create(**validated_data)
        book.format.add(*format_values)
        return book

    def to_representation(self, instance):
        data = super().to_representation(instance)
        data['genre'] = GenresSerializer(instance.genre, context=self.context).data
        data['reviews'] = ReviewSerializer(instance.reviews.all(), many=True).data
        data['orders_count'] = instance.orders.count()
        data['price'] = PriceSerializer(instance.books_price.all(), many=True).data
        return data
class BookListSerializer(serializers.ModelSerializer):
    """Compact listing serializer: core book fields plus a link to the detail endpoint."""

    # Absolute URL of the book's detail view, resolved from its slug.
    details = serializers.HyperlinkedIdentityField(view_name='book-detail', lookup_field='slug')

    class Meta:
        model = Book
        fields = ['title', 'author', 'genre', 'cover', 'details']
class ReviewSerializer(serializers.ModelSerializer):
    """
    Serializer for book reviews.

    The author is never taken from the payload: `user` is read-only and is
    filled from the authenticated request in create().
    """

    user = serializers.PrimaryKeyRelatedField(read_only=True)

    class Meta:
        model = Review
        fields = ('user', 'book', 'text', 'rating', 'created_time')

    def validate_rating(self, rating):
        # Only the whole-number ratings 1..5 are accepted (same membership
        # test as `rating in range(1, 6)`).
        if rating not in (1, 2, 3, 4, 5):
            raise serializers.ValidationError('Оценка от 1 до 5')
        return rating

    def create(self, validated_data):
        author = self.context.get('request').user
        return Review.objects.create(user=author, **validated_data)
|
normal
|
{
"blob_id": "9c50a3abd353d5ba619eaa217dcc07ab76fb850c",
"index": 2519,
"step-1": "<mask token>\n\n\nclass BookSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Book\n fields = '__all__'\n\n def create(self, validated_data):\n formats = validated_data.pop('format', [])\n book = Book.objects.create(**validated_data)\n book.format.add(*formats)\n return book\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['genre'] = GenresSerializer(instance.genre, context=\n self.context).data\n representation['reviews'] = ReviewSerializer(instance.reviews.all(),\n many=True).data\n representation['orders_count'] = instance.orders.count()\n representation['price'] = PriceSerializer(instance.books_price.all(\n ), many=True).data\n return representation\n\n\nclass BookListSerializer(serializers.ModelSerializer):\n details = serializers.HyperlinkedIdentityField(view_name='book-detail',\n lookup_field='slug')\n\n\n class Meta:\n model = Book\n fields = ['title', 'author', 'genre', 'cover', 'details']\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(read_only=True)\n\n\n class Meta:\n model = Review\n fields = 'user', 'book', 'text', 'rating', 'created_time'\n\n def validate_rating(self, rating):\n if rating not in range(1, 6):\n raise serializers.ValidationError('Оценка от 1 до 5')\n return rating\n\n def create(self, validated_data):\n request = self.context.get('request')\n user = request.user\n review = Review.objects.create(user=user, **validated_data)\n return review\n",
"step-2": "<mask token>\n\n\nclass PriceSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ExtraTableForPrice\n fields = 'formats', 'price'\n\n\nclass FormatSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Format\n fields = 'title',\n\n\nclass BookSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Book\n fields = '__all__'\n\n def create(self, validated_data):\n formats = validated_data.pop('format', [])\n book = Book.objects.create(**validated_data)\n book.format.add(*formats)\n return book\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['genre'] = GenresSerializer(instance.genre, context=\n self.context).data\n representation['reviews'] = ReviewSerializer(instance.reviews.all(),\n many=True).data\n representation['orders_count'] = instance.orders.count()\n representation['price'] = PriceSerializer(instance.books_price.all(\n ), many=True).data\n return representation\n\n\nclass BookListSerializer(serializers.ModelSerializer):\n details = serializers.HyperlinkedIdentityField(view_name='book-detail',\n lookup_field='slug')\n\n\n class Meta:\n model = Book\n fields = ['title', 'author', 'genre', 'cover', 'details']\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(read_only=True)\n\n\n class Meta:\n model = Review\n fields = 'user', 'book', 'text', 'rating', 'created_time'\n\n def validate_rating(self, rating):\n if rating not in range(1, 6):\n raise serializers.ValidationError('Оценка от 1 до 5')\n return rating\n\n def create(self, validated_data):\n request = self.context.get('request')\n user = request.user\n review = Review.objects.create(user=user, **validated_data)\n return review\n",
"step-3": "<mask token>\n\n\nclass GenresSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genres\n fields = 'title',\n\n\nclass PriceSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ExtraTableForPrice\n fields = 'formats', 'price'\n\n\nclass FormatSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Format\n fields = 'title',\n\n\nclass BookSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Book\n fields = '__all__'\n\n def create(self, validated_data):\n formats = validated_data.pop('format', [])\n book = Book.objects.create(**validated_data)\n book.format.add(*formats)\n return book\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['genre'] = GenresSerializer(instance.genre, context=\n self.context).data\n representation['reviews'] = ReviewSerializer(instance.reviews.all(),\n many=True).data\n representation['orders_count'] = instance.orders.count()\n representation['price'] = PriceSerializer(instance.books_price.all(\n ), many=True).data\n return representation\n\n\nclass BookListSerializer(serializers.ModelSerializer):\n details = serializers.HyperlinkedIdentityField(view_name='book-detail',\n lookup_field='slug')\n\n\n class Meta:\n model = Book\n fields = ['title', 'author', 'genre', 'cover', 'details']\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(read_only=True)\n\n\n class Meta:\n model = Review\n fields = 'user', 'book', 'text', 'rating', 'created_time'\n\n def validate_rating(self, rating):\n if rating not in range(1, 6):\n raise serializers.ValidationError('Оценка от 1 до 5')\n return rating\n\n def create(self, validated_data):\n request = self.context.get('request')\n user = request.user\n review = Review.objects.create(user=user, **validated_data)\n return review\n",
"step-4": "from rest_framework import serializers\nfrom books.models import Genres, Format, Book, Review, ExtraTableForPrice\n\n\nclass GenresSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genres\n fields = 'title',\n\n\nclass PriceSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = ExtraTableForPrice\n fields = 'formats', 'price'\n\n\nclass FormatSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Format\n fields = 'title',\n\n\nclass BookSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Book\n fields = '__all__'\n\n def create(self, validated_data):\n formats = validated_data.pop('format', [])\n book = Book.objects.create(**validated_data)\n book.format.add(*formats)\n return book\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['genre'] = GenresSerializer(instance.genre, context=\n self.context).data\n representation['reviews'] = ReviewSerializer(instance.reviews.all(),\n many=True).data\n representation['orders_count'] = instance.orders.count()\n representation['price'] = PriceSerializer(instance.books_price.all(\n ), many=True).data\n return representation\n\n\nclass BookListSerializer(serializers.ModelSerializer):\n details = serializers.HyperlinkedIdentityField(view_name='book-detail',\n lookup_field='slug')\n\n\n class Meta:\n model = Book\n fields = ['title', 'author', 'genre', 'cover', 'details']\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(read_only=True)\n\n\n class Meta:\n model = Review\n fields = 'user', 'book', 'text', 'rating', 'created_time'\n\n def validate_rating(self, rating):\n if rating not in range(1, 6):\n raise serializers.ValidationError('Оценка от 1 до 5')\n return rating\n\n def create(self, validated_data):\n request = self.context.get('request')\n user = request.user\n review = Review.objects.create(user=user, **validated_data)\n return 
review\n",
"step-5": "from rest_framework import serializers\n\nfrom books.models import Genres, Format, Book, Review, ExtraTableForPrice\n\n\nclass GenresSerializer(serializers.ModelSerializer):\n class Meta:\n model = Genres\n fields = ('title', )\n\n\nclass PriceSerializer(serializers.ModelSerializer):\n class Meta:\n model = ExtraTableForPrice\n fields = ('formats', 'price', )\n\n\nclass FormatSerializer(serializers.ModelSerializer):\n class Meta:\n model = Format\n fields = ('title', )\n\n\nclass BookSerializer(serializers.ModelSerializer):\n class Meta:\n model = Book\n fields = '__all__'\n\n def create(self, validated_data):\n formats = validated_data.pop('format', [])\n book = Book.objects.create(**validated_data)\n book.format.add(*formats)\n return book\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['genre'] = GenresSerializer(instance.genre, context=self.context).data\n representation['reviews'] = ReviewSerializer(instance.reviews.all(), many=True).data\n representation['orders_count'] = instance.orders.count()\n representation['price'] = PriceSerializer(instance.books_price.all(), many=True).data\n return representation\n\n\nclass BookListSerializer(serializers.ModelSerializer):\n details = serializers.HyperlinkedIdentityField(view_name='book-detail', lookup_field='slug')\n\n class Meta:\n model = Book\n fields = ['title', 'author', 'genre', 'cover', 'details']\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = Review\n fields = ('user', 'book', 'text', 'rating', 'created_time')\n\n def validate_rating(self, rating):\n if rating not in range(1, 6):\n raise(serializers.ValidationError('Оценка от 1 до 5'))\n return rating\n\n def create(self, validated_data):\n request = self.context.get('request')\n user = request.user\n review = Review.objects.create(user=user, **validated_data)\n return review\n",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
class Renderer(common.Branded):
def __init__(self, eve):
self.eve = eve
self.t = 0
def load(self):
eve = self.eve
eve.cc(open(
'/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',
'rb').read())
def draw(self):
eve = self.eve
eve.VertexFormat(3)
eve.ClearColorRGB(0, 0, 100)
eve.Clear()
eve.Begin(GD.BITMAPS)
eve.BlendFunc(GD.SRC_ALPHA, 0)
night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)
self.t += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('/home/jamesb/git/gd2-asset/examples/nightstrike')
<|reserved_special_token_0|>
class Renderer(common.Branded):
def __init__(self, eve):
self.eve = eve
self.t = 0
def load(self):
eve = self.eve
eve.cc(open(
'/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',
'rb').read())
def draw(self):
eve = self.eve
eve.VertexFormat(3)
eve.ClearColorRGB(0, 0, 100)
eve.Clear()
eve.Begin(GD.BITMAPS)
eve.BlendFunc(GD.SRC_ALPHA, 0)
night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)
self.t += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GLOWR = 128, 256
GLOWR = 160, 400
sys.path.append('/home/jamesb/git/gd2-asset/examples/nightstrike')
<|reserved_special_token_0|>
class Renderer(common.Branded):
def __init__(self, eve):
self.eve = eve
self.t = 0
def load(self):
eve = self.eve
eve.cc(open(
'/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',
'rb').read())
def draw(self):
eve = self.eve
eve.VertexFormat(3)
eve.ClearColorRGB(0, 0, 100)
eve.Clear()
eve.Begin(GD.BITMAPS)
eve.BlendFunc(GD.SRC_ALPHA, 0)
night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)
self.t += 1
<|reserved_special_token_1|>
import sys
import array
import random
import math
import gameduino2.prep
import zlib
import struct
import gameduino as GD
from eve import align4
from PIL import Image
import numpy as np
import wave
import common
GLOWR = 128, 256
GLOWR = 160, 400
sys.path.append('/home/jamesb/git/gd2-asset/examples/nightstrike')
import night0
class Renderer(common.Branded):
def __init__(self, eve):
self.eve = eve
self.t = 0
def load(self):
eve = self.eve
eve.cc(open(
'/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',
'rb').read())
def draw(self):
eve = self.eve
eve.VertexFormat(3)
eve.ClearColorRGB(0, 0, 100)
eve.Clear()
eve.Begin(GD.BITMAPS)
eve.BlendFunc(GD.SRC_ALPHA, 0)
night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)
self.t += 1
<|reserved_special_token_1|>
import sys
import array
import random
import math
import gameduino2.prep
import zlib
import struct
import gameduino as GD
from eve import align4
from PIL import Image
import numpy as np
import wave
import common
GLOWR = (128, 256)
GLOWR = (160, 400)
sys.path.append("/home/jamesb/git/gd2-asset/examples/nightstrike")
import night0
class Renderer(common.Branded):
def __init__(self, eve):
self.eve = eve
self.t = 0
def load(self):
eve = self.eve
eve.cc(open("/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3", "rb").read())
def draw(self):
eve = self.eve
eve.VertexFormat(3)
eve.ClearColorRGB(0, 0, 100)
eve.Clear()
eve.Begin(GD.BITMAPS)
eve.BlendFunc(GD.SRC_ALPHA, 0)
night0.missile_a.draw(eve, 640, 360, 2, angle = self.t)
self.t += 1
|
flexible
|
{
"blob_id": "2471daad5969da29a20417a099a3ecd92fa036b4",
"index": 6393,
"step-1": "<mask token>\n\n\nclass Renderer(common.Branded):\n\n def __init__(self, eve):\n self.eve = eve\n self.t = 0\n\n def load(self):\n eve = self.eve\n eve.cc(open(\n '/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',\n 'rb').read())\n\n def draw(self):\n eve = self.eve\n eve.VertexFormat(3)\n eve.ClearColorRGB(0, 0, 100)\n eve.Clear()\n eve.Begin(GD.BITMAPS)\n eve.BlendFunc(GD.SRC_ALPHA, 0)\n night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)\n self.t += 1\n",
"step-2": "<mask token>\nsys.path.append('/home/jamesb/git/gd2-asset/examples/nightstrike')\n<mask token>\n\n\nclass Renderer(common.Branded):\n\n def __init__(self, eve):\n self.eve = eve\n self.t = 0\n\n def load(self):\n eve = self.eve\n eve.cc(open(\n '/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',\n 'rb').read())\n\n def draw(self):\n eve = self.eve\n eve.VertexFormat(3)\n eve.ClearColorRGB(0, 0, 100)\n eve.Clear()\n eve.Begin(GD.BITMAPS)\n eve.BlendFunc(GD.SRC_ALPHA, 0)\n night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)\n self.t += 1\n",
"step-3": "<mask token>\nGLOWR = 128, 256\nGLOWR = 160, 400\nsys.path.append('/home/jamesb/git/gd2-asset/examples/nightstrike')\n<mask token>\n\n\nclass Renderer(common.Branded):\n\n def __init__(self, eve):\n self.eve = eve\n self.t = 0\n\n def load(self):\n eve = self.eve\n eve.cc(open(\n '/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',\n 'rb').read())\n\n def draw(self):\n eve = self.eve\n eve.VertexFormat(3)\n eve.ClearColorRGB(0, 0, 100)\n eve.Clear()\n eve.Begin(GD.BITMAPS)\n eve.BlendFunc(GD.SRC_ALPHA, 0)\n night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)\n self.t += 1\n",
"step-4": "import sys\nimport array\nimport random\nimport math\nimport gameduino2.prep\nimport zlib\nimport struct\nimport gameduino as GD\nfrom eve import align4\nfrom PIL import Image\nimport numpy as np\nimport wave\nimport common\nGLOWR = 128, 256\nGLOWR = 160, 400\nsys.path.append('/home/jamesb/git/gd2-asset/examples/nightstrike')\nimport night0\n\n\nclass Renderer(common.Branded):\n\n def __init__(self, eve):\n self.eve = eve\n self.t = 0\n\n def load(self):\n eve = self.eve\n eve.cc(open(\n '/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3',\n 'rb').read())\n\n def draw(self):\n eve = self.eve\n eve.VertexFormat(3)\n eve.ClearColorRGB(0, 0, 100)\n eve.Clear()\n eve.Begin(GD.BITMAPS)\n eve.BlendFunc(GD.SRC_ALPHA, 0)\n night0.missile_a.draw(eve, 640, 360, 2, angle=self.t)\n self.t += 1\n",
"step-5": "import sys\nimport array\nimport random\nimport math\nimport gameduino2.prep\nimport zlib\nimport struct\nimport gameduino as GD\nfrom eve import align4\n\nfrom PIL import Image\nimport numpy as np\nimport wave\nimport common\n\nGLOWR = (128, 256)\nGLOWR = (160, 400)\n\nsys.path.append(\"/home/jamesb/git/gd2-asset/examples/nightstrike\")\nimport night0\n\nclass Renderer(common.Branded):\n def __init__(self, eve):\n self.eve = eve\n self.t = 0\n\n def load(self):\n eve = self.eve\n\n eve.cc(open(\"/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3\", \"rb\").read())\n\n def draw(self):\n eve = self.eve\n\n eve.VertexFormat(3)\n eve.ClearColorRGB(0, 0, 100)\n eve.Clear()\n\n eve.Begin(GD.BITMAPS)\n eve.BlendFunc(GD.SRC_ALPHA, 0)\n\n night0.missile_a.draw(eve, 640, 360, 2, angle = self.t)\n self.t += 1\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from django.shortcuts import render
import datetime
from django.http import*
from django.core.files.storage import FileSystemStorage
import uuid
import os
import cv2
import numpy as np
from pathlib import Path
def index(request):
    """Render the landing page with today's date formatted dd-mm-YYYY."""
    print(request.session)  # NOTE(review): debug output - consider removing or using logging
    today = datetime.datetime.now()
    # Bug fix: the format string was "%d-%m=%Y" (typo '=' instead of '-').
    return render(request, 'index.html', {
        "today": today.strftime("%d-%m-%Y")})
def isFileOpen(request):
    """
    Return True when an editing session is active: the undo stack is
    non-empty and both 'name' and 'email' are stored in the session.

    Bug fixes: the original returned the undefined names `true`/`false`
    (NameError), and compared the stack *list* to an int (`stack > 0`,
    a TypeError in Python 3) where the length was clearly intended.
    """
    stack = request.session['stack']
    return (len(stack) > 0
            and request.session.get('name') is not None
            and request.session.get('email') is not None)
def getState(request):
    """
    Report whether an editing session is open, plus its user and file details.

    Bug fixes: the original tested the function object `isFileOpen` (always
    truthy) instead of calling it, and the closed-session branch used the
    undefined names `none` and bare `email` (NameError).
    """
    if isFileOpen(request):
        fileName = request.session['stack'][0]
        email = request.session['email']
        name = request.session['name']
        return JsonResponse({'state': 'open', 'name': name, 'email': email, 'fileName': fileName})
    else:
        # No open session: keep the same keys so clients can parse both cases.
        return JsonResponse({'state': None, 'name': '', 'email': '', 'fileName': ''})
def openFile(request):
    """
    Start an editing session from an uploaded image.

    Saves the upload, builds a 500px-high working copy (aspect ratio kept),
    and seeds the per-session editor state (undo stack, redo log, user
    identity). Responds with the stored upload name; non-POST requests fall
    through (returning None, as in the original).
    """
    if request.method == 'POST' and request.FILES['fileName']:
        upload = request.FILES['fileName']
        saved_name = FileSystemStorage().save(upload.name, upload)

        source_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'filestore/%s' % saved_name))
        image = cv2.imread(source_path)
        (h, w) = image.shape[:2]
        ratio = 500 / float(h)
        target = (int(w * ratio), 500)
        working = cv2.resize(image, target, interpolation=cv2.INTER_AREA)

        # Unique name for the derived file: original path + last 3 uuid chars.
        working_path = str(Path(source_path).with_suffix('')) + str(uuid.uuid4())[-3:] + '.png'
        print(working_path)
        cv2.imwrite(working_path, working)
        print(image.shape)

        # Seed the editor session state.
        request.session['stack'] = [working_path.split('/')[-1]]
        request.session['size'] = ()
        request.session['redo'] = True
        request.session['oriImg'] = saved_name
        request.session['borderSize'] = 0
        request.session['email'] = request.POST['email']
        request.session['name'] = request.POST.get('name')
        request.session['redostack'] = []
        return JsonResponse({'fileName': saved_name})
def getImage(request):
    """
    Stream the current working image (head of the undo stack) to the client.
    Returns an empty response when there is no active session or no image.
    """
    if request.method == "GET" and request.session.has_key('stack'):
        stack = request.session['stack']
        if stack:
            current = os.path.abspath(
                os.path.join(os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
            return FileResponse(open(current, 'rb'))
    return HttpResponse('')
def showOrignal(request):
    """
    Discard every derived image on the stack and make the originally
    uploaded file the current image again.

    (Function name spelling kept as-is: it is part of the URL-facing API.)
    """
    if request.method == "GET" and request.session.has_key('oriImg'):
        for name in request.session['stack']:
            os.remove(os.path.abspath(
                os.path.join(os.path.dirname(__file__), '..', 'filestore/%s' % name)))
        request.session.pop('stack')
        # Reset the stack to hold only the original upload.
        request.session['stack'] = [request.session['oriImg']]
        return JsonResponse({'response': 'orignal'})
    else:
        return HttpResponse('')
def closeFile(request):
    """
    End the editing session: delete the stored image files and clear the
    session keys created by openFile().

    Bug fix: the original deleted only the derived files on the undo stack,
    so the originally uploaded image ('oriImg') leaked into the filestore on
    every session. It is now removed too; deletions are guarded with an
    existence check because after showOrignal() the original file name is
    itself on the stack and would otherwise be removed twice.
    """
    if request.method == "GET" and request.session.has_key('stack'):
        store = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'filestore'))
        doomed = list(request.session['stack'])
        doomed.append(request.session.get('oriImg'))
        for name in doomed:
            if not name:
                continue
            path = os.path.join(store, name)
            if os.path.exists(path):
                os.remove(path)
        request.session.pop('stack')
        request.session.pop('email')
        request.session.pop('name')
        # Clear the remaining bookkeeping keys as well (default avoids KeyError).
        request.session.pop('oriImg', None)
        request.session.pop('redostack', None)
        return JsonResponse({'response': 'closed'})
    else:
        return HttpResponse('')
def undo(request):
    """
    Step back one edit: pop the newest derived image off the stack head and
    delete its file. The `len > 1` guard means the last remaining entry (the
    base image) is never removed.
    """
    if request.method == "GET" and request.session.has_key('stack') and len(request.session['stack']) > 1:
        stack = request.session['stack']
        newest = stack.pop(0)
        os.remove(os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'filestore/%s' % newest)))
        request.session['stack'] = stack
        return JsonResponse({"response": "undid"})
    else:
        return HttpResponse('')
def redo(request):
    """
    Replay the oldest recorded operation from the redo log.

    The session 'redo' flag is cleared first so the replayed handler does not
    re-record itself (each handler sets the flag back to True when done).
    """
    if request.method == "GET" and request.session.has_key('redostack') and len(request.session['redostack']) > 0:
        redoStack = request.session['redostack']
        request.session['redo'] = False
        value = redoStack.pop()
        if value == 'grayscale':
            toGrayscale(request)
        if value == 'cool':
            # NOTE(review): no cool() is visible in this module - confirm it
            # exists elsewhere, otherwise this branch raises NameError.
            cool(request)
        # Bug fix: the GET branch of scaleit() records the tag as lowercase
        # 'scaleit' while this dispatch matched only 'scaleIt', so replayed
        # scales were silently dropped. Accept both spellings.
        if value.lower() == 'scaleit':
            scaleit(request)
        if value == 'setBorder':
            setBorder(request)
        request.session['redostack'] = redoStack
        return JsonResponse({'response': 'redo'})
    else:
        # Mirror the other views: empty response when there is nothing to redo
        # (the original fell off the end and returned None, which Django rejects).
        return HttpResponse('')
def toGrayscale(request):
    """
    Convert the current image to grayscale and push the result onto the
    undo stack. Records a 'grayscale' redo tag unless this call is itself a
    redo replay (session 'redo' flag is False during replays).
    """
    if request.method == "GET" and request.session.has_key('stack'):
        stack = request.session['stack']
        redostack = request.session['redostack']
        if stack:
            src = os.path.abspath(
                os.path.join(os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
            # Derived file gets a fresh uuid-suffixed name next to the source.
            dst = str(Path(src).with_suffix('')) + str(uuid.uuid4()) + '.png'
            gray = cv2.cvtColor(cv2.imread(src), cv2.COLOR_BGR2GRAY)
            cv2.imwrite(dst, gray)
            stack.insert(0, dst.split('/')[-1])
            if request.session['redo']:
                redostack.insert(0, 'grayscale')
            request.session['redo'] = True
            request.session['stack'] = stack
            request.session['redostack'] = redostack
            return JsonResponse({'response': 'convertedToGrayscale'})
        else:
            return HttpResponse()
def scaleit(request):
    """
    Resize the current image and push the result onto the undo stack.

    POST: reads the target width/height from form fields newX/newY.
    GET:  re-applies the size stored in the session (used by redo()).

    Bug fixes versus the original:
    * session 'size' now stores the (width, height) pair passed to
      cv2.resize; it previously stored newimg.shape, which is
      (rows, cols, channels), so a redo replay swapped the two axes;
    * the redo tag is recorded as 'scaleIt' in both branches (the GET branch
      used lowercase 'scaleit', which redo() never matched) and only when the
      call is not itself a redo replay, matching toGrayscale()/setBorder().
    """
    if request.method == "POST" and request.session.has_key('stack'):
        newX = int(request.POST['newX'])
        newY = int(request.POST['newY'])
        stack = request.session['stack']
        redostack = request.session['redostack']
        fileAbsPath = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
        scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()) + '.png'
        oriimg = cv2.imread(fileAbsPath)
        newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)
        request.session['size'] = (newX, newY)  # (width, height), as cv2 expects
        cv2.imwrite(scalefilepath, newimg)
        stack.insert(0, scalefilepath.split('/')[-1])
        if request.session['redo']:
            redostack.insert(0, 'scaleIt')
        request.session['redo'] = True
        request.session['redostack'] = redostack
        request.session['stack'] = stack
        return JsonResponse({'response': 'scaled'})
    if request.method == "GET" and request.session.has_key('size'):
        newX = int(request.session['size'][0])
        newY = int(request.session['size'][1])
        stack = request.session['stack']
        redostack = request.session['redostack']
        fileAbsPath = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
        scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()) + '.png'
        oriimg = cv2.imread(fileAbsPath)
        newimg = cv2.resize(oriimg, (newX, newY))
        request.session['size'] = (newX, newY)
        cv2.imwrite(scalefilepath, newimg)
        stack.insert(0, scalefilepath.split('/')[-1])
        if request.session['redo']:
            redostack.insert(0, 'scaleIt')
        request.session['redo'] = True
        request.session['redostack'] = redostack
        request.session['stack'] = stack
        return JsonResponse({'response': 'scaled'})
    else:
        return HttpResponse('')
def cropIt(request):
    """Crop the image on top of the undo stack to the POSTed rectangle.

    X/Y are the top-left corner; h/w are treated as absolute END coordinates
    (numpy slice bounds), not a height/width -- the slice is oriimg[y:h, x:w].
    Returns JSON {'response': 'croped'} (sic -- the client keys on this
    spelling) or an empty response for non-POST / missing-session requests.
    """
    if request.method == "POST" and 'stack' in request.session:
        x = int(request.POST['X'])
        y = int(request.POST['Y'])
        h = int(request.POST['h'])
        w = int(request.POST['w'])
        stack = request.session['stack']
        redostack = request.session['redostack']
        fileAbsPath = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
        cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()) + '.png'
        oriimg = cv2.imread(fileAbsPath)
        crop_img = oriimg[y:h, x:w]
        cv2.imwrite(cropfilepath, crop_img)
        cropfilename = cropfilepath.split('/')[-1]
        stack.insert(0, cropfilename)
        # CONSISTENCY FIX: every sibling operation (grayscale, setBorder,
        # cool, rotateRight, ...) records itself on the redo stack; cropIt
        # previously did not, so a crop could never be redone.
        if request.session['redo']:
            redostack.insert(0, 'cropIt')
        request.session['redo'] = True
        request.session['redostack'] = redostack
        request.session['stack'] = stack
        return JsonResponse({'response': 'croped'})
    else:
        return HttpResponse('')
def _write_bordered_copy(fileAbsPath, bordersize):
    """Read the image at fileAbsPath, pad it with a constant border of
    bordersize px on every side (colored with the first channel's mean of the
    bottom two pixel rows), write it to a fresh random .png next to the source
    file and return that new path."""
    borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()) + '.png'
    oriimg = cv2.imread(fileAbsPath)
    row, col = oriimg.shape[:2]
    bottom = oriimg[row - 2:row, 0:col]
    mean = cv2.mean(bottom)[0]
    border = cv2.copyMakeBorder(oriimg,
                                top=bordersize, bottom=bordersize,
                                left=bordersize, right=bordersize,
                                borderType=cv2.BORDER_CONSTANT,
                                value=[mean, mean, mean])
    cv2.imwrite(borderfilepath, border)
    return borderfilepath


def setBorder(request):
    """Add a constant-color border around the current image.

    POST: border size comes from the form; the size is remembered in the
    session and the operation is recorded on the redo stack.
    GET:  re-apply the remembered border size (redo path; does not touch the
    redo stack, matching the original behavior).
    Returns JSON {'response': 'croped'} (sic -- kept for the client) or an
    empty response otherwise.
    """
    if request.method == "POST" and 'stack' in request.session:
        bordersize = int(request.POST['size'])
        stack = request.session['stack']
        redostack = request.session['redostack']
        request.session['borderSize'] = bordersize
        fileAbsPath = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
        borderfilepath = _write_bordered_copy(fileAbsPath, bordersize)
        stack.insert(0, borderfilepath.split('/')[-1])
        if request.session['redo']:
            redostack.insert(0, 'setBorder')
        request.session['redo'] = True
        request.session['redostack'] = redostack
        request.session['stack'] = stack
        return JsonResponse({'response': 'croped'})
    if request.method == "GET" and 'borderSize' in request.session:
        bordersize = request.session['borderSize']
        stack = request.session['stack']
        fileAbsPath = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
        borderfilepath = _write_bordered_copy(fileAbsPath, bordersize)
        stack.insert(0, borderfilepath.split('/')[-1])
        request.session['stack'] = stack
        return JsonResponse({'response': 'croped'})
    else:
        return HttpResponse('')
def cool(request):
    """Apply the PARULA false-color map to the current image (GET only).

    Pushes the recolored file onto the undo stack and records 'cool' on the
    redo stack. Returns JSON {'response': 'convertedToGrayscale'} (sic -- the
    client keys on this value, so it is kept) or an empty response otherwise.
    """
    if request.method == "GET" and 'stack' in request.session:
        stack = request.session['stack']
        redostack = request.session['redostack']
        if len(stack) > 0:
            fileAbsPath = os.path.abspath(os.path.join(
                os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
            coolfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()) + '.png'
            coolImage = cv2.imread(fileAbsPath)
            coolImage = cv2.applyColorMap(coolImage, cv2.COLORMAP_PARULA)
            cv2.imwrite(coolfilepath, coolImage)
            stack.insert(0, coolfilepath.split('/')[-1])
            if request.session['redo']:
                redostack.insert(0, 'cool')
            request.session['redo'] = True
            request.session['stack'] = stack
            request.session['redostack'] = redostack
            return JsonResponse({'response': 'convertedToGrayscale'})
        # BUG FIX: an empty stack previously fell off the end of the view,
        # returning None (an error in Django).
        return HttpResponse()
    else:
        return HttpResponse()
def addWatermark(request):
    """Blend the POSTed text 't' onto the current image at 50% opacity.

    The text is drawn in red at (10, 30), blended 50/50 with the original,
    the result is pushed onto the undo stack and recorded on the redo stack.
    Returns JSON {'response': 'croped'} (sic -- kept for the client) on
    success, an empty response otherwise.
    """
    if request.method == "POST" and 'stack' in request.session:
        text = request.POST['t']
        stack = request.session['stack']
        redostack = request.session['redostack']
        request.session['text'] = text
        fileAbsPath = os.path.abspath(os.path.join(
            os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
        textimgPath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()) + '.png'
        oriimg = cv2.imread(fileAbsPath)
        overlayimg = oriimg.copy()
        output = oriimg.copy()
        # BUG FIX: was 'cv2. cv2.FONT_HERSHEY_SIMPLEX' (only worked via the
        # opencv-python 'cv2.cv2' alias) and called text.format(0.5), which
        # would mangle -- or raise on -- user text containing '{...}'.
        cv2.putText(overlayimg, text, (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
        cv2.addWeighted(overlayimg, 0.5, output, 0.5, 0, output)
        cv2.imwrite(textimgPath, output)
        stack.insert(0, textimgPath.split('/')[-1])
        if request.session['redo']:
            redostack.insert(0, 'addWatermark')
        request.session['redo'] = True
        request.session['redostack'] = redostack
        request.session['stack'] = stack
        return JsonResponse({'response': 'croped'})
    # BUG FIX: the original contained a dangling copy-paste of setBorder's
    # GET branch that computed values and then fell off the end, returning
    # None (an error in Django). Non-POST requests now get an empty response.
    return HttpResponse('')
def rotateRight(request):
    """Rotate the current image 90 degrees clockwise (GET only).

    Pushes the rotated file onto the undo stack and records 'rotateRight' on
    the redo stack. Returns JSON {'response': 'rotated'} on success, an empty
    response otherwise.
    """
    if request.method == "GET" and 'stack' in request.session:
        stack = request.session['stack']
        redostack = request.session['redostack']
        if len(stack) > 0:
            fileAbsPath = os.path.abspath(os.path.join(
                os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
            rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()) + '.png'
            srcImage = cv2.imread(fileAbsPath)
            # BUG FIX: the old getRotationMatrix2D/warpAffine combination
            # rotated about the centre onto a swapped-but-uncentred (h, w)
            # canvas, clipping the corners of any non-square image.
            # cv2.rotate handles the exact 90-degree case losslessly.
            # NOTE(review): the old call used a positive (counter-clockwise)
            # angle despite the "rotateRight" name; clockwise is assumed to be
            # the intent -- confirm against the UI.
            rotated = cv2.rotate(srcImage, cv2.ROTATE_90_CLOCKWISE)
            cv2.imwrite(rotatefilepath, rotated)
            stack.insert(0, rotatefilepath.split('/')[-1])
            if request.session['redo']:
                redostack.insert(0, 'rotateRight')
            request.session['redo'] = True
            request.session['stack'] = stack
            request.session['redostack'] = redostack
            return JsonResponse({'response': 'rotated'})
        # BUG FIX: an empty stack previously returned None from the view.
        return HttpResponse()
    else:
        return HttpResponse()
def overlay(request):
    """Blend an uploaded image onto the current one (POST with 'fileName').

    The upload is pasted at a fixed (50, 50) offset onto a white canvas sized
    like the current image, then alpha-blended 70/30 with it. The result is
    pushed onto the undo stack. Returns JSON {'response': 'rotated'} (sic --
    kept because the client may key on it) on success, an empty response
    otherwise.
    """
    if request.method == "POST" and 'stack' in request.session:
        stack = request.session['stack']
        if len(stack) > 0:
            imageFile = request.FILES['fileName']
            fs = FileSystemStorage()
            imageFileName = fs.save(imageFile.name, imageFile)
            imgpath = os.path.abspath(os.path.join(
                os.path.dirname(__file__), '..', 'filestore/%s' % imageFileName))
            img = cv2.imread(imgpath)
            oriimgpath = os.path.abspath(os.path.join(
                os.path.dirname(__file__), '..', 'filestore/%s' % stack[0]))
            oriimg = cv2.imread(oriimgpath)
            h, w = oriimg.shape[:2]
            # White canvas resized to match the base image so addWeighted
            # gets operands of identical shape.
            canvasName = 'large_white_square.png'
            transImgPath = os.path.abspath(os.path.join(
                os.path.dirname(__file__), '..', 'filestore/%s' % canvasName))
            canvas = cv2.imread(transImgPath)
            canvas = cv2.resize(canvas, (w, h))
            # NOTE(review): if the upload plus the 50px offset exceeds the
            # canvas, this slice assignment raises a broadcast error --
            # the upload size is not validated anywhere visible here.
            x_offset = y_offset = 50
            canvas[y_offset:y_offset + img.shape[0],
                   x_offset:x_offset + img.shape[1]] = img
            dst = cv2.addWeighted(oriimg, 0.7, canvas, 0.3, 0)
            # Only the last 3 chars of the uuid are used here (original
            # "dirty" naming scheme for this operation), kept as-is.
            uui = str(uuid.uuid4())
            overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:] + '.png'
            cv2.imwrite(overlayfilepath, dst)
            stack.insert(0, overlayfilepath.split('/')[-1])
            request.session['redo'] = True
            request.session['stack'] = stack
            return JsonResponse({'response': 'rotated'})
        # BUG FIX: an empty stack previously fell off the end, returning
        # None (an error in Django).
        return HttpResponse()
    else:
        return HttpResponse()
|
normal
|
{
"blob_id": "3378ce72ae67d09258554048138b7f9023000922",
"index": 6619,
"step-1": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\n<mask token>\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\n<mask token>\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = 
request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n 
request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and 
request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\n<mask token>\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = 
FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-2": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\n<mask token>\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method == 'GET' and request.session.has_key('oriImg'):\n stack = request.session['stack']\n for file 
in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n stack = []\n stack.insert(0, request.session['oriImg'])\n request.session['stack'] = stack\n return JsonResponse({'response': 'orignal'})\n else:\n return HttpResponse('')\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 
'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), 
int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\n<mask token>\n\n\ndef cool(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.applyColorMap(grayImage, cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'cool')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return 
JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\n<mask token>\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], 
x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-3": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\n<mask token>\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method == 'GET' and request.session.has_key('oriImg'):\n stack = request.session['stack']\n for file 
in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n stack = []\n stack.insert(0, request.session['oriImg'])\n request.session['stack'] = stack\n return JsonResponse({'response': 'orignal'})\n else:\n return HttpResponse('')\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 
'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), 
int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef setBorder(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n bordersize = int(request.POST['size'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n request.session['borderSize'] = bordersize\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n 
borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n if request.session['redo']:\n redostack.insert(0, 'setBorder')\n request.session['redo'] = True\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n if request.method == 'GET' and request.session.has_key('borderSize'):\n bordersize = request.session['borderSize']\n stack = request.session['stack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef cool(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.applyColorMap(grayImage, cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'cool')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return 
JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\n<mask token>\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], 
x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-4": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\ndef getState(request):\n if isFileOpen:\n fileName = request.session['stack'][0]\n email = request.session['email']\n name = request.session['name']\n return JsonResponse({'state': 'open', 'name': name, 'email': email,\n 'fileName': fileName})\n else:\n return JsonResponse({'state': none, 'name': '', email: '',\n 'fileName': ''})\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n 
fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method == 'GET' and request.session.has_key('oriImg'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n stack = []\n stack.insert(0, request.session['oriImg'])\n request.session['stack'] = stack\n return JsonResponse({'response': 'orignal'})\n else:\n return HttpResponse('')\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = 
redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = 
request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef setBorder(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n bordersize = int(request.POST['size'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n request.session['borderSize'] = bordersize\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = 
cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n if request.session['redo']:\n redostack.insert(0, 'setBorder')\n request.session['redo'] = True\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n if request.method == 'GET' and request.session.has_key('borderSize'):\n bordersize = request.session['borderSize']\n stack = request.session['stack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef cool(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.applyColorMap(grayImage, 
cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'cool')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef addWatermark(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n text = request.POST['t']\n print(text)\n stack = request.session['stack']\n redostack = request.session['redostack']\n request.session['text'] = text\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n textimgPath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()\n ) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n overlay = oriimg.copy()\n output = oriimg.copy()\n cv2.putText(overlay, text.format(0.5), (10, 30), cv2.cv2.\n FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)\n cv2.addWeighted(overlay, 0.5, output, 1 - 0.5, 0, output)\n cv2.imwrite(textimgPath, output)\n textimgName = textimgPath.split('/')[-1]\n stack.insert(0, textimgName)\n if request.session['redo']:\n redostack.insert(0, 'addWatermark')\n request.session['redo'] = True\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n if request.method == 'GET' and request.session.has_key('borderSize'):\n bordersize = request.session['borderSize']\n stack = request.session['stack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = 
request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = 
str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-5": "from django.shortcuts import render\nimport datetime\nfrom django.http import*\nfrom django.core.files.storage import FileSystemStorage\nimport uuid \nimport os\nimport cv2\nimport numpy as np\nfrom pathlib import Path\n\ndef index(request):\n print(request.session);\n today=datetime.datetime.now()\n return render(request,'index.html',{\n \"today\":today.strftime(\"%d-%m=%Y\")})\n\ndef isFileOpen(request):\n stack=request.session['stack']\n if stack>0 and request.session.get('name')!=None and request.session.get('email')!=None:\n return true\n \n else:\n return false\n \n\t\n\ndef getState(request):\n if(isFileOpen):\n fileName=request.session['stack'][0]\n email=request.session['email']\n name=request.session['name']\n return JsonResponse({'state':'open','name':name,'email':email,'fileName':fileName})\n \n else:\n return JsonResponse({'state':none,'name':'',email:'','fileName':''})\t\n \n \n\ndef openFile(request):\n if request.method=='POST' and request.FILES['fileName']:\n imageFile=request.FILES['fileName']\n fs=FileSystemStorage()\n imageFileName=fs.save(imageFile.name,imageFile)\n stack=[]\n redostack=[]\n \n imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))\n img=cv2.imread(imgpath)\n (h, w) = img.shape[:2]\n r = 500 / float(h)\n dim = (int(w * r),500)\n \n stdimg=cv2.resize(img,dim,interpolation=cv2.INTER_AREA)\n stdimgPath=str(Path(imgpath).with_suffix(''))+str(uuid.uuid4())[-3:]+'.png' \n print(stdimgPath)\n cv2.imwrite(stdimgPath,stdimg)\n stdFileName=stdimgPath.split('/')[-1];\n\n stack.append(stdFileName)\n request.session['stack']=stack\n print(img.shape)\n request.session['size']=()\n request.session['redo']=True\n request.session['oriImg']=imageFileName\n request.session['borderSize']=0;\n request.session['email']=request.POST['email']\n request.session['name']=request.POST.get('name')\n request.session['redostack']=redostack\n \t\n return JsonResponse({'fileName':imageFileName})\n\ndef 
getImage(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n if len(stack)>0:\n fileToServer=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))\n \n return FileResponse(open(fileToServer,'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method==\"GET\" and request.session.has_key('oriImg'):\n stack=request.session['stack']\n for file in stack:\n fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))\n os.remove(fileDelete);\n request.session.pop('stack')\n stack=[]\n stack.insert(0,request.session['oriImg'])\n request.session['stack']=stack\n return JsonResponse({'response':'orignal'})\n else:\n return HttpResponse('')\n \n \n\n\ndef closeFile(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n for file in stack:\n fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))\n os.remove(fileDelete);\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response':'closed'})\n else:\n return HttpResponse('');\n\ndef undo(request):\n if request.method==\"GET\" and request.session.has_key('stack') and len(request.session['stack'])>1:\n stack=request.session['stack']\n fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack.pop(0)))\n os.remove(fileDelete);\n request.session['stack']=stack;\n return JsonResponse({\"response\":\"undid\"})\n else:\n return HttpResponse('')\n\ndef redo(request):\n if request.method==\"GET\" and request.session.has_key('redostack') and len(request.session['redostack'])>0:\n redoStack=request.session['redostack']\n request.session['redo']=False;\n value=redoStack.pop()\n if(value=='grayscale'):\n toGrayscale(request)\n if(value=='cool'):\n cool(request)\n if(value=='scaleIt'):\n scaleit(request)\n 
if(value=='setBorder'):\n setBorder(request); \n request.session['redostack']=redoStack;\n return JsonResponse({'response':'redo'})\n\n\ndef toGrayscale(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n redostack=request.session['redostack']\n if len(stack)>0:\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......\n grayImage=cv2.imread(fileAbsPath)\n grayImage=cv2.cvtColor(grayImage,cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath,grayImage)\n gfilename=grayscalefilepath.split('/')[-1];\n stack.insert(0,gfilename)\n if request.session['redo']:\n redostack.insert(0,'grayscale')\n request.session['redo']=True\n request.session['stack']=stack\n request.session['redostack']=redostack\n return JsonResponse({'response':'convertedToGrayscale'}) \n else:\n return HttpResponse()\n\ndef scaleit(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n newX=int(request.POST['newX'])\n newY=int(request.POST['newY'])\n \n request.session['size']=(newX,newY)\n stack=request.session['stack']\n redostack=request.session['redostack']\n \n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n \n oriimg=cv2.imread(fileAbsPath)\n newimg=cv2.resize(oriimg,(newX,newY),interpolation=cv2.INTER_AREA)\n request.session['size']=newimg.shape;\n cv2.imwrite(scalefilepath,newimg);\n \n scalefilename=scalefilepath.split('/')[-1]\n stack.insert(0,scalefilename)\n redostack.insert(0,'scaleIt')\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'scaled'})\n if request.method==\"GET\" and request.session.has_key('size'):\n newX=request.session['size'][0]\n 
newY=request.session['size'][1]\n \n \n stack=request.session['stack']\n redostack=request.session['redostack']\n \n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n \n oriimg=cv2.imread(fileAbsPath)\n newimg=cv2.resize(oriimg,(int(newX),int(newY)))\n request.session['size']=newimg.shape;\n cv2.imwrite(scalefilepath,newimg);\n \n scalefilename=scalefilepath.split('/')[-1]\n stack.insert(0,scalefilename)\n redostack.insert(0,'scaleit')\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'scaled'})\n else:\n return HttpResponse('')\n \n\ndef cropIt(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n x=int(request.POST['X']);\n y=int(request.POST['Y']);\n h=int(request.POST['h'])\n w=int(request.POST['w'])\n stack=request.session['stack']\n redostack=request.session['redostack']\n \n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n cropfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n\n oriimg=cv2.imread(fileAbsPath)\n\n \n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath,crop_img);\n cropfilename=cropfilepath.split('/')[-1]\n stack.insert(0,cropfilename)\n \n request.session['redostack']=redostack;\n request.session['stack']=stack;\n\n return JsonResponse({'response':'croped'})\n else:\n return HttpResponse('') \n \ndef setBorder(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n bordersize=int(request.POST['size']);\n stack=request.session['stack']\n redostack=request.session['redostack']\n request.session['borderSize']=bordersize\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n 
borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n\n oriimg=cv2.imread(fileAbsPath)\n\n row,col=oriimg.shape[:2]\n bottom=oriimg[row-2:row,0:col]\n mean=cv2.mean(bottom)[0]\n border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean]) \n \n cv2.imwrite(borderfilepath,border);\n borderfilename=borderfilepath.split('/')[-1]\n stack.insert(0,borderfilename)\n if request.session['redo']:\n redostack.insert(0,'setBorder')\n request.session['redo']=True\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'croped'})\n if request.method==\"GET\" and request.session.has_key('borderSize'):\n bordersize=request.session['borderSize'];\n stack=request.session['stack']\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n oriimg=cv2.imread(fileAbsPath)\n row,col=oriimg.shape[:2]\n bottom=oriimg[row-2:row,0:col]\n mean=cv2.mean(bottom)[0]\n border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean])\n cv2.imwrite(borderfilepath,border);\n borderfilename=borderfilepath.split('/')[-1]\n stack.insert(0,borderfilename)\n request.session['stack']=stack;\n return JsonResponse({'response':'croped'})\n\n else:\n return HttpResponse('')\n\n\ndef cool(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n redostack=request.session['redostack']\n if len(stack)>0:\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......\n 
grayImage=cv2.imread(fileAbsPath)\n grayImage=cv2.applyColorMap(grayImage,cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath,grayImage)\n gfilename=grayscalefilepath.split('/')[-1];\n stack.insert(0,gfilename)\n if request.session['redo']:\n redostack.insert(0,'cool')\n request.session['redo']=True\n request.session['stack']=stack\n request.session['redostack']=redostack\n return JsonResponse({'response':'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\n\n\n\n\n\ndef addWatermark(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n text=request.POST['t']\n print(text);\n stack=request.session['stack']\n redostack=request.session['redostack']\n request.session['text']=text\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n textimgPath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n\n oriimg=cv2.imread(fileAbsPath)\n\n overlay=oriimg.copy()\n output=oriimg.copy()\n cv2.putText(overlay,text.format(0.5),(10,30),cv2. 
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)\n \n\n cv2.addWeighted(overlay,0.5,output,1-0.5,0,output)\n \n cv2.imwrite(textimgPath,output);\n textimgName=textimgPath.split('/')[-1]\n stack.insert(0,textimgName)\n if request.session['redo']:\n redostack.insert(0,'addWatermark')\n request.session['redo']=True\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'croped'})\n if request.method==\"GET\" and request.session.has_key('borderSize'):\n bordersize=request.session['borderSize'];\n stack=request.session['stack']\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n oriimg=cv2.imread(fileAbsPath)\n row,col=oriimg.shape[:2]\n bottom=oriimg[row-2:row,0:col]\n\ndef rotateRight(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n redostack=request.session['redostack']\n if len(stack)>0:\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n rotatefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......\n rotateImage=cv2.imread(fileAbsPath)\n (h,w)=rotateImage.shape[:2]\n center=(w/2,h/2)\n angle90=90\n scale=1.0\n M=cv2.getRotationMatrix2D(center,angle90,scale)\n rotated180=cv2.warpAffine(rotateImage,M,(h,w))\n\n cv2.imwrite(rotatefilepath,rotated180)\n gfilename=rotatefilepath.split('/')[-1];\n stack.insert(0,gfilename)\n if request.session['redo']:\n redostack.insert(0,'rotateRight')\n request.session['redo']=True\n request.session['stack']=stack\n request.session['redostack']=redostack\n return JsonResponse({'response':'rotated'})\n else:\n return HttpResponse()\n\ndef overlay(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n stack=request.session['stack']\n if len(stack)>0:\n 
imageFile=request.FILES['fileName']\n fs=FileSystemStorage()\n imageFileName=fs.save(imageFile.name,imageFile)\n imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))\n img=cv2.imread(imgpath)\n oriimgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))\n oriimg=cv2.imread(oriimgpath)\n h,w=oriimg.shape[:2]\n print(h,w);\n\n tsa='large_white_square.png'; \n transImgPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%tsa))\n tsa=cv2.imread(transImgPath);\n tsa=cv2.resize(tsa,(w,h))\n h,w=tsa.shape[:2]\n print(h,w)\n x_offset=y_offset=50\n tsa[y_offset:y_offset+img.shape[0], x_offset:x_offset+img.shape[1]] = img\n h,w=tsa.shape[:2]\n print(h,w)\n\n \n dst=cv2.addWeighted(oriimg,0.7,tsa,0.3,0);\n uui=str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath=str(Path(oriimgpath).with_suffix(''))+uui[-3:]+'.png' #here dirty coding......\n cv2.imwrite(overlayfilepath,dst);\n overlayfilename=overlayfilepath.split('/')[-1]\n stack.insert(0,overlayfilename) \n print(stack[0]);\n if request.session['redo']:\n #redostack.insert(0,'overlayed')\n request.session['redo']=True\n request.session['stack']=stack\n #request.session['redostack']=redostack\n return JsonResponse({'response':'rotated'})\n else:\n return HttpResponse()\n\n \n\n",
"step-ids": [
12,
14,
15,
17,
19
]
}
|
[
12,
14,
15,
17,
19
] |
# Generated by Django 3.2.4 on 2021-08-09 03:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('employee', '0013_auto_20210808_1242'),
]
operations = [
]
|
normal
|
{
"blob_id": "f7a335db0ddf8a871e98eac54b59c41a40622153",
"index": 4566,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('employee', '0013_auto_20210808_1242')]\n operations = []\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('employee', '0013_auto_20210808_1242')]\n operations = []\n",
"step-5": "# Generated by Django 3.2.4 on 2021-08-09 03:22\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('employee', '0013_auto_20210808_1242'),\n ]\n\n operations = [\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f():
global animal
animal = 'dog'
print('local_scope:', animal)
print('local:', locals())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f():
global animal
animal = 'dog'
print('local_scope:', animal)
print('local:', locals())
f()
print('global_scope:', animal)
print('global:', locals())
<|reserved_special_token_1|>
animal = 'cat'
def f():
global animal
animal = 'dog'
print('local_scope:', animal)
print('local:', locals())
f()
print('global_scope:', animal)
print('global:', locals())
|
flexible
|
{
"blob_id": "4f3908e12102cfd58737952803c710772e960b0e",
"index": 2385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f():\n global animal\n animal = 'dog'\n print('local_scope:', animal)\n print('local:', locals())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef f():\n global animal\n animal = 'dog'\n print('local_scope:', animal)\n print('local:', locals())\n\n\nf()\nprint('global_scope:', animal)\nprint('global:', locals())\n",
"step-4": "animal = 'cat'\n\n\ndef f():\n global animal\n animal = 'dog'\n print('local_scope:', animal)\n print('local:', locals())\n\n\nf()\nprint('global_scope:', animal)\nprint('global:', locals())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
print('E: %s already exist!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
print('E: %s already exist!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if __name__ == '__main__':
if os.geteuid() != 0:
print('You must be root')
exit()
parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=
'Install specific distro', default='debian')
parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=
'What architecture to select', default='i386')
parser.add_argument('-r', '--release', help='What release to select',
default='unstable')
args = parser.parse_args()
chroot_end_path = os.path.join(args.distro, '-'.join([args.release,
args.arch]))
check_chroot_path(chroot_start_path, chroot_end_path)
configure_distro(args.distro, args.arch, args.release)
pprint(distro_conf)
cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf
['keyring'], '--arch=%s' % args.arch, '--include=%s' % include,
'--components=%s' % ','.join(distro_conf['components']), args.
release, os.path.join(chroot_start_path, chroot_end_path),
distro_conf['debootstrap_mirror']]
pprint(cmd)
p = subprocess.Popen(cmd, cwd='/')
p.wait()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
chroot_start_path = '/srv/chroot'
chroots_conf = '/etc/schroot/chroot.d'
build_pkgs = 'build-essential fakeroot devscripts apt-utils'
include = 'eatmydata,ccache,lintian'
distro_conf = {'debootstrap_mirror': None, 'components': None,
'source_security_suites': None, 'source_security_url': None,
'skip_updates': False, 'skip_security': False, 'keyring': None}
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
print('E: %s already exist!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if __name__ == '__main__':
if os.geteuid() != 0:
print('You must be root')
exit()
parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=
'Install specific distro', default='debian')
parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=
'What architecture to select', default='i386')
parser.add_argument('-r', '--release', help='What release to select',
default='unstable')
args = parser.parse_args()
chroot_end_path = os.path.join(args.distro, '-'.join([args.release,
args.arch]))
check_chroot_path(chroot_start_path, chroot_end_path)
configure_distro(args.distro, args.arch, args.release)
pprint(distro_conf)
cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf
['keyring'], '--arch=%s' % args.arch, '--include=%s' % include,
'--components=%s' % ','.join(distro_conf['components']), args.
release, os.path.join(chroot_start_path, chroot_end_path),
distro_conf['debootstrap_mirror']]
pprint(cmd)
p = subprocess.Popen(cmd, cwd='/')
p.wait()
<|reserved_special_token_1|>
import sys, os
import argparse
import subprocess
from pprint import pprint
chroot_start_path = '/srv/chroot'
chroots_conf = '/etc/schroot/chroot.d'
build_pkgs = 'build-essential fakeroot devscripts apt-utils'
include = 'eatmydata,ccache,lintian'
distro_conf = {'debootstrap_mirror': None, 'components': None,
'source_security_suites': None, 'source_security_url': None,
'skip_updates': False, 'skip_security': False, 'keyring': None}
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
print('E: %s already exist!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if __name__ == '__main__':
if os.geteuid() != 0:
print('You must be root')
exit()
parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=
'Install specific distro', default='debian')
parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=
'What architecture to select', default='i386')
parser.add_argument('-r', '--release', help='What release to select',
default='unstable')
args = parser.parse_args()
chroot_end_path = os.path.join(args.distro, '-'.join([args.release,
args.arch]))
check_chroot_path(chroot_start_path, chroot_end_path)
configure_distro(args.distro, args.arch, args.release)
pprint(distro_conf)
cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf
['keyring'], '--arch=%s' % args.arch, '--include=%s' % include,
'--components=%s' % ','.join(distro_conf['components']), args.
release, os.path.join(chroot_start_path, chroot_end_path),
distro_conf['debootstrap_mirror']]
pprint(cmd)
p = subprocess.Popen(cmd, cwd='/')
p.wait()
<|reserved_special_token_1|>
#!/usr/bin/python
import sys,os
import argparse
import subprocess
from pprint import pprint
chroot_start_path="/srv/chroot"
chroots_conf="/etc/schroot/chroot.d"
build_pkgs = 'build-essential fakeroot devscripts apt-utils'
include = 'eatmydata,ccache,lintian'
distro_conf={
'debootstrap_mirror':None,
'components':None,
'source_security_suites':None,
'source_security_url':None,
'skip_updates':False,
'skip_security':False,
'keyring':None,
}
def configure_distro(distro="debian",arch="i386",release="unstable"):
if distro not in ['ubuntu','debian']:
print("Unknown Distro %s" % distro)
return False
if (distro == 'ubuntu'):
if ( arch in ['amd64','i386'] ):
distro_conf['debootstrap_mirror'] = "http://archive.ubuntu.com/ubuntu"
elif ( arch in ['armel', 'hppa', 'ia64' , 'lpia', 'sparc'] ):
distro_conf['debootstrap_mirror'] = "http://ports.ubuntu.com/ubuntu-ports"
elif ( arch in ['powerpc'] ):
distro_conf['debootstrap_mirror'] = "http://archive.ubuntu.com/ubuntu"
distro_conf['components'] = ['main','restricted', 'universe', 'multiverse']
distro_conf['keyring'] = "/usr/share/keyrings/ubuntu-archive-keyring.gpg"
elif (distro == 'debian'):
distro_conf['debootstrap_mirror'] = "http://ftp.debian.org/debian"
distro_conf['components'] = ['main','non-free','contrib']
distro_conf['source_security_suites'] = "RELEASE/updates"
distro_conf['source_security_url'] = "http://security.debian.org/"
#Debian only performs security updates
distro_conf['skip_updates'] = True
if (release in ['unstable','sid'] ):
distro_conf['skip_security'] = True
distro_conf['keyring'] = "/usr/share/keyrings/debian-archive-keyring.gpg"
def check_chroot_path(start_path,end_path):
if( os.path.ismount( start_path ) ) :
print("%s is mounted" % start_path)
else:
print("%s is not mounted" % start_path)
exit()
complete_path = os.path.join(start_path,end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd,cwd='/',shell=True)
p.wait()
print(p.returncode)
if (not p.returncode):
print("E: %s already exist!"%complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd,cwd='/',shell=True)
p.wait()
print(p.returncode)
if __name__ == "__main__":
if os.geteuid() != 0:
print("You must be root")
exit()
parser = argparse.ArgumentParser(description="Create a Sbuild Chroot",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d','--distro',metavar="DISTRIBUTION",help="Install specific distro",default="debian")
parser.add_argument('-a','--arch',metavar="ARCHITECTURE",help="What architecture to select",default="i386")
parser.add_argument('-r','--release',help="What release to select",default="unstable")
args = parser.parse_args()
chroot_end_path = os.path.join( args.distro , "-".join([args.release,args.arch]) )
check_chroot_path(chroot_start_path,chroot_end_path)
configure_distro(args.distro,args.arch,args.release)
pprint(distro_conf)
cmd = [ 'sbuild-createchroot' ,
'--verbose',
'--keyring=%s' % distro_conf['keyring'] ,
'--arch=%s' % args.arch ,
'--include=%s' % include,
'--components=%s' % ",".join(distro_conf['components']),
args.release ,
os.path.join(chroot_start_path,chroot_end_path),
distro_conf['debootstrap_mirror'],
]
pprint(cmd)
p = subprocess.Popen(cmd,cwd='/')
p.wait()
|
flexible
|
{
"blob_id": "600691b87f7776e96bbf439d7195b870ed86090b",
"index": 1145,
"step-1": "<mask token>\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: %s already exist!' % complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: %s already exist!' 
% complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\nif __name__ == '__main__':\n if os.geteuid() != 0:\n print('You must be root')\n exit()\n parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=\n 'Install specific distro', default='debian')\n parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=\n 'What architecture to select', default='i386')\n parser.add_argument('-r', '--release', help='What release to select',\n default='unstable')\n args = parser.parse_args()\n chroot_end_path = os.path.join(args.distro, '-'.join([args.release,\n args.arch]))\n check_chroot_path(chroot_start_path, chroot_end_path)\n configure_distro(args.distro, args.arch, args.release)\n pprint(distro_conf)\n cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf\n ['keyring'], '--arch=%s' % args.arch, '--include=%s' % include, \n '--components=%s' % ','.join(distro_conf['components']), args.\n release, os.path.join(chroot_start_path, chroot_end_path),\n distro_conf['debootstrap_mirror']]\n pprint(cmd)\n p = subprocess.Popen(cmd, cwd='/')\n p.wait()\n",
"step-3": "<mask token>\nchroot_start_path = '/srv/chroot'\nchroots_conf = '/etc/schroot/chroot.d'\nbuild_pkgs = 'build-essential fakeroot devscripts apt-utils'\ninclude = 'eatmydata,ccache,lintian'\ndistro_conf = {'debootstrap_mirror': None, 'components': None,\n 'source_security_suites': None, 'source_security_url': None,\n 'skip_updates': False, 'skip_security': False, 'keyring': None}\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: 
%s already exist!' % complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\nif __name__ == '__main__':\n if os.geteuid() != 0:\n print('You must be root')\n exit()\n parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=\n 'Install specific distro', default='debian')\n parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=\n 'What architecture to select', default='i386')\n parser.add_argument('-r', '--release', help='What release to select',\n default='unstable')\n args = parser.parse_args()\n chroot_end_path = os.path.join(args.distro, '-'.join([args.release,\n args.arch]))\n check_chroot_path(chroot_start_path, chroot_end_path)\n configure_distro(args.distro, args.arch, args.release)\n pprint(distro_conf)\n cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf\n ['keyring'], '--arch=%s' % args.arch, '--include=%s' % include, \n '--components=%s' % ','.join(distro_conf['components']), args.\n release, os.path.join(chroot_start_path, chroot_end_path),\n distro_conf['debootstrap_mirror']]\n pprint(cmd)\n p = subprocess.Popen(cmd, cwd='/')\n p.wait()\n",
"step-4": "import sys, os\nimport argparse\nimport subprocess\nfrom pprint import pprint\nchroot_start_path = '/srv/chroot'\nchroots_conf = '/etc/schroot/chroot.d'\nbuild_pkgs = 'build-essential fakeroot devscripts apt-utils'\ninclude = 'eatmydata,ccache,lintian'\ndistro_conf = {'debootstrap_mirror': None, 'components': None,\n 'source_security_suites': None, 'source_security_url': None,\n 'skip_updates': False, 'skip_security': False, 'keyring': None}\n\n\ndef configure_distro(distro='debian', arch='i386', release='unstable'):\n if distro not in ['ubuntu', 'debian']:\n print('Unknown Distro %s' % distro)\n return False\n if distro == 'ubuntu':\n if arch in ['amd64', 'i386']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://ports.ubuntu.com/ubuntu-ports'\n elif arch in ['powerpc']:\n distro_conf['debootstrap_mirror'\n ] = 'http://archive.ubuntu.com/ubuntu'\n distro_conf['components'] = ['main', 'restricted', 'universe',\n 'multiverse']\n distro_conf['keyring'\n ] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'\n elif distro == 'debian':\n distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'\n distro_conf['components'] = ['main', 'non-free', 'contrib']\n distro_conf['source_security_suites'] = 'RELEASE/updates'\n distro_conf['source_security_url'] = 'http://security.debian.org/'\n distro_conf['skip_updates'] = True\n if release in ['unstable', 'sid']:\n distro_conf['skip_security'] = True\n distro_conf['keyring'\n ] = '/usr/share/keyrings/debian-archive-keyring.gpg'\n\n\ndef check_chroot_path(start_path, end_path):\n if os.path.ismount(start_path):\n print('%s is mounted' % start_path)\n else:\n print('%s is not mounted' % start_path)\n exit()\n complete_path = os.path.join(start_path, end_path)\n cmd = 'btrfs subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n 
p.wait()\n print(p.returncode)\n if not p.returncode:\n print('E: %s already exist!' % complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd, cwd='/', shell=True)\n p.wait()\n print(p.returncode)\n\n\nif __name__ == '__main__':\n if os.geteuid() != 0:\n print('You must be root')\n exit()\n parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=\n 'Install specific distro', default='debian')\n parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=\n 'What architecture to select', default='i386')\n parser.add_argument('-r', '--release', help='What release to select',\n default='unstable')\n args = parser.parse_args()\n chroot_end_path = os.path.join(args.distro, '-'.join([args.release,\n args.arch]))\n check_chroot_path(chroot_start_path, chroot_end_path)\n configure_distro(args.distro, args.arch, args.release)\n pprint(distro_conf)\n cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf\n ['keyring'], '--arch=%s' % args.arch, '--include=%s' % include, \n '--components=%s' % ','.join(distro_conf['components']), args.\n release, os.path.join(chroot_start_path, chroot_end_path),\n distro_conf['debootstrap_mirror']]\n pprint(cmd)\n p = subprocess.Popen(cmd, cwd='/')\n p.wait()\n",
"step-5": "#!/usr/bin/python\n\nimport sys,os\nimport argparse\nimport subprocess\nfrom pprint import pprint\n\nchroot_start_path=\"/srv/chroot\"\nchroots_conf=\"/etc/schroot/chroot.d\"\n\nbuild_pkgs = 'build-essential fakeroot devscripts apt-utils'\ninclude = 'eatmydata,ccache,lintian'\ndistro_conf={\n 'debootstrap_mirror':None,\n 'components':None,\n 'source_security_suites':None,\n 'source_security_url':None,\n 'skip_updates':False,\n 'skip_security':False,\n 'keyring':None,\n}\ndef configure_distro(distro=\"debian\",arch=\"i386\",release=\"unstable\"):\n\n if distro not in ['ubuntu','debian']:\n print(\"Unknown Distro %s\" % distro)\n return False\n\n if (distro == 'ubuntu'):\n if ( arch in ['amd64','i386'] ):\n distro_conf['debootstrap_mirror'] = \"http://archive.ubuntu.com/ubuntu\"\n elif ( arch in ['armel', 'hppa', 'ia64' , 'lpia', 'sparc'] ):\n distro_conf['debootstrap_mirror'] = \"http://ports.ubuntu.com/ubuntu-ports\"\n elif ( arch in ['powerpc'] ):\n distro_conf['debootstrap_mirror'] = \"http://archive.ubuntu.com/ubuntu\"\n\n distro_conf['components'] = ['main','restricted', 'universe', 'multiverse']\n\n distro_conf['keyring'] = \"/usr/share/keyrings/ubuntu-archive-keyring.gpg\"\n elif (distro == 'debian'):\n distro_conf['debootstrap_mirror'] = \"http://ftp.debian.org/debian\"\n distro_conf['components'] = ['main','non-free','contrib']\n distro_conf['source_security_suites'] = \"RELEASE/updates\"\n distro_conf['source_security_url'] = \"http://security.debian.org/\"\n #Debian only performs security updates\n distro_conf['skip_updates'] = True\n\n if (release in ['unstable','sid'] ):\n distro_conf['skip_security'] = True\n\n distro_conf['keyring'] = \"/usr/share/keyrings/debian-archive-keyring.gpg\"\n\n\ndef check_chroot_path(start_path,end_path):\n if( os.path.ismount( start_path ) ) :\n print(\"%s is mounted\" % start_path)\n else:\n print(\"%s is not mounted\" % start_path)\n exit()\n\n complete_path = os.path.join(start_path,end_path)\n cmd = 'btrfs 
subvolume list \"%s\" > /dev/null 2>&1' % complete_path\n p = subprocess.Popen(cmd,cwd='/',shell=True)\n p.wait()\n print(p.returncode)\n if (not p.returncode):\n print(\"E: %s already exist!\"%complete_path)\n exit()\n else:\n cmd = 'btrfs subvolume create \"%s\"' % complete_path\n p = subprocess.Popen(cmd,cwd='/',shell=True)\n p.wait()\n print(p.returncode)\n \nif __name__ == \"__main__\":\n\n if os.geteuid() != 0:\n print(\"You must be root\")\n exit()\n\n parser = argparse.ArgumentParser(description=\"Create a Sbuild Chroot\",formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-d','--distro',metavar=\"DISTRIBUTION\",help=\"Install specific distro\",default=\"debian\")\n parser.add_argument('-a','--arch',metavar=\"ARCHITECTURE\",help=\"What architecture to select\",default=\"i386\")\n parser.add_argument('-r','--release',help=\"What release to select\",default=\"unstable\")\n\n args = parser.parse_args()\n chroot_end_path = os.path.join( args.distro , \"-\".join([args.release,args.arch]) )\n check_chroot_path(chroot_start_path,chroot_end_path)\n\n configure_distro(args.distro,args.arch,args.release)\n\n pprint(distro_conf)\n cmd = [ 'sbuild-createchroot' ,\n '--verbose',\n '--keyring=%s' % distro_conf['keyring'] ,\n '--arch=%s' % args.arch ,\n '--include=%s' % include,\n '--components=%s' % \",\".join(distro_conf['components']),\n args.release ,\n os.path.join(chroot_start_path,chroot_end_path),\n distro_conf['debootstrap_mirror'],\n ]\n pprint(cmd)\n p = subprocess.Popen(cmd,cwd='/')\n p.wait()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 10:28:04 2020
@author: Maxi
"""
import numpy as np
from ase.io import read
from RDF_3D import pairCorrelationFunction_3D
import matplotlib.pyplot as plt
filename = r"C:\Users\Maxi\Desktop\t\Ag_HfO2_cat_3.125_222_t.cif"
crystal = read(filename)
corrdinates = crystal.get_positions()
cell_length = crystal.get_cell_lengths_and_angles()
cell_length = cell_length[0:3] # only select the cell length
dr = 0.01 # shperical shell radius dr
min_length_cell = min(cell_length) # select the smalles length in cell
rmax = min_length_cell / 10
x = corrdinates[:, 0] # split the 2d array into x, y, z coordinates
y = corrdinates[:, 1]
z = corrdinates[:, 2]
g_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr)
plt.figure()
plt.plot(r, g_r, color='black')
plt.xlabel('r')
plt.ylabel('g(r)')
plt.xlim( (0, rmax) )
plt.ylim( (0, 1.05 * g_r.max()) )
plt.show()
|
normal
|
{
"blob_id": "516d9790f40c021d45302948b7fba0cf3e00da0a",
"index": 6322,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-3": "<mask token>\nfilename = 'C:\\\\Users\\\\Maxi\\\\Desktop\\\\t\\\\Ag_HfO2_cat_3.125_222_t.cif'\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3]\ndr = 0.01\nmin_length_cell = min(cell_length)\nrmax = min_length_cell / 10\nx = corrdinates[:, 0]\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr\n )\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-4": "<mask token>\nimport numpy as np\nfrom ase.io import read\nfrom RDF_3D import pairCorrelationFunction_3D\nimport matplotlib.pyplot as plt\nfilename = 'C:\\\\Users\\\\Maxi\\\\Desktop\\\\t\\\\Ag_HfO2_cat_3.125_222_t.cif'\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3]\ndr = 0.01\nmin_length_cell = min(cell_length)\nrmax = min_length_cell / 10\nx = corrdinates[:, 0]\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr\n )\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim((0, rmax))\nplt.ylim((0, 1.05 * g_r.max()))\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 15 10:28:04 2020\n\n@author: Maxi\n\"\"\"\nimport numpy as np\nfrom ase.io import read\nfrom RDF_3D import pairCorrelationFunction_3D\nimport matplotlib.pyplot as plt\n \n\nfilename = r\"C:\\Users\\Maxi\\Desktop\\t\\Ag_HfO2_cat_3.125_222_t.cif\"\ncrystal = read(filename)\ncorrdinates = crystal.get_positions()\ncell_length = crystal.get_cell_lengths_and_angles()\ncell_length = cell_length[0:3] # only select the cell length\n\ndr = 0.01 # shperical shell radius dr\nmin_length_cell = min(cell_length) # select the smalles length in cell\nrmax = min_length_cell / 10\nx = corrdinates[:, 0] # split the 2d array into x, y, z coordinates\ny = corrdinates[:, 1]\nz = corrdinates[:, 2]\n\ng_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, min_length_cell, rmax, dr)\n\nplt.figure()\nplt.plot(r, g_r, color='black')\nplt.xlabel('r')\nplt.ylabel('g(r)')\nplt.xlim( (0, rmax) )\nplt.ylim( (0, 1.05 * g_r.max()) )\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from setuptools import setup, find_packages

# Package metadata for the "mith1" distribution.
setup(
    packages=find_packages(),
    # Fixed: flask was previously listed under the deprecated `setup_requires`,
    # which is for build-time-only dependencies. Flask is needed at runtime,
    # so it belongs in `install_requires`.
    install_requires=["flask"],
    name="mith1",
)
|
normal
|
{
"blob_id": "a5a7cd112faad1096ce4c6f04b2179fbdf732702",
"index": 1479,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(packages=find_packages(), setup_requires=['flask'], name='mith1')\n",
"step-3": "from setuptools import setup, find_packages\nsetup(packages=find_packages(), setup_requires=['flask'], name='mith1')\n",
"step-4": "from setuptools import setup, find_packages\nsetup(\n packages=find_packages(),\n setup_requires=[\"flask\"],\n name=\"mith1\",\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def longest_word(s, d):
    """Return the longest word in dictionary *d* that is a subsequence of *s*.

    Ties on length are broken lexicographically (smallest word wins).
    Returns '' when no dictionary word is a subsequence of *s*.
    """
    # Sort candidates longest-first, then alphabetically, so the first
    # subsequence match found is the answer.
    for word in sorted(d, key=lambda w: (-len(w), w)):
        it = iter(s)
        # Idiomatic subsequence test: `c in it` advances the iterator past
        # the first occurrence of c, so characters must appear in order.
        # Short-circuits as soon as a character cannot be found.
        if all(c in it for c in word):
            return word
    return ''

# Demo: expected outputs are "a", "ab", "apple".
print(longest_word("abpcplea", ["a", "b", "c"]))
print(longest_word("abpcplea", ["ba", "ab", "a", "b"]))
print(longest_word('abpcplea', ["ale", "apple", "monkey", "plea"]))
|
normal
|
{
"blob_id": "86de5b4a72978e2c49e060eefc513e3ed61272ae",
"index": 4004,
"step-1": "<mask token>\n",
"step-2": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\n\n<mask token>\n",
"step-3": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key=lambda x: (-x[1], x[0]))\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\n\nprint(longest_word('abpcplea', ['a', 'b', 'c']))\nprint(longest_word('abpcplea', ['ba', 'ab', 'a', 'b']))\nprint(longest_word('abpcplea', ['ale', 'apple', 'monkey', 'plea']))\n",
"step-4": "def longest_word(s, d):\n lengths = [(entry, len(entry)) for entry in d]\n sorted_d = sorted(lengths, key = lambda x: (-x[1], x[0]))\n\n for word, length in sorted_d:\n j = 0\n for i in range(0, len(s)):\n if j < len(word) and word[j] == s[i]:\n j += 1\n if j == len(word):\n return word\n return ''\n\nprint(longest_word(\"abpcplea\", [\"a\", \"b\", \"c\"]))\nprint(longest_word(\"abpcplea\", [\"ba\", \"ab\", \"a\", \"b\"]))\nprint(longest_word('abpcplea', [\"ale\",\"apple\",\"monkey\",\"plea\"]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Basic(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _region_test(self, x):
if x in self.iana.region:
return True
elif x in ('XX', 'XK'):
return True
return False
def _allRows(self):
for r in self.rows:
t = langtag(r['likely_subtag'])
if t.lang.startswith('x-'):
continue
yield r, t
def test_lang(self):
""" Tests that all lang subtags are in iana """
fails = []
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:
self.fail(
'{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'
.format(l.lang, t.lang, **r))
if (t.lang not in self.iana.language and '-' not in t.lang and
t.lang not in self.extraLangs):
fails.append(r['Lang_Id'])
if not l.test(fname=langtagjson):
self.fail('{Lang_Id} failed conformance check'.format(**r))
if len(fails):
self.fail(f'{fails} langs not in IANA')
def test_region(self):
""" Test that region values are sensible and that they equal the default region.
Unknown regions do not have to be specified. """
for r, t in self._allRows():
reg = t.region
if not self._region_test(t.region):
self.fail('{likely_subtag} has irregular region'.format(**r))
for s in r['regions'].split():
if not self._region_test(s.strip()):
self.fail('{Lang_Id} has irregular region: {0} in regions'
.format(s, **r))
<|reserved_special_token_0|>
def test_variants(self):
""" Test that all variants are in IANA """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.vars is None and l.vars is None:
continue
if sorted(t.vars) != sorted(l.vars):
self.fail(
'{Lang_Id} and {likely_subtag} have different variants'
.format(**r))
for v in t.vars:
if v not in self.iana.variant:
self.fail('{likely_subtag} has bad variant {0}'.format(
v, **r))
def test_csv_columns(self):
""" Test that everyone has the right number of columns """
lc = self.fieldnames[-1]
for r in self.rows:
if len(r.get('_', [])):
self.fail('{Lang_Id} has too many columns'.format(**r))
elif r[lc] is None:
self.fail('{Lang_Id} has too few columns'.format(**r))
def test_pua(self):
""" Test that anything with -x- in Lang_Id has it in likely_subtag too """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.ns is None and l.ns is None:
continue
if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:
continue
if sorted(t.ns.keys()) != sorted(l.ns.keys()):
self.fail(
'{Lang_Id} and {likely_subtag} have different extension namespaces'
.format(**r))
for k, v in t.ns.items():
if sorted(v) != sorted(l.ns[k]):
self.fail(
'{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'
.format(k, **r))
def test_ascii(self):
""" Test that all tags are pure ascii """
for r, t in self._allRows():
for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',
'Macro', 'variants'):
if nonascii(r[cid]):
self.fail('{Lang_Id} has non ASCII in column {0} value {1}'
.format(cid, r[cid], **r))
def test_iso639(self):
""" Test that the iso639 column is either empty or 3 lower ascii chars. """
k = 'ISO 639-3'
for r, t in self._allRows():
if r[k] == '':
continue
if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x
) < 123 for x in r[k]):
self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'
.format(**r))
def test_deprecated(self):
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
inf = self.iana.language.get(l.lang, {})
if 'Deprecated' in inf:
if r['deprecated'] == '':
self.fail(
'{Lang_Id} was deprecated: {} in IANA but not in the database'
.format(inf['Deprecated'], **r))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Basic(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def setUp(self):
self.fname = os.path.join(os.path.dirname(__file__),
'../source/langtags.csv')
with open(self.fname) as csvfile:
reader = csv.DictReader(csvfile, restkey='_')
self.rows = list(reader)
self.fieldnames = reader.fieldnames
self.numlines = reader.line_num
self.iana = Iana()
def _region_test(self, x):
if x in self.iana.region:
return True
elif x in ('XX', 'XK'):
return True
return False
def _allRows(self):
for r in self.rows:
t = langtag(r['likely_subtag'])
if t.lang.startswith('x-'):
continue
yield r, t
def test_lang(self):
""" Tests that all lang subtags are in iana """
fails = []
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:
self.fail(
'{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'
.format(l.lang, t.lang, **r))
if (t.lang not in self.iana.language and '-' not in t.lang and
t.lang not in self.extraLangs):
fails.append(r['Lang_Id'])
if not l.test(fname=langtagjson):
self.fail('{Lang_Id} failed conformance check'.format(**r))
if len(fails):
self.fail(f'{fails} langs not in IANA')
def test_region(self):
""" Test that region values are sensible and that they equal the default region.
Unknown regions do not have to be specified. """
for r, t in self._allRows():
reg = t.region
if not self._region_test(t.region):
self.fail('{likely_subtag} has irregular region'.format(**r))
for s in r['regions'].split():
if not self._region_test(s.strip()):
self.fail('{Lang_Id} has irregular region: {0} in regions'
.format(s, **r))
def test_script(self):
""" Qaa? type scripts must have an -x- for the script name """
for r, t in self._allRows():
scr = t.script
if scr is not None and (scr.startswith('Qaa') or scr.startswith
('Qab')):
if scr not in ('Qaax', 'Qaby', 'Qabz') and (t.extensions is
None or 'x' not in t.extensions):
self.fail('{Lang_Id} has no extension for script name'.
format(**r))
elif scr not in self.iana.script and scr not in self.extraScripts:
self.fail('{Lang_Id} has irregular script {}'.format(scr, **r))
elif t.script not in self.iana.script and t.script not in self.extraScripts:
self.fail('{likely_subtag} has irregular script'.format(**r))
def test_variants(self):
""" Test that all variants are in IANA """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.vars is None and l.vars is None:
continue
if sorted(t.vars) != sorted(l.vars):
self.fail(
'{Lang_Id} and {likely_subtag} have different variants'
.format(**r))
for v in t.vars:
if v not in self.iana.variant:
self.fail('{likely_subtag} has bad variant {0}'.format(
v, **r))
def test_csv_columns(self):
""" Test that everyone has the right number of columns """
lc = self.fieldnames[-1]
for r in self.rows:
if len(r.get('_', [])):
self.fail('{Lang_Id} has too many columns'.format(**r))
elif r[lc] is None:
self.fail('{Lang_Id} has too few columns'.format(**r))
def test_pua(self):
""" Test that anything with -x- in Lang_Id has it in likely_subtag too """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.ns is None and l.ns is None:
continue
if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:
continue
if sorted(t.ns.keys()) != sorted(l.ns.keys()):
self.fail(
'{Lang_Id} and {likely_subtag} have different extension namespaces'
.format(**r))
for k, v in t.ns.items():
if sorted(v) != sorted(l.ns[k]):
self.fail(
'{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'
.format(k, **r))
def test_ascii(self):
""" Test that all tags are pure ascii """
for r, t in self._allRows():
for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',
'Macro', 'variants'):
if nonascii(r[cid]):
self.fail('{Lang_Id} has non ASCII in column {0} value {1}'
.format(cid, r[cid], **r))
def test_iso639(self):
""" Test that the iso639 column is either empty or 3 lower ascii chars. """
k = 'ISO 639-3'
for r, t in self._allRows():
if r[k] == '':
continue
if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x
) < 123 for x in r[k]):
self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'
.format(**r))
def test_deprecated(self):
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
inf = self.iana.language.get(l.lang, {})
if 'Deprecated' in inf:
if r['deprecated'] == '':
self.fail(
'{Lang_Id} was deprecated: {} in IANA but not in the database'
.format(inf['Deprecated'], **r))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
langtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub',
'langtags.json')
bannedchars = list(range(33, 45)) + [47] + list(range(58, 63)) + [94, 96]
def nonascii(s):
cs = [ord(x) for x in s]
if any(not 32 <= x < 123 or x in bannedchars for x in cs):
return True
class Basic(unittest.TestCase):
extraScripts = ['Toto', 'Vith']
extraLangs = ('000', 'cxh', 'dsk', 'dyr', 'eud', 'ikh', 'izm', 'lgs',
'lvl', 'nzr', 'pze', 'rsw', 'tvi', 'uly', 'vjk', 'wtb', 'ycr',
'ykh', 'zem', 'zlu')
def setUp(self):
self.fname = os.path.join(os.path.dirname(__file__),
'../source/langtags.csv')
with open(self.fname) as csvfile:
reader = csv.DictReader(csvfile, restkey='_')
self.rows = list(reader)
self.fieldnames = reader.fieldnames
self.numlines = reader.line_num
self.iana = Iana()
def _region_test(self, x):
if x in self.iana.region:
return True
elif x in ('XX', 'XK'):
return True
return False
def _allRows(self):
for r in self.rows:
t = langtag(r['likely_subtag'])
if t.lang.startswith('x-'):
continue
yield r, t
def test_lang(self):
""" Tests that all lang subtags are in iana """
fails = []
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:
self.fail(
'{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'
.format(l.lang, t.lang, **r))
if (t.lang not in self.iana.language and '-' not in t.lang and
t.lang not in self.extraLangs):
fails.append(r['Lang_Id'])
if not l.test(fname=langtagjson):
self.fail('{Lang_Id} failed conformance check'.format(**r))
if len(fails):
self.fail(f'{fails} langs not in IANA')
def test_region(self):
""" Test that region values are sensible and that they equal the default region.
Unknown regions do not have to be specified. """
for r, t in self._allRows():
reg = t.region
if not self._region_test(t.region):
self.fail('{likely_subtag} has irregular region'.format(**r))
for s in r['regions'].split():
if not self._region_test(s.strip()):
self.fail('{Lang_Id} has irregular region: {0} in regions'
.format(s, **r))
def test_script(self):
""" Qaa? type scripts must have an -x- for the script name """
for r, t in self._allRows():
scr = t.script
if scr is not None and (scr.startswith('Qaa') or scr.startswith
('Qab')):
if scr not in ('Qaax', 'Qaby', 'Qabz') and (t.extensions is
None or 'x' not in t.extensions):
self.fail('{Lang_Id} has no extension for script name'.
format(**r))
elif scr not in self.iana.script and scr not in self.extraScripts:
self.fail('{Lang_Id} has irregular script {}'.format(scr, **r))
elif t.script not in self.iana.script and t.script not in self.extraScripts:
self.fail('{likely_subtag} has irregular script'.format(**r))
def test_variants(self):
""" Test that all variants are in IANA """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.vars is None and l.vars is None:
continue
if sorted(t.vars) != sorted(l.vars):
self.fail(
'{Lang_Id} and {likely_subtag} have different variants'
.format(**r))
for v in t.vars:
if v not in self.iana.variant:
self.fail('{likely_subtag} has bad variant {0}'.format(
v, **r))
def test_csv_columns(self):
""" Test that everyone has the right number of columns """
lc = self.fieldnames[-1]
for r in self.rows:
if len(r.get('_', [])):
self.fail('{Lang_Id} has too many columns'.format(**r))
elif r[lc] is None:
self.fail('{Lang_Id} has too few columns'.format(**r))
def test_pua(self):
""" Test that anything with -x- in Lang_Id has it in likely_subtag too """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.ns is None and l.ns is None:
continue
if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:
continue
if sorted(t.ns.keys()) != sorted(l.ns.keys()):
self.fail(
'{Lang_Id} and {likely_subtag} have different extension namespaces'
.format(**r))
for k, v in t.ns.items():
if sorted(v) != sorted(l.ns[k]):
self.fail(
'{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'
.format(k, **r))
def test_ascii(self):
""" Test that all tags are pure ascii """
for r, t in self._allRows():
for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',
'Macro', 'variants'):
if nonascii(r[cid]):
self.fail('{Lang_Id} has non ASCII in column {0} value {1}'
.format(cid, r[cid], **r))
def test_iso639(self):
""" Test that the iso639 column is either empty or 3 lower ascii chars. """
k = 'ISO 639-3'
for r, t in self._allRows():
if r[k] == '':
continue
if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x
) < 123 for x in r[k]):
self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'
.format(**r))
def test_deprecated(self):
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
inf = self.iana.language.get(l.lang, {})
if 'Deprecated' in inf:
if r['deprecated'] == '':
self.fail(
'{Lang_Id} was deprecated: {} in IANA but not in the database'
.format(inf['Deprecated'], **r))
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import os, re
import csv, unittest
from langtag import langtag
from sldr.iana import Iana
langtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub',
'langtags.json')
bannedchars = list(range(33, 45)) + [47] + list(range(58, 63)) + [94, 96]
def nonascii(s):
cs = [ord(x) for x in s]
if any(not 32 <= x < 123 or x in bannedchars for x in cs):
return True
class Basic(unittest.TestCase):
extraScripts = ['Toto', 'Vith']
extraLangs = ('000', 'cxh', 'dsk', 'dyr', 'eud', 'ikh', 'izm', 'lgs',
'lvl', 'nzr', 'pze', 'rsw', 'tvi', 'uly', 'vjk', 'wtb', 'ycr',
'ykh', 'zem', 'zlu')
def setUp(self):
self.fname = os.path.join(os.path.dirname(__file__),
'../source/langtags.csv')
with open(self.fname) as csvfile:
reader = csv.DictReader(csvfile, restkey='_')
self.rows = list(reader)
self.fieldnames = reader.fieldnames
self.numlines = reader.line_num
self.iana = Iana()
def _region_test(self, x):
if x in self.iana.region:
return True
elif x in ('XX', 'XK'):
return True
return False
def _allRows(self):
for r in self.rows:
t = langtag(r['likely_subtag'])
if t.lang.startswith('x-'):
continue
yield r, t
def test_lang(self):
""" Tests that all lang subtags are in iana """
fails = []
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:
self.fail(
'{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'
.format(l.lang, t.lang, **r))
if (t.lang not in self.iana.language and '-' not in t.lang and
t.lang not in self.extraLangs):
fails.append(r['Lang_Id'])
if not l.test(fname=langtagjson):
self.fail('{Lang_Id} failed conformance check'.format(**r))
if len(fails):
self.fail(f'{fails} langs not in IANA')
def test_region(self):
""" Test that region values are sensible and that they equal the default region.
Unknown regions do not have to be specified. """
for r, t in self._allRows():
reg = t.region
if not self._region_test(t.region):
self.fail('{likely_subtag} has irregular region'.format(**r))
for s in r['regions'].split():
if not self._region_test(s.strip()):
self.fail('{Lang_Id} has irregular region: {0} in regions'
.format(s, **r))
def test_script(self):
""" Qaa? type scripts must have an -x- for the script name """
for r, t in self._allRows():
scr = t.script
if scr is not None and (scr.startswith('Qaa') or scr.startswith
('Qab')):
if scr not in ('Qaax', 'Qaby', 'Qabz') and (t.extensions is
None or 'x' not in t.extensions):
self.fail('{Lang_Id} has no extension for script name'.
format(**r))
elif scr not in self.iana.script and scr not in self.extraScripts:
self.fail('{Lang_Id} has irregular script {}'.format(scr, **r))
elif t.script not in self.iana.script and t.script not in self.extraScripts:
self.fail('{likely_subtag} has irregular script'.format(**r))
def test_variants(self):
""" Test that all variants are in IANA """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.vars is None and l.vars is None:
continue
if sorted(t.vars) != sorted(l.vars):
self.fail(
'{Lang_Id} and {likely_subtag} have different variants'
.format(**r))
for v in t.vars:
if v not in self.iana.variant:
self.fail('{likely_subtag} has bad variant {0}'.format(
v, **r))
def test_csv_columns(self):
""" Test that everyone has the right number of columns """
lc = self.fieldnames[-1]
for r in self.rows:
if len(r.get('_', [])):
self.fail('{Lang_Id} has too many columns'.format(**r))
elif r[lc] is None:
self.fail('{Lang_Id} has too few columns'.format(**r))
def test_pua(self):
""" Test that anything with -x- in Lang_Id has it in likely_subtag too """
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
if t.ns is None and l.ns is None:
continue
if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:
continue
if sorted(t.ns.keys()) != sorted(l.ns.keys()):
self.fail(
'{Lang_Id} and {likely_subtag} have different extension namespaces'
.format(**r))
for k, v in t.ns.items():
if sorted(v) != sorted(l.ns[k]):
self.fail(
'{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'
.format(k, **r))
def test_ascii(self):
""" Test that all tags are pure ascii """
for r, t in self._allRows():
for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',
'Macro', 'variants'):
if nonascii(r[cid]):
self.fail('{Lang_Id} has non ASCII in column {0} value {1}'
.format(cid, r[cid], **r))
def test_iso639(self):
""" Test that the iso639 column is either empty or 3 lower ascii chars. """
k = 'ISO 639-3'
for r, t in self._allRows():
if r[k] == '':
continue
if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x
) < 123 for x in r[k]):
self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'
.format(**r))
def test_deprecated(self):
for r, t in self._allRows():
l = langtag(r['Lang_Id'])
inf = self.iana.language.get(l.lang, {})
if 'Deprecated' in inf:
if r['deprecated'] == '':
self.fail(
'{Lang_Id} was deprecated: {} in IANA but not in the database'
.format(inf['Deprecated'], **r))
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
#!/usr/bin/python3
import os, re
import csv, unittest
from langtag import langtag
from sldr.iana import Iana
# Path to the published langtags.json, relative to this test file.
langtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub', 'langtags.json')
# Characters disallowed in tag fields: ASCII 33-44, '/', 58-62, '^', '`'.
# A frozenset gives O(1) membership instead of scanning a list per character.
bannedchars = frozenset(list(range(33, 45)) + [47] + list(range(58, 63)) + [94, 96])

def nonascii(s):
    """Return True if *s* contains a non-printable-ASCII or banned character.

    Fixed: previously the clean path fell off the end and returned None
    implicitly; it now returns an explicit bool (truthiness-compatible
    for existing callers).
    """
    return any(not (32 <= c < 123) or c in bannedchars for c in map(ord, s))
class Basic(unittest.TestCase):
    """Sanity checks of source/langtags.csv against the IANA subtag registry."""
    # Scripts accepted even though not (yet) in the IANA registry.
    extraScripts = ["Toto", "Vith"]
    # Language subtags accepted ahead of their IANA registration.
    extraLangs = ("000",
        "cxh", "dsk", "dyr", "eud", "ikh", "izm", "lgs", # going in ~23/Mar/2023
        'lvl', 'nzr', 'pze', 'rsw', 'tvi', 'uly', 'vjk', 'wtb', 'ycr', 'ykh', 'zem', 'zlu') # going in ~23/Mar/2023
    def setUp(self):
        """Load the langtags CSV and the IANA registry for every test."""
        self.fname = os.path.join(os.path.dirname(__file__), '../source/langtags.csv')
        with open(self.fname) as csvfile:
            # Overflow columns collect under "_" so test_csv_columns can spot them.
            reader = csv.DictReader(csvfile, restkey="_")
            self.rows = list(reader)
            self.fieldnames = reader.fieldnames
            self.numlines = reader.line_num
        self.iana = Iana()
    def _region_test(self, x):
        """Return True if *x* is an acceptable region code."""
        if x in self.iana.region:
            return True
        # XX and XK are accepted despite being absent from the IANA registry
        # (presumably private-use/unknown codes — confirm).
        elif x in ("XX", "XK"):
            return True
        return False
    def _allRows(self):
        """Yield (csv_row, parsed likely_subtag) pairs, skipping private x- tags."""
        for r in self.rows:
            t = langtag(r['likely_subtag'])
            if t.lang.startswith("x-"):
                continue
            yield (r, t)
    def test_lang(self):
        ''' Tests that all lang subtags are in iana '''
        fails = []
        for r, t in self._allRows():
            l = langtag(r['Lang_Id'])
            # Lang_Id and likely_subtag must agree on the primary language.
            if l.lang != t.lang and "-" not in l.lang and "-" not in t.lang:
                self.fail("{Lang_Id} has different lang to {likely_subtag} ({0} != {1})".format(l.lang, t.lang, **r))
            if t.lang not in self.iana.language and "-" not in t.lang and t.lang not in self.extraLangs:
                fails.append(r['Lang_Id'])
            # Conformance check against the published langtags.json.
            if not l.test(fname=langtagjson):
                self.fail("{Lang_Id} failed conformance check".format(**r))
        if len(fails):
            self.fail(f"{fails} langs not in IANA")
    def test_region(self):
        ''' Test that region values are sensible and that they equal the default region.
            Unknown regions do not have to be specified. '''
        for r,t in self._allRows():
            reg = t.region
            if not self._region_test(t.region):
                self.fail("{likely_subtag} has irregular region".format(**r))
            # Every extra region listed in the 'regions' column must also be valid.
            for s in r['regions'].split():
                if not self._region_test(s.strip()):
                    self.fail("{Lang_Id} has irregular region: {0} in regions".format(s, **r))
    def test_script(self):
        ''' Qaa? type scripts must have an -x- for the script name '''
        for r, t in self._allRows():
            scr = t.script
            # Private-use scripts (Qaa*/Qab*) need an -x- extension naming them,
            # except for the three whitelisted codes.
            if scr is not None and (scr.startswith("Qaa") or scr.startswith("Qab")):
                if scr not in ("Qaax", "Qaby", "Qabz") and (t.extensions is None or 'x' not in t.extensions):
                    self.fail("{Lang_Id} has no extension for script name".format(**r))
            elif scr not in self.iana.script and scr not in self.extraScripts:
                self.fail("{Lang_Id} has irregular script {}".format(scr, **r))
            # NOTE(review): scr is t.script, so this elif repeats the previous
            # condition and looks unreachable — confirm intent.
            elif t.script not in self.iana.script and t.script not in self.extraScripts:
                self.fail("{likely_subtag} has irregular script".format(**r))
    def test_variants(self):
        ''' Test that all variants are in IANA '''
        for r, t in self._allRows():
            l = langtag(r['Lang_Id'])
            if t.vars is None and l.vars is None:
                continue
            # Lang_Id and likely_subtag must carry the same variant set.
            if sorted(t.vars) != sorted(l.vars):
                self.fail("{Lang_Id} and {likely_subtag} have different variants".format(**r))
            for v in t.vars:
                if v not in self.iana.variant:
                    self.fail("{likely_subtag} has bad variant {0}".format(v, **r))
    def test_csv_columns(self):
        ''' Test that everyone has the right number of columns '''
        lc = self.fieldnames[-1]
        for r in self.rows:
            # Extra cells land in the "_" restkey; a missing last column reads None.
            if len(r.get("_", [])):
                self.fail("{Lang_Id} has too many columns".format(**r))
            elif r[lc] is None:
                self.fail("{Lang_Id} has too few columns".format(**r))
    def test_pua(self):
        ''' Test that anything with -x- in Lang_Id has it in likely_subtag too '''
        for r, t in self._allRows():
            l = langtag(r['Lang_Id'])
            if t.ns is None and l.ns is None:
                continue
            if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:
                continue # allow a private script extension
            if sorted(t.ns.keys()) != sorted(l.ns.keys()):
                self.fail("{Lang_Id} and {likely_subtag} have different extension namespaces".format(**r))
            for k, v in t.ns.items():
                if sorted(v) != sorted(l.ns[k]):
                    self.fail("{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace".format(k, **r))
    def test_ascii(self):
        ''' Test that all tags are pure ascii '''
        for r, t in self._allRows():
            for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3', 'Macro', 'variants'):
                if nonascii(r[cid]):
                    self.fail("{Lang_Id} has non ASCII in column {0} value {1}".format(cid, r[cid], **r))
    def test_iso639(self):
        ''' Test that the iso639 column is either empty or 3 lower ascii chars. '''
        k = 'ISO 639-3'
        for r, t in self._allRows():
            if r[k] == '':
                continue
            # 96 < ord(x) < 123 restricts to lowercase a-z.
            if len(r[k]) != 3 or r[k].lower() != r[k] or any(not (96 < ord(x) < 123) for x in r[k]):
                self.fail("{Lang_Id} has faulty ISO639 code of {ISO 639-3}".format(**r))
    def test_deprecated(self):
        """Tags deprecated in IANA must be marked deprecated in the database."""
        for r, t in self._allRows():
            l = langtag(r['Lang_Id'])
            inf = self.iana.language.get(l.lang, {})
            if 'Deprecated' in inf:
                if r['deprecated'] == '':
                    self.fail("{Lang_Id} was deprecated: {} in IANA but not in the database".format(inf['Deprecated'], **r))
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
flexible
|
{
"blob_id": "e4f194c3dbc3e1d62866343642e41fa1ecdeab93",
"index": 7380,
"step-1": "<mask token>\n\n\nclass Basic(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def _region_test(self, x):\n if x in self.iana.region:\n return True\n elif x in ('XX', 'XK'):\n return True\n return False\n\n def _allRows(self):\n for r in self.rows:\n t = langtag(r['likely_subtag'])\n if t.lang.startswith('x-'):\n continue\n yield r, t\n\n def test_lang(self):\n \"\"\" Tests that all lang subtags are in iana \"\"\"\n fails = []\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:\n self.fail(\n '{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'\n .format(l.lang, t.lang, **r))\n if (t.lang not in self.iana.language and '-' not in t.lang and \n t.lang not in self.extraLangs):\n fails.append(r['Lang_Id'])\n if not l.test(fname=langtagjson):\n self.fail('{Lang_Id} failed conformance check'.format(**r))\n if len(fails):\n self.fail(f'{fails} langs not in IANA')\n\n def test_region(self):\n \"\"\" Test that region values are sensible and that they equal the default region.\n Unknown regions do not have to be specified. 
\"\"\"\n for r, t in self._allRows():\n reg = t.region\n if not self._region_test(t.region):\n self.fail('{likely_subtag} has irregular region'.format(**r))\n for s in r['regions'].split():\n if not self._region_test(s.strip()):\n self.fail('{Lang_Id} has irregular region: {0} in regions'\n .format(s, **r))\n <mask token>\n\n def test_variants(self):\n \"\"\" Test that all variants are in IANA \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.vars is None and l.vars is None:\n continue\n if sorted(t.vars) != sorted(l.vars):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different variants'\n .format(**r))\n for v in t.vars:\n if v not in self.iana.variant:\n self.fail('{likely_subtag} has bad variant {0}'.format(\n v, **r))\n\n def test_csv_columns(self):\n \"\"\" Test that everyone has the right number of columns \"\"\"\n lc = self.fieldnames[-1]\n for r in self.rows:\n if len(r.get('_', [])):\n self.fail('{Lang_Id} has too many columns'.format(**r))\n elif r[lc] is None:\n self.fail('{Lang_Id} has too few columns'.format(**r))\n\n def test_pua(self):\n \"\"\" Test that anything with -x- in Lang_Id has it in likely_subtag too \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.ns is None and l.ns is None:\n continue\n if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:\n continue\n if sorted(t.ns.keys()) != sorted(l.ns.keys()):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extension namespaces'\n .format(**r))\n for k, v in t.ns.items():\n if sorted(v) != sorted(l.ns[k]):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'\n .format(k, **r))\n\n def test_ascii(self):\n \"\"\" Test that all tags are pure ascii \"\"\"\n for r, t in self._allRows():\n for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',\n 'Macro', 'variants'):\n if nonascii(r[cid]):\n self.fail('{Lang_Id} has non ASCII in column {0} value {1}'\n .format(cid, r[cid], **r))\n\n def 
test_iso639(self):\n \"\"\" Test that the iso639 column is either empty or 3 lower ascii chars. \"\"\"\n k = 'ISO 639-3'\n for r, t in self._allRows():\n if r[k] == '':\n continue\n if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x\n ) < 123 for x in r[k]):\n self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'\n .format(**r))\n\n def test_deprecated(self):\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n inf = self.iana.language.get(l.lang, {})\n if 'Deprecated' in inf:\n if r['deprecated'] == '':\n self.fail(\n '{Lang_Id} was deprecated: {} in IANA but not in the database'\n .format(inf['Deprecated'], **r))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Basic(unittest.TestCase):\n <mask token>\n <mask token>\n\n def setUp(self):\n self.fname = os.path.join(os.path.dirname(__file__),\n '../source/langtags.csv')\n with open(self.fname) as csvfile:\n reader = csv.DictReader(csvfile, restkey='_')\n self.rows = list(reader)\n self.fieldnames = reader.fieldnames\n self.numlines = reader.line_num\n self.iana = Iana()\n\n def _region_test(self, x):\n if x in self.iana.region:\n return True\n elif x in ('XX', 'XK'):\n return True\n return False\n\n def _allRows(self):\n for r in self.rows:\n t = langtag(r['likely_subtag'])\n if t.lang.startswith('x-'):\n continue\n yield r, t\n\n def test_lang(self):\n \"\"\" Tests that all lang subtags are in iana \"\"\"\n fails = []\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:\n self.fail(\n '{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'\n .format(l.lang, t.lang, **r))\n if (t.lang not in self.iana.language and '-' not in t.lang and \n t.lang not in self.extraLangs):\n fails.append(r['Lang_Id'])\n if not l.test(fname=langtagjson):\n self.fail('{Lang_Id} failed conformance check'.format(**r))\n if len(fails):\n self.fail(f'{fails} langs not in IANA')\n\n def test_region(self):\n \"\"\" Test that region values are sensible and that they equal the default region.\n Unknown regions do not have to be specified. \"\"\"\n for r, t in self._allRows():\n reg = t.region\n if not self._region_test(t.region):\n self.fail('{likely_subtag} has irregular region'.format(**r))\n for s in r['regions'].split():\n if not self._region_test(s.strip()):\n self.fail('{Lang_Id} has irregular region: {0} in regions'\n .format(s, **r))\n\n def test_script(self):\n \"\"\" Qaa? 
type scripts must have an -x- for the script name \"\"\"\n for r, t in self._allRows():\n scr = t.script\n if scr is not None and (scr.startswith('Qaa') or scr.startswith\n ('Qab')):\n if scr not in ('Qaax', 'Qaby', 'Qabz') and (t.extensions is\n None or 'x' not in t.extensions):\n self.fail('{Lang_Id} has no extension for script name'.\n format(**r))\n elif scr not in self.iana.script and scr not in self.extraScripts:\n self.fail('{Lang_Id} has irregular script {}'.format(scr, **r))\n elif t.script not in self.iana.script and t.script not in self.extraScripts:\n self.fail('{likely_subtag} has irregular script'.format(**r))\n\n def test_variants(self):\n \"\"\" Test that all variants are in IANA \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.vars is None and l.vars is None:\n continue\n if sorted(t.vars) != sorted(l.vars):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different variants'\n .format(**r))\n for v in t.vars:\n if v not in self.iana.variant:\n self.fail('{likely_subtag} has bad variant {0}'.format(\n v, **r))\n\n def test_csv_columns(self):\n \"\"\" Test that everyone has the right number of columns \"\"\"\n lc = self.fieldnames[-1]\n for r in self.rows:\n if len(r.get('_', [])):\n self.fail('{Lang_Id} has too many columns'.format(**r))\n elif r[lc] is None:\n self.fail('{Lang_Id} has too few columns'.format(**r))\n\n def test_pua(self):\n \"\"\" Test that anything with -x- in Lang_Id has it in likely_subtag too \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.ns is None and l.ns is None:\n continue\n if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:\n continue\n if sorted(t.ns.keys()) != sorted(l.ns.keys()):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extension namespaces'\n .format(**r))\n for k, v in t.ns.items():\n if sorted(v) != sorted(l.ns[k]):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'\n .format(k, **r))\n\n def 
test_ascii(self):\n \"\"\" Test that all tags are pure ascii \"\"\"\n for r, t in self._allRows():\n for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',\n 'Macro', 'variants'):\n if nonascii(r[cid]):\n self.fail('{Lang_Id} has non ASCII in column {0} value {1}'\n .format(cid, r[cid], **r))\n\n def test_iso639(self):\n \"\"\" Test that the iso639 column is either empty or 3 lower ascii chars. \"\"\"\n k = 'ISO 639-3'\n for r, t in self._allRows():\n if r[k] == '':\n continue\n if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x\n ) < 123 for x in r[k]):\n self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'\n .format(**r))\n\n def test_deprecated(self):\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n inf = self.iana.language.get(l.lang, {})\n if 'Deprecated' in inf:\n if r['deprecated'] == '':\n self.fail(\n '{Lang_Id} was deprecated: {} in IANA but not in the database'\n .format(inf['Deprecated'], **r))\n\n\n<mask token>\n",
"step-3": "<mask token>\nlangtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub',\n 'langtags.json')\nbannedchars = list(range(33, 45)) + [47] + list(range(58, 63)) + [94, 96]\n\n\ndef nonascii(s):\n cs = [ord(x) for x in s]\n if any(not 32 <= x < 123 or x in bannedchars for x in cs):\n return True\n\n\nclass Basic(unittest.TestCase):\n extraScripts = ['Toto', 'Vith']\n extraLangs = ('000', 'cxh', 'dsk', 'dyr', 'eud', 'ikh', 'izm', 'lgs',\n 'lvl', 'nzr', 'pze', 'rsw', 'tvi', 'uly', 'vjk', 'wtb', 'ycr',\n 'ykh', 'zem', 'zlu')\n\n def setUp(self):\n self.fname = os.path.join(os.path.dirname(__file__),\n '../source/langtags.csv')\n with open(self.fname) as csvfile:\n reader = csv.DictReader(csvfile, restkey='_')\n self.rows = list(reader)\n self.fieldnames = reader.fieldnames\n self.numlines = reader.line_num\n self.iana = Iana()\n\n def _region_test(self, x):\n if x in self.iana.region:\n return True\n elif x in ('XX', 'XK'):\n return True\n return False\n\n def _allRows(self):\n for r in self.rows:\n t = langtag(r['likely_subtag'])\n if t.lang.startswith('x-'):\n continue\n yield r, t\n\n def test_lang(self):\n \"\"\" Tests that all lang subtags are in iana \"\"\"\n fails = []\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:\n self.fail(\n '{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'\n .format(l.lang, t.lang, **r))\n if (t.lang not in self.iana.language and '-' not in t.lang and \n t.lang not in self.extraLangs):\n fails.append(r['Lang_Id'])\n if not l.test(fname=langtagjson):\n self.fail('{Lang_Id} failed conformance check'.format(**r))\n if len(fails):\n self.fail(f'{fails} langs not in IANA')\n\n def test_region(self):\n \"\"\" Test that region values are sensible and that they equal the default region.\n Unknown regions do not have to be specified. 
\"\"\"\n for r, t in self._allRows():\n reg = t.region\n if not self._region_test(t.region):\n self.fail('{likely_subtag} has irregular region'.format(**r))\n for s in r['regions'].split():\n if not self._region_test(s.strip()):\n self.fail('{Lang_Id} has irregular region: {0} in regions'\n .format(s, **r))\n\n def test_script(self):\n \"\"\" Qaa? type scripts must have an -x- for the script name \"\"\"\n for r, t in self._allRows():\n scr = t.script\n if scr is not None and (scr.startswith('Qaa') or scr.startswith\n ('Qab')):\n if scr not in ('Qaax', 'Qaby', 'Qabz') and (t.extensions is\n None or 'x' not in t.extensions):\n self.fail('{Lang_Id} has no extension for script name'.\n format(**r))\n elif scr not in self.iana.script and scr not in self.extraScripts:\n self.fail('{Lang_Id} has irregular script {}'.format(scr, **r))\n elif t.script not in self.iana.script and t.script not in self.extraScripts:\n self.fail('{likely_subtag} has irregular script'.format(**r))\n\n def test_variants(self):\n \"\"\" Test that all variants are in IANA \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.vars is None and l.vars is None:\n continue\n if sorted(t.vars) != sorted(l.vars):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different variants'\n .format(**r))\n for v in t.vars:\n if v not in self.iana.variant:\n self.fail('{likely_subtag} has bad variant {0}'.format(\n v, **r))\n\n def test_csv_columns(self):\n \"\"\" Test that everyone has the right number of columns \"\"\"\n lc = self.fieldnames[-1]\n for r in self.rows:\n if len(r.get('_', [])):\n self.fail('{Lang_Id} has too many columns'.format(**r))\n elif r[lc] is None:\n self.fail('{Lang_Id} has too few columns'.format(**r))\n\n def test_pua(self):\n \"\"\" Test that anything with -x- in Lang_Id has it in likely_subtag too \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.ns is None and l.ns is None:\n continue\n if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) 
== 1:\n continue\n if sorted(t.ns.keys()) != sorted(l.ns.keys()):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extension namespaces'\n .format(**r))\n for k, v in t.ns.items():\n if sorted(v) != sorted(l.ns[k]):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'\n .format(k, **r))\n\n def test_ascii(self):\n \"\"\" Test that all tags are pure ascii \"\"\"\n for r, t in self._allRows():\n for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',\n 'Macro', 'variants'):\n if nonascii(r[cid]):\n self.fail('{Lang_Id} has non ASCII in column {0} value {1}'\n .format(cid, r[cid], **r))\n\n def test_iso639(self):\n \"\"\" Test that the iso639 column is either empty or 3 lower ascii chars. \"\"\"\n k = 'ISO 639-3'\n for r, t in self._allRows():\n if r[k] == '':\n continue\n if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x\n ) < 123 for x in r[k]):\n self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'\n .format(**r))\n\n def test_deprecated(self):\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n inf = self.iana.language.get(l.lang, {})\n if 'Deprecated' in inf:\n if r['deprecated'] == '':\n self.fail(\n '{Lang_Id} was deprecated: {} in IANA but not in the database'\n .format(inf['Deprecated'], **r))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import os, re\nimport csv, unittest\nfrom langtag import langtag\nfrom sldr.iana import Iana\nlangtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub',\n 'langtags.json')\nbannedchars = list(range(33, 45)) + [47] + list(range(58, 63)) + [94, 96]\n\n\ndef nonascii(s):\n cs = [ord(x) for x in s]\n if any(not 32 <= x < 123 or x in bannedchars for x in cs):\n return True\n\n\nclass Basic(unittest.TestCase):\n extraScripts = ['Toto', 'Vith']\n extraLangs = ('000', 'cxh', 'dsk', 'dyr', 'eud', 'ikh', 'izm', 'lgs',\n 'lvl', 'nzr', 'pze', 'rsw', 'tvi', 'uly', 'vjk', 'wtb', 'ycr',\n 'ykh', 'zem', 'zlu')\n\n def setUp(self):\n self.fname = os.path.join(os.path.dirname(__file__),\n '../source/langtags.csv')\n with open(self.fname) as csvfile:\n reader = csv.DictReader(csvfile, restkey='_')\n self.rows = list(reader)\n self.fieldnames = reader.fieldnames\n self.numlines = reader.line_num\n self.iana = Iana()\n\n def _region_test(self, x):\n if x in self.iana.region:\n return True\n elif x in ('XX', 'XK'):\n return True\n return False\n\n def _allRows(self):\n for r in self.rows:\n t = langtag(r['likely_subtag'])\n if t.lang.startswith('x-'):\n continue\n yield r, t\n\n def test_lang(self):\n \"\"\" Tests that all lang subtags are in iana \"\"\"\n fails = []\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if l.lang != t.lang and '-' not in l.lang and '-' not in t.lang:\n self.fail(\n '{Lang_Id} has different lang to {likely_subtag} ({0} != {1})'\n .format(l.lang, t.lang, **r))\n if (t.lang not in self.iana.language and '-' not in t.lang and \n t.lang not in self.extraLangs):\n fails.append(r['Lang_Id'])\n if not l.test(fname=langtagjson):\n self.fail('{Lang_Id} failed conformance check'.format(**r))\n if len(fails):\n self.fail(f'{fails} langs not in IANA')\n\n def test_region(self):\n \"\"\" Test that region values are sensible and that they equal the default region.\n Unknown regions do not have to be specified. 
\"\"\"\n for r, t in self._allRows():\n reg = t.region\n if not self._region_test(t.region):\n self.fail('{likely_subtag} has irregular region'.format(**r))\n for s in r['regions'].split():\n if not self._region_test(s.strip()):\n self.fail('{Lang_Id} has irregular region: {0} in regions'\n .format(s, **r))\n\n def test_script(self):\n \"\"\" Qaa? type scripts must have an -x- for the script name \"\"\"\n for r, t in self._allRows():\n scr = t.script\n if scr is not None and (scr.startswith('Qaa') or scr.startswith\n ('Qab')):\n if scr not in ('Qaax', 'Qaby', 'Qabz') and (t.extensions is\n None or 'x' not in t.extensions):\n self.fail('{Lang_Id} has no extension for script name'.\n format(**r))\n elif scr not in self.iana.script and scr not in self.extraScripts:\n self.fail('{Lang_Id} has irregular script {}'.format(scr, **r))\n elif t.script not in self.iana.script and t.script not in self.extraScripts:\n self.fail('{likely_subtag} has irregular script'.format(**r))\n\n def test_variants(self):\n \"\"\" Test that all variants are in IANA \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.vars is None and l.vars is None:\n continue\n if sorted(t.vars) != sorted(l.vars):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different variants'\n .format(**r))\n for v in t.vars:\n if v not in self.iana.variant:\n self.fail('{likely_subtag} has bad variant {0}'.format(\n v, **r))\n\n def test_csv_columns(self):\n \"\"\" Test that everyone has the right number of columns \"\"\"\n lc = self.fieldnames[-1]\n for r in self.rows:\n if len(r.get('_', [])):\n self.fail('{Lang_Id} has too many columns'.format(**r))\n elif r[lc] is None:\n self.fail('{Lang_Id} has too few columns'.format(**r))\n\n def test_pua(self):\n \"\"\" Test that anything with -x- in Lang_Id has it in likely_subtag too \"\"\"\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.ns is None and l.ns is None:\n continue\n if len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) 
== 1:\n continue\n if sorted(t.ns.keys()) != sorted(l.ns.keys()):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extension namespaces'\n .format(**r))\n for k, v in t.ns.items():\n if sorted(v) != sorted(l.ns[k]):\n self.fail(\n '{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace'\n .format(k, **r))\n\n def test_ascii(self):\n \"\"\" Test that all tags are pure ascii \"\"\"\n for r, t in self._allRows():\n for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3',\n 'Macro', 'variants'):\n if nonascii(r[cid]):\n self.fail('{Lang_Id} has non ASCII in column {0} value {1}'\n .format(cid, r[cid], **r))\n\n def test_iso639(self):\n \"\"\" Test that the iso639 column is either empty or 3 lower ascii chars. \"\"\"\n k = 'ISO 639-3'\n for r, t in self._allRows():\n if r[k] == '':\n continue\n if len(r[k]) != 3 or r[k].lower() != r[k] or any(not 96 < ord(x\n ) < 123 for x in r[k]):\n self.fail('{Lang_Id} has faulty ISO639 code of {ISO 639-3}'\n .format(**r))\n\n def test_deprecated(self):\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n inf = self.iana.language.get(l.lang, {})\n if 'Deprecated' in inf:\n if r['deprecated'] == '':\n self.fail(\n '{Lang_Id} was deprecated: {} in IANA but not in the database'\n .format(inf['Deprecated'], **r))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/python3\n\nimport os, re\nimport csv, unittest\nfrom langtag import langtag\nfrom sldr.iana import Iana\n\nlangtagjson = os.path.join(os.path.dirname(__file__), '..', 'pub', 'langtags.json')\nbannedchars = list(range(33, 45)) + [47] + list(range(58, 63)) + [94, 96]\ndef nonascii(s):\n cs = [ord(x) for x in s]\n if any(not (32 <= x < 123) or x in bannedchars for x in cs):\n return True\n\nclass Basic(unittest.TestCase):\n\n extraScripts = [\"Toto\", \"Vith\"]\n extraLangs = (\"000\", \n \"cxh\", \"dsk\", \"dyr\", \"eud\", \"ikh\", \"izm\", \"lgs\", # going in ~23/Mar/2023\n 'lvl', 'nzr', 'pze', 'rsw', 'tvi', 'uly', 'vjk', 'wtb', 'ycr', 'ykh', 'zem', 'zlu') # going in ~23/Mar/2023\n\n def setUp(self):\n self.fname = os.path.join(os.path.dirname(__file__), '../source/langtags.csv')\n with open(self.fname) as csvfile:\n reader = csv.DictReader(csvfile, restkey=\"_\")\n self.rows = list(reader)\n self.fieldnames = reader.fieldnames\n self.numlines = reader.line_num\n self.iana = Iana()\n\n def _region_test(self, x):\n if x in self.iana.region:\n return True\n elif x in (\"XX\", \"XK\"):\n return True\n return False\n\n def _allRows(self):\n for r in self.rows:\n t = langtag(r['likely_subtag'])\n if t.lang.startswith(\"x-\"):\n continue\n yield (r, t)\n\n def test_lang(self):\n ''' Tests that all lang subtags are in iana '''\n fails = []\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if l.lang != t.lang and \"-\" not in l.lang and \"-\" not in t.lang:\n self.fail(\"{Lang_Id} has different lang to {likely_subtag} ({0} != {1})\".format(l.lang, t.lang, **r))\n if t.lang not in self.iana.language and \"-\" not in t.lang and t.lang not in self.extraLangs:\n fails.append(r['Lang_Id'])\n if not l.test(fname=langtagjson):\n self.fail(\"{Lang_Id} failed conformance check\".format(**r))\n if len(fails):\n self.fail(f\"{fails} langs not in IANA\")\n\n\n def test_region(self):\n ''' Test that region values are sensible and that they equal the 
default region.\n Unknown regions do not have to be specified. '''\n for r,t in self._allRows():\n reg = t.region\n if not self._region_test(t.region):\n self.fail(\"{likely_subtag} has irregular region\".format(**r))\n for s in r['regions'].split():\n if not self._region_test(s.strip()):\n self.fail(\"{Lang_Id} has irregular region: {0} in regions\".format(s, **r))\n\n def test_script(self):\n ''' Qaa? type scripts must have an -x- for the script name '''\n for r, t in self._allRows():\n scr = t.script\n if scr is not None and (scr.startswith(\"Qaa\") or scr.startswith(\"Qab\")):\n if scr not in (\"Qaax\", \"Qaby\", \"Qabz\") and (t.extensions is None or 'x' not in t.extensions):\n self.fail(\"{Lang_Id} has no extension for script name\".format(**r))\n elif scr not in self.iana.script and scr not in self.extraScripts:\n self.fail(\"{Lang_Id} has irregular script {}\".format(scr, **r))\n elif t.script not in self.iana.script and t.script not in self.extraScripts:\n self.fail(\"{likely_subtag} has irregular script\".format(**r))\n\n def test_variants(self):\n ''' Test that all variants are in IANA '''\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.vars is None and l.vars is None:\n continue\n if sorted(t.vars) != sorted(l.vars):\n self.fail(\"{Lang_Id} and {likely_subtag} have different variants\".format(**r))\n for v in t.vars:\n if v not in self.iana.variant:\n self.fail(\"{likely_subtag} has bad variant {0}\".format(v, **r))\n\n def test_csv_columns(self):\n ''' Test that everyone has the right number of columns '''\n lc = self.fieldnames[-1]\n for r in self.rows:\n if len(r.get(\"_\", [])):\n self.fail(\"{Lang_Id} has too many columns\".format(**r))\n elif r[lc] is None:\n self.fail(\"{Lang_Id} has too few columns\".format(**r))\n\n def test_pua(self):\n ''' Test that anything with -x- in Lang_Id has it in likely_subtag too '''\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n if t.ns is None and l.ns is None:\n continue\n if 
len(t.ns) == 1 and 'x' in t.ns and len(t.ns['x']) == 1:\n continue # allow a private script extension\n if sorted(t.ns.keys()) != sorted(l.ns.keys()):\n self.fail(\"{Lang_Id} and {likely_subtag} have different extension namespaces\".format(**r))\n for k, v in t.ns.items():\n if sorted(v) != sorted(l.ns[k]):\n self.fail(\"{Lang_Id} and {likely_subtag} have different extensions in the {0} namespace\".format(k, **r))\n\n def test_ascii(self):\n ''' Test that all tags are pure ascii '''\n for r, t in self._allRows():\n for cid in ('Lang_Id', 'likely_subtag', 'regions', 'ISO 639-3', 'Macro', 'variants'):\n if nonascii(r[cid]):\n self.fail(\"{Lang_Id} has non ASCII in column {0} value {1}\".format(cid, r[cid], **r))\n\n def test_iso639(self):\n ''' Test that the iso639 column is either empty or 3 lower ascii chars. '''\n k = 'ISO 639-3'\n for r, t in self._allRows():\n if r[k] == '':\n continue\n if len(r[k]) != 3 or r[k].lower() != r[k] or any(not (96 < ord(x) < 123) for x in r[k]):\n self.fail(\"{Lang_Id} has faulty ISO639 code of {ISO 639-3}\".format(**r))\n\n def test_deprecated(self):\n for r, t in self._allRows():\n l = langtag(r['Lang_Id'])\n inf = self.iana.language.get(l.lang, {})\n if 'Deprecated' in inf:\n if r['deprecated'] == '':\n self.fail(\"{Lang_Id} was deprecated: {} in IANA but not in the database\".format(inf['Deprecated'], **r))\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
11,
13,
17,
18,
19
]
}
|
[
11,
13,
17,
18,
19
] |
from azfs.az_file_client import (
AzFileClient,
export_decorator
)
from azfs.az_file_system import AzFileSystem
from azfs.utils import BlobPathDecoder
from .table_storage import (
TableStorage,
TableStorageWrapper
)
# comparable tuple
VERSION = (0, 2, 14)
# generate __version__ via VERSION tuple
__version__ = ".".join(map(str, VERSION))
__all__ = [
"AzFileClient",
"AzFileSystem",
"BlobPathDecoder",
"TableStorage",
"TableStorageWrapper",
"export_decorator"
]
|
normal
|
{
"blob_id": "e7239b4bc3db9bd427b9be888621f66e81b5edeb",
"index": 2242,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nVERSION = 0, 2, 14\n__version__ = '.'.join(map(str, VERSION))\n__all__ = ['AzFileClient', 'AzFileSystem', 'BlobPathDecoder',\n 'TableStorage', 'TableStorageWrapper', 'export_decorator']\n",
"step-3": "from azfs.az_file_client import AzFileClient, export_decorator\nfrom azfs.az_file_system import AzFileSystem\nfrom azfs.utils import BlobPathDecoder\nfrom .table_storage import TableStorage, TableStorageWrapper\nVERSION = 0, 2, 14\n__version__ = '.'.join(map(str, VERSION))\n__all__ = ['AzFileClient', 'AzFileSystem', 'BlobPathDecoder',\n 'TableStorage', 'TableStorageWrapper', 'export_decorator']\n",
"step-4": "from azfs.az_file_client import (\n AzFileClient,\n export_decorator\n)\n\nfrom azfs.az_file_system import AzFileSystem\nfrom azfs.utils import BlobPathDecoder\n\nfrom .table_storage import (\n TableStorage,\n TableStorageWrapper\n)\n\n# comparable tuple\nVERSION = (0, 2, 14)\n# generate __version__ via VERSION tuple\n__version__ = \".\".join(map(str, VERSION))\n\n__all__ = [\n \"AzFileClient\",\n \"AzFileSystem\",\n \"BlobPathDecoder\",\n \"TableStorage\",\n \"TableStorageWrapper\",\n \"export_decorator\"\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class BytesWriter:
<|reserved_special_token_0|>
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BytesWriter:
def write(self, data: bytes) ->None:
...
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.version_info >= (3, 11):
from typing import assert_type
else:
from typing_extensions import assert_type
str_path: str
pathlib_path: pathlib.Path
str_file: IO[str]
bytes_file: IO[bytes]
npz_file: np.lib.npyio.NpzFile
AR_i8: npt.NDArray[np.int64]
AR_LIKE_f8: list[float]
class BytesWriter:
def write(self, data: bytes) ->None:
...
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
bytes_writer: BytesWriter
bytes_reader: BytesReader
assert_type(npz_file.zip, zipfile.ZipFile)
assert_type(npz_file.fid, None | IO[str])
assert_type(npz_file.files, list[str])
assert_type(npz_file.allow_pickle, bool)
assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])
assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])
assert_type(npz_file['test'], npt.NDArray[Any])
assert_type(len(npz_file), int)
with npz_file as f:
assert_type(f, np.lib.npyio.NpzFile)
assert_type(np.load(bytes_file), Any)
assert_type(np.load(pathlib_path, allow_pickle=True), Any)
assert_type(np.load(str_path, encoding='bytes'), Any)
assert_type(np.load(bytes_reader), Any)
assert_type(np.save(bytes_file, AR_LIKE_f8), None)
assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)
assert_type(np.save(str_path, AR_LIKE_f8), None)
assert_type(np.save(bytes_writer, AR_LIKE_f8), None)
assert_type(np.savez(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])
assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])
assert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, delimiter='\n'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])
assert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])
assert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,
encoding='utf8'), npt.NDArray[np.str_])
assert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])
assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]
)
assert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, delimiter='\n'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])
assert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])
<|reserved_special_token_1|>
import re
import sys
import zipfile
import pathlib
from typing import IO, Any
from collections.abc import Mapping
import numpy.typing as npt
import numpy as np
from numpy.lib._npyio_impl import BagObj
if sys.version_info >= (3, 11):
from typing import assert_type
else:
from typing_extensions import assert_type
str_path: str
pathlib_path: pathlib.Path
str_file: IO[str]
bytes_file: IO[bytes]
npz_file: np.lib.npyio.NpzFile
AR_i8: npt.NDArray[np.int64]
AR_LIKE_f8: list[float]
class BytesWriter:
def write(self, data: bytes) ->None:
...
class BytesReader:
def read(self, n: int=...) ->bytes:
...
def seek(self, offset: int, whence: int=...) ->int:
...
bytes_writer: BytesWriter
bytes_reader: BytesReader
assert_type(npz_file.zip, zipfile.ZipFile)
assert_type(npz_file.fid, None | IO[str])
assert_type(npz_file.files, list[str])
assert_type(npz_file.allow_pickle, bool)
assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])
assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])
assert_type(npz_file['test'], npt.NDArray[Any])
assert_type(len(npz_file), int)
with npz_file as f:
assert_type(f, np.lib.npyio.NpzFile)
assert_type(np.load(bytes_file), Any)
assert_type(np.load(pathlib_path, allow_pickle=True), Any)
assert_type(np.load(str_path, encoding='bytes'), Any)
assert_type(np.load(bytes_reader), Any)
assert_type(np.save(bytes_file, AR_LIKE_f8), None)
assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)
assert_type(np.save(str_path, AR_LIKE_f8), None)
assert_type(np.save(bytes_writer, AR_LIKE_f8), None)
assert_type(np.savez(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])
assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])
assert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, delimiter='\n'), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])
assert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])
assert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,
encoding='utf8'), npt.NDArray[np.str_])
assert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.
float64])
assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])
assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]
)
assert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, delimiter='\n'), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])
assert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])
<|reserved_special_token_1|>
import re
import sys
import zipfile
import pathlib
from typing import IO, Any
from collections.abc import Mapping
import numpy.typing as npt
import numpy as np
from numpy.lib._npyio_impl import BagObj
if sys.version_info >= (3, 11):
from typing import assert_type
else:
from typing_extensions import assert_type
str_path: str
pathlib_path: pathlib.Path
str_file: IO[str]
bytes_file: IO[bytes]
npz_file: np.lib.npyio.NpzFile
AR_i8: npt.NDArray[np.int64]
AR_LIKE_f8: list[float]
class BytesWriter:
def write(self, data: bytes) -> None: ...
class BytesReader:
def read(self, n: int = ...) -> bytes: ...
def seek(self, offset: int, whence: int = ...) -> int: ...
bytes_writer: BytesWriter
bytes_reader: BytesReader
assert_type(npz_file.zip, zipfile.ZipFile)
assert_type(npz_file.fid, None | IO[str])
assert_type(npz_file.files, list[str])
assert_type(npz_file.allow_pickle, bool)
assert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])
assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])
assert_type(npz_file["test"], npt.NDArray[Any])
assert_type(len(npz_file), int)
with npz_file as f:
assert_type(f, np.lib.npyio.NpzFile)
assert_type(np.load(bytes_file), Any)
assert_type(np.load(pathlib_path, allow_pickle=True), Any)
assert_type(np.load(str_path, encoding="bytes"), Any)
assert_type(np.load(bytes_reader), Any)
assert_type(np.save(bytes_file, AR_LIKE_f8), None)
assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)
assert_type(np.save(str_path, AR_LIKE_f8), None)
assert_type(np.save(bytes_writer, AR_LIKE_f8), None)
assert_type(np.savez(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)
assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)
assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)
assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])
assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])
assert_type(np.loadtxt(str_file, comments="test"), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64])
assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])
assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any])
assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_])
assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64])
assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])
assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])
assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any])
assert_type(np.genfromtxt(str_file, comments="test"), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, delimiter="\n"), npt.NDArray[Any])
assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])
assert_type(np.genfromtxt(["1", "2", "3"], ndmin=2), npt.NDArray[Any])
|
flexible
|
{
"blob_id": "e2f134f5ff00405396b8bbf4edc263b70ef5d972",
"index": 2435,
"step-1": "<mask token>\n\n\nclass BytesWriter:\n <mask token>\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BytesWriter:\n\n def write(self, data: bytes) ->None:\n ...\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\n<mask token>\n",
"step-3": "<mask token>\nif sys.version_info >= (3, 11):\n from typing import assert_type\nelse:\n from typing_extensions import assert_type\nstr_path: str\npathlib_path: pathlib.Path\nstr_file: IO[str]\nbytes_file: IO[bytes]\nnpz_file: np.lib.npyio.NpzFile\nAR_i8: npt.NDArray[np.int64]\nAR_LIKE_f8: list[float]\n\n\nclass BytesWriter:\n\n def write(self, data: bytes) ->None:\n ...\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\nbytes_writer: BytesWriter\nbytes_reader: BytesReader\nassert_type(npz_file.zip, zipfile.ZipFile)\nassert_type(npz_file.fid, None | IO[str])\nassert_type(npz_file.files, list[str])\nassert_type(npz_file.allow_pickle, bool)\nassert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])\nassert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])\nassert_type(npz_file['test'], npt.NDArray[Any])\nassert_type(len(npz_file), int)\nwith npz_file as f:\n assert_type(f, np.lib.npyio.NpzFile)\nassert_type(np.load(bytes_file), Any)\nassert_type(np.load(pathlib_path, allow_pickle=True), Any)\nassert_type(np.load(str_path, encoding='bytes'), Any)\nassert_type(np.load(bytes_reader), Any)\nassert_type(np.save(bytes_file, AR_LIKE_f8), None)\nassert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)\nassert_type(np.save(str_path, AR_LIKE_f8), None)\nassert_type(np.save(bytes_writer, AR_LIKE_f8), None)\nassert_type(np.savez(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.loadtxt(bytes_file), 
npt.NDArray[np.float64])\nassert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])\nassert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, delimiter='\\n'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])\nassert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])\nassert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])\nassert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,\n encoding='utf8'), npt.NDArray[np.str_])\nassert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])\nassert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]\n )\nassert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, delimiter='\\n'), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])\nassert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])\n",
"step-4": "import re\nimport sys\nimport zipfile\nimport pathlib\nfrom typing import IO, Any\nfrom collections.abc import Mapping\nimport numpy.typing as npt\nimport numpy as np\nfrom numpy.lib._npyio_impl import BagObj\nif sys.version_info >= (3, 11):\n from typing import assert_type\nelse:\n from typing_extensions import assert_type\nstr_path: str\npathlib_path: pathlib.Path\nstr_file: IO[str]\nbytes_file: IO[bytes]\nnpz_file: np.lib.npyio.NpzFile\nAR_i8: npt.NDArray[np.int64]\nAR_LIKE_f8: list[float]\n\n\nclass BytesWriter:\n\n def write(self, data: bytes) ->None:\n ...\n\n\nclass BytesReader:\n\n def read(self, n: int=...) ->bytes:\n ...\n\n def seek(self, offset: int, whence: int=...) ->int:\n ...\n\n\nbytes_writer: BytesWriter\nbytes_reader: BytesReader\nassert_type(npz_file.zip, zipfile.ZipFile)\nassert_type(npz_file.fid, None | IO[str])\nassert_type(npz_file.files, list[str])\nassert_type(npz_file.allow_pickle, bool)\nassert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])\nassert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])\nassert_type(npz_file['test'], npt.NDArray[Any])\nassert_type(len(npz_file), int)\nwith npz_file as f:\n assert_type(f, np.lib.npyio.NpzFile)\nassert_type(np.load(bytes_file), Any)\nassert_type(np.load(pathlib_path, allow_pickle=True), Any)\nassert_type(np.load(str_path, encoding='bytes'), Any)\nassert_type(np.load(bytes_reader), Any)\nassert_type(np.save(bytes_file, AR_LIKE_f8), None)\nassert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)\nassert_type(np.save(str_path, AR_LIKE_f8), None)\nassert_type(np.save(bytes_writer, AR_LIKE_f8), None)\nassert_type(np.savez(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), 
None)\nassert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])\nassert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])\nassert_type(np.loadtxt(str_file, comments='test'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, delimiter='\\n'), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])\nassert_type(np.loadtxt(['1', '2', '3']), npt.NDArray[np.float64])\nassert_type(np.fromregex(bytes_file, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(str_file, b'test', dtype=float), npt.NDArray[Any])\nassert_type(np.fromregex(str_path, re.compile('test'), dtype=np.str_,\n encoding='utf8'), npt.NDArray[np.str_])\nassert_type(np.fromregex(pathlib_path, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.fromregex(bytes_reader, 'test', np.float64), npt.NDArray[np.\n float64])\nassert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])\nassert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]\n )\nassert_type(np.genfromtxt(str_file, comments='test'), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, delimiter='\\n'), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])\nassert_type(np.genfromtxt(['1', '2', '3'], ndmin=2), npt.NDArray[Any])\n",
"step-5": "import re\nimport sys\nimport zipfile\nimport pathlib\nfrom typing import IO, Any\nfrom collections.abc import Mapping\n\nimport numpy.typing as npt\nimport numpy as np\nfrom numpy.lib._npyio_impl import BagObj\n\nif sys.version_info >= (3, 11):\n from typing import assert_type\nelse:\n from typing_extensions import assert_type\n\nstr_path: str\npathlib_path: pathlib.Path\nstr_file: IO[str]\nbytes_file: IO[bytes]\n\nnpz_file: np.lib.npyio.NpzFile\n\nAR_i8: npt.NDArray[np.int64]\nAR_LIKE_f8: list[float]\n\nclass BytesWriter:\n def write(self, data: bytes) -> None: ...\n\nclass BytesReader:\n def read(self, n: int = ...) -> bytes: ...\n def seek(self, offset: int, whence: int = ...) -> int: ...\n\nbytes_writer: BytesWriter\nbytes_reader: BytesReader\n\nassert_type(npz_file.zip, zipfile.ZipFile)\nassert_type(npz_file.fid, None | IO[str])\nassert_type(npz_file.files, list[str])\nassert_type(npz_file.allow_pickle, bool)\nassert_type(npz_file.pickle_kwargs, None | Mapping[str, Any])\nassert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile])\nassert_type(npz_file[\"test\"], npt.NDArray[Any])\nassert_type(len(npz_file), int)\nwith npz_file as f:\n assert_type(f, np.lib.npyio.NpzFile)\n\nassert_type(np.load(bytes_file), Any)\nassert_type(np.load(pathlib_path, allow_pickle=True), Any)\nassert_type(np.load(str_path, encoding=\"bytes\"), Any)\nassert_type(np.load(bytes_reader), Any)\n\nassert_type(np.save(bytes_file, AR_LIKE_f8), None)\nassert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None)\nassert_type(np.save(str_path, AR_LIKE_f8), None)\nassert_type(np.save(bytes_writer, AR_LIKE_f8), None)\n\nassert_type(np.savez(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None)\nassert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\n\nassert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None)\nassert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, 
ar2=AR_i8), None)\nassert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None)\nassert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None)\n\nassert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64])\nassert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any])\nassert_type(np.loadtxt(str_file, comments=\"test\"), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, delimiter=\"\\n\"), npt.NDArray[np.float64])\nassert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64])\nassert_type(np.loadtxt([\"1\", \"2\", \"3\"]), npt.NDArray[np.float64])\n\nassert_type(np.fromregex(bytes_file, \"test\", np.float64), npt.NDArray[np.float64])\nassert_type(np.fromregex(str_file, b\"test\", dtype=float), npt.NDArray[Any])\nassert_type(np.fromregex(str_path, re.compile(\"test\"), dtype=np.str_, encoding=\"utf8\"), npt.NDArray[np.str_])\nassert_type(np.fromregex(pathlib_path, \"test\", np.float64), npt.NDArray[np.float64])\nassert_type(np.fromregex(bytes_reader, \"test\", np.float64), npt.NDArray[np.float64])\n\nassert_type(np.genfromtxt(bytes_file), npt.NDArray[Any])\nassert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_])\nassert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_file, comments=\"test\"), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, delimiter=\"\\n\"), npt.NDArray[Any])\nassert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any])\nassert_type(np.genfromtxt([\"1\", \"2\", \"3\"], ndmin=2), npt.NDArray[Any])\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'adverts'
urlpatterns = [path('', views.AdvertListView.as_view(), name='list'), path(
'create/', views.AdvertFormView.as_view(), name='adverts-create'), path
('<str:category>/', views.AdvertListView.as_view(), name=
'adverts-list-categories')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
app_name = 'adverts'
urlpatterns = [path('', views.AdvertListView.as_view(), name='list'), path(
'create/', views.AdvertFormView.as_view(), name='adverts-create'), path
('<str:category>/', views.AdvertListView.as_view(), name=
'adverts-list-categories')]
<|reserved_special_token_1|>
# URL configuration for the adverts app.
from django.urls import path
from . import views
# Namespace for reversing, e.g. reverse('adverts:list').
app_name = 'adverts'
urlpatterns = [
    # All adverts, unfiltered.
    path('', views.AdvertListView.as_view(), name="list"),
    # Advert creation form.
    path('create/', views.AdvertFormView.as_view(), name='adverts-create'),
    # Adverts filtered by category; must stay last so 'create/' matches first.
    path('<str:category>/', views.AdvertListView.as_view(), name="adverts-list-categories"),
]
|
flexible
|
{
"blob_id": "8c1718f56a73fdd962154abfaedc7c0c3cb0d9ba",
"index": 6626,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'adverts'\nurlpatterns = [path('', views.AdvertListView.as_view(), name='list'), path(\n 'create/', views.AdvertFormView.as_view(), name='adverts-create'), path\n ('<str:category>/', views.AdvertListView.as_view(), name=\n 'adverts-list-categories')]\n",
"step-3": "from django.urls import path\nfrom . import views\napp_name = 'adverts'\nurlpatterns = [path('', views.AdvertListView.as_view(), name='list'), path(\n 'create/', views.AdvertFormView.as_view(), name='adverts-create'), path\n ('<str:category>/', views.AdvertListView.as_view(), name=\n 'adverts-list-categories')]\n",
"step-4": "from django.urls import path\n\nfrom . import views\n\napp_name = 'adverts'\n\nurlpatterns = [\n path('', views.AdvertListView.as_view(), name=\"list\"),\n path('create/', views.AdvertFormView.as_view(), name='adverts-create'),\n path('<str:category>/', views.AdvertListView.as_view(), name=\"adverts-list-categories\"),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class DeviceBGPSession(PluginTemplateExtension):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeviceBGPSession(PluginTemplateExtension):
<|reserved_special_token_0|>
def left_page(self):
if self.context['config'].get('device_ext_page') == 'left':
return self.x_page()
return ''
def right_page(self):
if self.context['config'].get('device_ext_page') == 'right':
return self.x_page()
return ''
<|reserved_special_token_0|>
def x_page(self):
obj = self.context['object']
sess = BGPSession.objects.filter(device=obj)
sess_table = BGPSessionTable(sess)
return self.render('netbox_bgp/device_extend.html', extra_context={
'related_session_table': sess_table})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeviceBGPSession(PluginTemplateExtension):
model = 'dcim.device'
def left_page(self):
if self.context['config'].get('device_ext_page') == 'left':
return self.x_page()
return ''
def right_page(self):
if self.context['config'].get('device_ext_page') == 'right':
return self.x_page()
return ''
def full_width_page(self):
if self.context['config'].get('device_ext_page') == 'full_width':
return self.x_page()
return ''
def x_page(self):
obj = self.context['object']
sess = BGPSession.objects.filter(device=obj)
sess_table = BGPSessionTable(sess)
return self.render('netbox_bgp/device_extend.html', extra_context={
'related_session_table': sess_table})
template_extensions = [DeviceBGPSession]
<|reserved_special_token_1|>
from extras.plugins import PluginTemplateExtension
from .models import BGPSession
from .tables import BGPSessionTable
class DeviceBGPSession(PluginTemplateExtension):
    """Injects a table of a device's BGP sessions into the device detail page.

    Where the table is rendered (left column, right column, or full width) is
    driven by the plugin's ``device_ext_page`` configuration value; the
    non-matching placement hooks return an empty string.
    """
    # Attach this extension to the DCIM device detail view.
    model = 'dcim.device'
    def left_page(self):
        """Render the session table in the left column when configured to."""
        if self.context['config'].get('device_ext_page') == 'left':
            return self.x_page()
        return ''
    def right_page(self):
        """Render the session table in the right column when configured to."""
        if self.context['config'].get('device_ext_page') == 'right':
            return self.x_page()
        return ''
    def full_width_page(self):
        """Render the session table across the full width when configured to."""
        if self.context['config'].get('device_ext_page') == 'full_width':
            return self.x_page()
        return ''
    def x_page(self):
        """Build and render the BGP-session table for the current device."""
        obj = self.context['object']
        sess = BGPSession.objects.filter(device=obj)
        sess_table = BGPSessionTable(sess)
        return self.render('netbox_bgp/device_extend.html', extra_context={
            'related_session_table': sess_table})
# Entry point discovered by the NetBox plugin framework.
template_extensions = [DeviceBGPSession]
|
flexible
|
{
"blob_id": "be566041402dc1705aa9d644edc44de8792fbb3c",
"index": 4850,
"step-1": "<mask token>\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n <mask token>\n\n def left_page(self):\n if self.context['config'].get('device_ext_page') == 'left':\n return self.x_page()\n return ''\n\n def right_page(self):\n if self.context['config'].get('device_ext_page') == 'right':\n return self.x_page()\n return ''\n <mask token>\n\n def x_page(self):\n obj = self.context['object']\n sess = BGPSession.objects.filter(device=obj)\n sess_table = BGPSessionTable(sess)\n return self.render('netbox_bgp/device_extend.html', extra_context={\n 'related_session_table': sess_table})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n model = 'dcim.device'\n\n def left_page(self):\n if self.context['config'].get('device_ext_page') == 'left':\n return self.x_page()\n return ''\n\n def right_page(self):\n if self.context['config'].get('device_ext_page') == 'right':\n return self.x_page()\n return ''\n\n def full_width_page(self):\n if self.context['config'].get('device_ext_page') == 'full_width':\n return self.x_page()\n return ''\n\n def x_page(self):\n obj = self.context['object']\n sess = BGPSession.objects.filter(device=obj)\n sess_table = BGPSessionTable(sess)\n return self.render('netbox_bgp/device_extend.html', extra_context={\n 'related_session_table': sess_table})\n\n\ntemplate_extensions = [DeviceBGPSession]\n",
"step-4": "from extras.plugins import PluginTemplateExtension\nfrom .models import BGPSession\nfrom .tables import BGPSessionTable\n\n\nclass DeviceBGPSession(PluginTemplateExtension):\n model = 'dcim.device'\n\n def left_page(self):\n if self.context['config'].get('device_ext_page') == 'left':\n return self.x_page()\n return ''\n\n def right_page(self):\n if self.context['config'].get('device_ext_page') == 'right':\n return self.x_page()\n return ''\n\n def full_width_page(self):\n if self.context['config'].get('device_ext_page') == 'full_width':\n return self.x_page()\n return ''\n\n def x_page(self):\n obj = self.context['object']\n sess = BGPSession.objects.filter(device=obj)\n sess_table = BGPSessionTable(sess)\n return self.render('netbox_bgp/device_extend.html', extra_context={\n 'related_session_table': sess_table})\n\n\ntemplate_extensions = [DeviceBGPSession]\n",
"step-5": null,
"step-ids": [
1,
4,
7,
8
]
}
|
[
1,
4,
7,
8
] |
# Minimal Flask demo app with two routes, launched through Flask-Script's
# CLI manager (run with e.g. `python app.py runserver`).
# NOTE(review): flask_script is unmaintained and incompatible with recent
# Flask releases — consider migrating to the built-in `flask` CLI; confirm
# the target Flask version before changing.
from flask import Flask
from flask_script import Manager
app = Flask(__name__)
manager = Manager(app)
@app.route('/')
def index():
    """Landing page: returns a fixed greeting string."""
    return '2018/6/1 hello python'
@app.route('/news')
def news():
    """News page: returns a fixed prompt string (Chinese)."""
    return '内蒙古新闻资讯,请选择浏览'
if __name__ == '__main__':
    # Delegate argument parsing and server startup to Flask-Script.
    manager.run()
|
normal
|
{
"blob_id": "f9d8280d765826b05bfa7989645e487431799f85",
"index": 7809,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return '2018/6/1 hello python'\n\n\n@app.route('/news')\ndef news():\n return '内蒙古新闻资讯,请选择浏览'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return '2018/6/1 hello python'\n\n\n@app.route('/news')\ndef news():\n return '内蒙古新闻资讯,请选择浏览'\n\n\nif __name__ == '__main__':\n manager.run()\n",
"step-3": "<mask token>\napp = Flask(__name__)\nmanager = Manager(app)\n\n\n@app.route('/')\ndef index():\n return '2018/6/1 hello python'\n\n\n@app.route('/news')\ndef news():\n return '内蒙古新闻资讯,请选择浏览'\n\n\nif __name__ == '__main__':\n manager.run()\n",
"step-4": "from flask import Flask\nfrom flask_script import Manager\napp = Flask(__name__)\nmanager = Manager(app)\n\n\n@app.route('/')\ndef index():\n return '2018/6/1 hello python'\n\n\n@app.route('/news')\ndef news():\n return '内蒙古新闻资讯,请选择浏览'\n\n\nif __name__ == '__main__':\n manager.run()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# 백준 문제(2021.5.22)
# 10039번) 상현이가 가르치는 아이폰 앱 개발 수업의 수강생은 원섭, 세희, 상근, 숭, 강수이다.
# 어제 이 수업의 기말고사가 있었고, 상현이는 지금 학생들의 기말고사 시험지를 채점하고 있다.
# 기말고사 점수가 40점 이상인 학생들은 그 점수 그대로 자신의 성적이 된다.
# 하지만, 40점 미만인 학생들은 보충학습을 듣는 조건을 수락하면 40점을 받게 된다.
# 보충학습은 거부할 수 없기 때문에, 40점 미만인 학생들은 항상 40점을 받게 된다.
# 학생 5명의 점수가 주어졌을 때, 평균 점수를 구하는 프로그램을 작성하시오.
# Read five exam scores from stdin; any score below 40 is raised to 40
# (students under 40 must take the mandatory make-up class), then print
# the floor of the average of the adjusted scores.
total = 0
for _ in range(5):
    total += max(int(input()), 40)
print(total // 5)
|
normal
|
{
"blob_id": "4a13a0d7aa2371d7c8963a01b7cc1b93f4110d5e",
"index": 5356,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(5):\n score = int(input())\n if score < 40:\n score = 40\n result += score\nprint(result // 5)\n",
"step-3": "result = 0\nfor i in range(5):\n score = int(input())\n if score < 40:\n score = 40\n result += score\nprint(result // 5)\n",
"step-4": "# 백준 문제(2021.5.22)\n# 10039번) 상현이가 가르치는 아이폰 앱 개발 수업의 수강생은 원섭, 세희, 상근, 숭, 강수이다.\n# 어제 이 수업의 기말고사가 있었고, 상현이는 지금 학생들의 기말고사 시험지를 채점하고 있다. \n# 기말고사 점수가 40점 이상인 학생들은 그 점수 그대로 자신의 성적이 된다. \n# 하지만, 40점 미만인 학생들은 보충학습을 듣는 조건을 수락하면 40점을 받게 된다. \n# 보충학습은 거부할 수 없기 때문에, 40점 미만인 학생들은 항상 40점을 받게 된다.\n# 학생 5명의 점수가 주어졌을 때, 평균 점수를 구하는 프로그램을 작성하시오.\n\nresult = 0\n\nfor i in range(5) :\n score = int(input())\n if(score < 40) :\n score = 40\n result += score\nprint(result//5)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.apps import AppConfig
class CfCoreConfig(AppConfig):
    """Django application configuration for the ``cf_core`` app."""
    name = 'cf_core'  # dotted module path Django uses to locate the app
|
normal
|
{
"blob_id": "01847c9e601eae6775cd4324483740c30e344557",
"index": 382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CfCoreConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Pack)
admin.site.register(Cliente)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Cliente, Pack
admin.site.register(Pack)
admin.site.register(Cliente)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Cliente, Pack
# Expose Pack and Cliente in the Django admin with the default ModelAdmin.
admin.site.register(Pack)
admin.site.register(Cliente)
|
flexible
|
{
"blob_id": "2af590ad11704ecf21489a5d546e61f40dcceee6",
"index": 2121,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Pack)\nadmin.site.register(Cliente)\n",
"step-3": "from django.contrib import admin\nfrom .models import Cliente, Pack\nadmin.site.register(Pack)\nadmin.site.register(Cliente)\n",
"step-4": "from django.contrib import admin\nfrom .models import Cliente, Pack\n\n# Register your models here.\nadmin.site.register(Pack)\nadmin.site.register(Cliente)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# SPDX-License-Identifier: Apache-2.0


"""
.. _example-lightgbm-pipe:

Convert a pipeline with a LightGbm model
========================================

.. index:: LightGbm

*sklearn-onnx* only converts *scikit-learn* models into *ONNX*
but many libraries implement *scikit-learn* API so that their models
can be included in a *scikit-learn* pipeline. This example considers
a pipeline including a *LightGbm* model. *sklearn-onnx* can convert
the whole pipeline as long as it knows the converter associated to
a *LGBMClassifier*. Let's see how to do it.

Train a LightGBM classifier
+++++++++++++++++++++++++++
"""
import lightgbm
import onnxmltools
import skl2onnx
import onnx
import sklearn
import matplotlib.pyplot as plt
import os
from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
import onnxruntime as rt
from onnxruntime.capi.onnxruntime_pybind11_state import Fail as OrtFail
from skl2onnx import convert_sklearn, update_registered_converter
from skl2onnx.common.shape_calculator import (
    calculate_linear_classifier_output_shapes,
)  # noqa
from onnxmltools.convert.lightgbm.operator_converters.LightGbm import (
    convert_lightgbm,
)  # noqa
import onnxmltools.convert.common.data_types
from skl2onnx.common.data_types import FloatTensorType
import numpy
from sklearn.datasets import load_iris
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from lightgbm import LGBMClassifier

data = load_iris()
# Keep only the first two features so the ONNX input is a (None, 2) tensor.
X = data.data[:, :2]
y = data.target

# Shuffle rows so training order does not follow the sorted iris classes.
# NOTE(review): no RNG seed is set, so predictions below vary between runs.
ind = numpy.arange(X.shape[0])
numpy.random.shuffle(ind)
X = X[ind, :].copy()
y = y[ind].copy()

pipe = Pipeline(
    [("scaler", StandardScaler()), ("lgbm", LGBMClassifier(n_estimators=3))]
)
pipe.fit(X, y)

######################################
# Register the converter for LGBMClassifier
# +++++++++++++++++++++++++++++++++++++++++
#
# The converter is implemented in *onnxmltools*:
# `onnxmltools...LightGbm.py
# <https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/
# lightgbm/operator_converters/LightGbm.py>`_.
# and the shape calculator:
# `onnxmltools...Classifier.py
# <https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/
# lightgbm/shape_calculators/Classifier.py>`_.

##############################################
# Then we import the converter and shape calculator.

###########################
# Let's register the new converter.
# `options` declares which converter options callers may set: `nocl`
# drops class labels, `zipmap` controls how probabilities are returned.
update_registered_converter(
    LGBMClassifier,
    "LightGbmLGBMClassifier",
    calculate_linear_classifier_output_shapes,
    convert_lightgbm,
    options={"nocl": [True, False], "zipmap": [True, False, "columns"]},
)

##################################
# Convert again
# +++++++++++++

# Pin opsets so the exported model stays loadable by older runtimes.
model_onnx = convert_sklearn(
    pipe,
    "pipeline_lightgbm",
    [("input", FloatTensorType([None, 2]))],
    target_opset={"": 12, "ai.onnx.ml": 2},
)

# And save.
with open("pipeline_lightgbm.onnx", "wb") as f:
    f.write(model_onnx.SerializeToString())

###########################
# Compare the predictions
# +++++++++++++++++++++++
#
# Predictions with LightGbm.

print("predict", pipe.predict(X[:5]))
print("predict_proba", pipe.predict_proba(X[:1]))

##########################
# Predictions with onnxruntime.

try:
    sess = rt.InferenceSession("pipeline_lightgbm.onnx")
except OrtFail as e:
    # Older onnxmltools emit graphs onnxruntime rejects; degrade gracefully.
    print(e)
    print("The converter requires onnxmltools>=1.7.0")
    sess = None

if sess is not None:
    pred_onx = sess.run(None, {"input": X[:5].astype(numpy.float32)})
    print("predict", pred_onx[0])
    print("predict_proba", pred_onx[1][:1])

##################################
# Display the ONNX graph
# ++++++++++++++++++++++

# Render the graph with pydot, convert to PNG via the `dot` binary
# (requires Graphviz on PATH), then show it with matplotlib.
pydot_graph = GetPydotGraph(
    model_onnx.graph,
    name=model_onnx.graph.name,
    rankdir="TB",
    node_producer=GetOpNodeProducer(
        "docstring", color="yellow", fillcolor="yellow", style="filled"
    ),
)
pydot_graph.write_dot("pipeline.dot")

os.system("dot -O -Gdpi=300 -Tpng pipeline.dot")

image = plt.imread("pipeline.dot.png")
fig, ax = plt.subplots(figsize=(40, 20))
ax.imshow(image)
ax.axis("off")

#################################
# **Versions used for this example**

print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", rt.__version__)
print("skl2onnx: ", skl2onnx.__version__)
print("onnxmltools: ", onnxmltools.__version__)
print("lightgbm: ", lightgbm.__version__)
|
normal
|
{
"blob_id": "32227029cb4e852536611f7ae5dec5118bd5e195",
"index": 8324,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnumpy.random.shuffle(ind)\n<mask token>\npipe.fit(X, y)\nupdate_registered_converter(LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm, options={\n 'nocl': [True, False], 'zipmap': [True, False, 'columns']})\n<mask token>\nwith open('pipeline_lightgbm.onnx', 'wb') as f:\n f.write(model_onnx.SerializeToString())\nprint('predict', pipe.predict(X[:5]))\nprint('predict_proba', pipe.predict_proba(X[:1]))\ntry:\n sess = rt.InferenceSession('pipeline_lightgbm.onnx')\nexcept OrtFail as e:\n print(e)\n print('The converter requires onnxmltools>=1.7.0')\n sess = None\nif sess is not None:\n pred_onx = sess.run(None, {'input': X[:5].astype(numpy.float32)})\n print('predict', pred_onx[0])\n print('predict_proba', pred_onx[1][:1])\n<mask token>\npydot_graph.write_dot('pipeline.dot')\nos.system('dot -O -Gdpi=300 -Tpng pipeline.dot')\n<mask token>\nax.imshow(image)\nax.axis('off')\nprint('numpy:', numpy.__version__)\nprint('scikit-learn:', sklearn.__version__)\nprint('onnx: ', onnx.__version__)\nprint('onnxruntime: ', rt.__version__)\nprint('skl2onnx: ', skl2onnx.__version__)\nprint('onnxmltools: ', onnxmltools.__version__)\nprint('lightgbm: ', lightgbm.__version__)\n",
"step-3": "<mask token>\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\npipe = Pipeline([('scaler', StandardScaler()), ('lgbm', LGBMClassifier(\n n_estimators=3))])\npipe.fit(X, y)\nupdate_registered_converter(LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm, options={\n 'nocl': [True, False], 'zipmap': [True, False, 'columns']})\nmodel_onnx = convert_sklearn(pipe, 'pipeline_lightgbm', [('input',\n FloatTensorType([None, 2]))], target_opset={'': 12, 'ai.onnx.ml': 2})\nwith open('pipeline_lightgbm.onnx', 'wb') as f:\n f.write(model_onnx.SerializeToString())\nprint('predict', pipe.predict(X[:5]))\nprint('predict_proba', pipe.predict_proba(X[:1]))\ntry:\n sess = rt.InferenceSession('pipeline_lightgbm.onnx')\nexcept OrtFail as e:\n print(e)\n print('The converter requires onnxmltools>=1.7.0')\n sess = None\nif sess is not None:\n pred_onx = sess.run(None, {'input': X[:5].astype(numpy.float32)})\n print('predict', pred_onx[0])\n print('predict_proba', pred_onx[1][:1])\npydot_graph = GetPydotGraph(model_onnx.graph, name=model_onnx.graph.name,\n rankdir='TB', node_producer=GetOpNodeProducer('docstring', color=\n 'yellow', fillcolor='yellow', style='filled'))\npydot_graph.write_dot('pipeline.dot')\nos.system('dot -O -Gdpi=300 -Tpng pipeline.dot')\nimage = plt.imread('pipeline.dot.png')\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')\nprint('numpy:', numpy.__version__)\nprint('scikit-learn:', sklearn.__version__)\nprint('onnx: ', onnx.__version__)\nprint('onnxruntime: ', rt.__version__)\nprint('skl2onnx: ', skl2onnx.__version__)\nprint('onnxmltools: ', onnxmltools.__version__)\nprint('lightgbm: ', lightgbm.__version__)\n",
"step-4": "<mask token>\nimport lightgbm\nimport onnxmltools\nimport skl2onnx\nimport onnx\nimport sklearn\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom onnxruntime.capi.onnxruntime_pybind11_state import Fail as OrtFail\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import calculate_linear_classifier_output_shapes\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import convert_lightgbm\nimport onnxmltools.convert.common.data_types\nfrom skl2onnx.common.data_types import FloatTensorType\nimport numpy\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom lightgbm import LGBMClassifier\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\npipe = Pipeline([('scaler', StandardScaler()), ('lgbm', LGBMClassifier(\n n_estimators=3))])\npipe.fit(X, y)\nupdate_registered_converter(LGBMClassifier, 'LightGbmLGBMClassifier',\n calculate_linear_classifier_output_shapes, convert_lightgbm, options={\n 'nocl': [True, False], 'zipmap': [True, False, 'columns']})\nmodel_onnx = convert_sklearn(pipe, 'pipeline_lightgbm', [('input',\n FloatTensorType([None, 2]))], target_opset={'': 12, 'ai.onnx.ml': 2})\nwith open('pipeline_lightgbm.onnx', 'wb') as f:\n f.write(model_onnx.SerializeToString())\nprint('predict', pipe.predict(X[:5]))\nprint('predict_proba', pipe.predict_proba(X[:1]))\ntry:\n sess = rt.InferenceSession('pipeline_lightgbm.onnx')\nexcept OrtFail as e:\n print(e)\n print('The converter requires onnxmltools>=1.7.0')\n sess = None\nif sess is not None:\n pred_onx = sess.run(None, {'input': X[:5].astype(numpy.float32)})\n print('predict', pred_onx[0])\n print('predict_proba', pred_onx[1][:1])\npydot_graph = 
GetPydotGraph(model_onnx.graph, name=model_onnx.graph.name,\n rankdir='TB', node_producer=GetOpNodeProducer('docstring', color=\n 'yellow', fillcolor='yellow', style='filled'))\npydot_graph.write_dot('pipeline.dot')\nos.system('dot -O -Gdpi=300 -Tpng pipeline.dot')\nimage = plt.imread('pipeline.dot.png')\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis('off')\nprint('numpy:', numpy.__version__)\nprint('scikit-learn:', sklearn.__version__)\nprint('onnx: ', onnx.__version__)\nprint('onnxruntime: ', rt.__version__)\nprint('skl2onnx: ', skl2onnx.__version__)\nprint('onnxmltools: ', onnxmltools.__version__)\nprint('lightgbm: ', lightgbm.__version__)\n",
"step-5": "# SPDX-License-Identifier: Apache-2.0\n\n\n\"\"\"\n.. _example-lightgbm-pipe:\n\nConvert a pipeline with a LightGbm model\n========================================\n\n.. index:: LightGbm\n\n*sklearn-onnx* only converts *scikit-learn* models into *ONNX*\nbut many libraries implement *scikit-learn* API so that their models\ncan be included in a *scikit-learn* pipeline. This example considers\na pipeline including a *LightGbm* model. *sklearn-onnx* can convert\nthe whole pipeline as long as it knows the converter associated to\na *LGBMClassifier*. Let's see how to do it.\n\nTrain a LightGBM classifier\n+++++++++++++++++++++++++++\n\"\"\"\nimport lightgbm\nimport onnxmltools\nimport skl2onnx\nimport onnx\nimport sklearn\nimport matplotlib.pyplot as plt\nimport os\nfrom onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer\nimport onnxruntime as rt\nfrom onnxruntime.capi.onnxruntime_pybind11_state import Fail as OrtFail\nfrom skl2onnx import convert_sklearn, update_registered_converter\nfrom skl2onnx.common.shape_calculator import (\n calculate_linear_classifier_output_shapes,\n) # noqa\nfrom onnxmltools.convert.lightgbm.operator_converters.LightGbm import (\n convert_lightgbm,\n) # noqa\nimport onnxmltools.convert.common.data_types\nfrom skl2onnx.common.data_types import FloatTensorType\nimport numpy\nfrom sklearn.datasets import load_iris\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom lightgbm import LGBMClassifier\n\ndata = load_iris()\nX = data.data[:, :2]\ny = data.target\n\nind = numpy.arange(X.shape[0])\nnumpy.random.shuffle(ind)\nX = X[ind, :].copy()\ny = y[ind].copy()\n\npipe = Pipeline(\n [(\"scaler\", StandardScaler()), (\"lgbm\", LGBMClassifier(n_estimators=3))]\n)\npipe.fit(X, y)\n\n######################################\n# Register the converter for LGBMClassifier\n# +++++++++++++++++++++++++++++++++++++++++\n#\n# The converter is implemented in *onnxmltools*:\n# 
`onnxmltools...LightGbm.py\n# <https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\n# lightgbm/operator_converters/LightGbm.py>`_.\n# and the shape calculator:\n# `onnxmltools...Classifier.py\n# <https://github.com/onnx/onnxmltools/blob/master/onnxmltools/convert/\n# lightgbm/shape_calculators/Classifier.py>`_.\n\n##############################################\n# Then we import the converter and shape calculator.\n\n###########################\n# Let's register the new converter.\nupdate_registered_converter(\n LGBMClassifier,\n \"LightGbmLGBMClassifier\",\n calculate_linear_classifier_output_shapes,\n convert_lightgbm,\n options={\"nocl\": [True, False], \"zipmap\": [True, False, \"columns\"]},\n)\n\n##################################\n# Convert again\n# +++++++++++++\n\nmodel_onnx = convert_sklearn(\n pipe,\n \"pipeline_lightgbm\",\n [(\"input\", FloatTensorType([None, 2]))],\n target_opset={\"\": 12, \"ai.onnx.ml\": 2},\n)\n\n# And save.\nwith open(\"pipeline_lightgbm.onnx\", \"wb\") as f:\n f.write(model_onnx.SerializeToString())\n\n###########################\n# Compare the predictions\n# +++++++++++++++++++++++\n#\n# Predictions with LightGbm.\n\nprint(\"predict\", pipe.predict(X[:5]))\nprint(\"predict_proba\", pipe.predict_proba(X[:1]))\n\n##########################\n# Predictions with onnxruntime.\n\ntry:\n sess = rt.InferenceSession(\"pipeline_lightgbm.onnx\")\nexcept OrtFail as e:\n print(e)\n print(\"The converter requires onnxmltools>=1.7.0\")\n sess = None\n\nif sess is not None:\n pred_onx = sess.run(None, {\"input\": X[:5].astype(numpy.float32)})\n print(\"predict\", pred_onx[0])\n print(\"predict_proba\", pred_onx[1][:1])\n\n##################################\n# Display the ONNX graph\n# ++++++++++++++++++++++\n\npydot_graph = GetPydotGraph(\n model_onnx.graph,\n name=model_onnx.graph.name,\n rankdir=\"TB\",\n node_producer=GetOpNodeProducer(\n \"docstring\", color=\"yellow\", fillcolor=\"yellow\", style=\"filled\"\n 
),\n)\npydot_graph.write_dot(\"pipeline.dot\")\n\nos.system(\"dot -O -Gdpi=300 -Tpng pipeline.dot\")\n\nimage = plt.imread(\"pipeline.dot.png\")\nfig, ax = plt.subplots(figsize=(40, 20))\nax.imshow(image)\nax.axis(\"off\")\n\n#################################\n# **Versions used for this example**\n\nprint(\"numpy:\", numpy.__version__)\nprint(\"scikit-learn:\", sklearn.__version__)\nprint(\"onnx: \", onnx.__version__)\nprint(\"onnxruntime: \", rt.__version__)\nprint(\"skl2onnx: \", skl2onnx.__version__)\nprint(\"onnxmltools: \", onnxmltools.__version__)\nprint(\"lightgbm: \", lightgbm.__version__)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# template for "Stopwatch: The Game"
import math
import simplegui
# define global variables
successcount = 0    # stops that landed exactly on a whole second
totalstopcount = 0  # total scored stop attempts
count = 0           # elapsed time in tenths of a second
T = True            # guards against scoring the same run's stop twice
F = True            # True until start() is first pressed (and after reset)
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    """Convert t, a time in tenths of a second, to the string 'A:BC.D'.

    A is whole minutes, BC is zero-padded seconds (00-59), and D is the
    remaining tenths of a second.
    """
    minutes, rem = divmod(t, 600)   # 600 tenths of a second per minute
    seconds, tenths = divmod(rem, 10)
    return '%d:%02d.%d' % (minutes, seconds, tenths)
# define event handlers for buttons; "Start", "Stop", "Reset"
def stop():
    """Stop the timer and, for the first stop of a run, score the attempt."""
    global successcount, totalstopcount, T
    timer.stop()
    if T and not F:
        # Score each run at most once: count the attempt, and count a
        # success when the watch stopped exactly on a whole second.
        totalstopcount += 1
        T = False
        if count != 0 and count % 10 == 0:
            successcount += 1
def start():
    """Start the timer and arm the next stop for scoring."""
    global T, F
    T, F = True, False
    timer.start()
def reset():
    """Clear the elapsed time and both counters back to a fresh state."""
    global successcount, totalstopcount, count, F
    successcount = totalstopcount = count = 0
    F = True
# define event handler for timer with 0.1 sec interval
def tick():
    """Timer handler: advance the elapsed time by one tenth of a second."""
    global count
    count += 1
# define draw handler
def draw(canvas):
    """Draw handler: render the elapsed time and the success tally."""
    canvas.draw_text(format(count), [250, 250], 40, "red")
    canvas.draw_text(
        str(successcount) + "/" + str(totalstopcount),
        [400, 100], 30, "orange")
# create frame
# Frame and timer wiring: buttons, draw handler, and a 0.1 s (100 ms) timer.
frame = simplegui.create_frame("Stopwatch", 500, 500)
frame.add_button("START", start)
frame.add_button("STOP", stop)
frame.add_button("RESET", reset)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(100, tick)
frame.start()
# Please remember to review the grading rubric
|
normal
|
{
"blob_id": "bb198978ffc799bb43acf870467496e1dcc54d4b",
"index": 3710,
"step-1": "<mask token>\n\n\ndef format(t):\n A = str(t // 600)\n tem = t // 10\n tem = tem % 60\n B = str(tem // 10)\n C = str(tem % 10)\n D = str(t % 10)\n return A + ':' + B + C + '.' + D\n\n\n<mask token>\n\n\ndef reset():\n global successcount, totalstopcount, count, F\n count = 0\n successcount = 0\n totalstopcount = 0\n F = True\n\n\ndef tick():\n global count\n count = count + 1\n\n\ndef draw(canvas):\n global count\n canvas.draw_text(format(count), [250, 250], 40, 'red')\n canvas.draw_text(str(successcount) + '/' + str(totalstopcount), [400, \n 100], 30, 'orange')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef format(t):\n A = str(t // 600)\n tem = t // 10\n tem = tem % 60\n B = str(tem // 10)\n C = str(tem % 10)\n D = str(t % 10)\n return A + ':' + B + C + '.' + D\n\n\ndef stop():\n global successcount, totalstopcount, T\n timer.stop()\n if T == True:\n if F == False:\n totalstopcount = totalstopcount + 1\n T = False\n if count % 10 == 0 and count != 0:\n successcount = successcount + 1\n\n\n<mask token>\n\n\ndef reset():\n global successcount, totalstopcount, count, F\n count = 0\n successcount = 0\n totalstopcount = 0\n F = True\n\n\ndef tick():\n global count\n count = count + 1\n\n\ndef draw(canvas):\n global count\n canvas.draw_text(format(count), [250, 250], 40, 'red')\n canvas.draw_text(str(successcount) + '/' + str(totalstopcount), [400, \n 100], 30, 'orange')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef format(t):\n A = str(t // 600)\n tem = t // 10\n tem = tem % 60\n B = str(tem // 10)\n C = str(tem % 10)\n D = str(t % 10)\n return A + ':' + B + C + '.' + D\n\n\ndef stop():\n global successcount, totalstopcount, T\n timer.stop()\n if T == True:\n if F == False:\n totalstopcount = totalstopcount + 1\n T = False\n if count % 10 == 0 and count != 0:\n successcount = successcount + 1\n\n\ndef start():\n global T, F\n T = True\n F = False\n timer.start()\n\n\ndef reset():\n global successcount, totalstopcount, count, F\n count = 0\n successcount = 0\n totalstopcount = 0\n F = True\n\n\ndef tick():\n global count\n count = count + 1\n\n\ndef draw(canvas):\n global count\n canvas.draw_text(format(count), [250, 250], 40, 'red')\n canvas.draw_text(str(successcount) + '/' + str(totalstopcount), [400, \n 100], 30, 'orange')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef format(t):\n A = str(t // 600)\n tem = t // 10\n tem = tem % 60\n B = str(tem // 10)\n C = str(tem % 10)\n D = str(t % 10)\n return A + ':' + B + C + '.' + D\n\n\ndef stop():\n global successcount, totalstopcount, T\n timer.stop()\n if T == True:\n if F == False:\n totalstopcount = totalstopcount + 1\n T = False\n if count % 10 == 0 and count != 0:\n successcount = successcount + 1\n\n\ndef start():\n global T, F\n T = True\n F = False\n timer.start()\n\n\ndef reset():\n global successcount, totalstopcount, count, F\n count = 0\n successcount = 0\n totalstopcount = 0\n F = True\n\n\ndef tick():\n global count\n count = count + 1\n\n\ndef draw(canvas):\n global count\n canvas.draw_text(format(count), [250, 250], 40, 'red')\n canvas.draw_text(str(successcount) + '/' + str(totalstopcount), [400, \n 100], 30, 'orange')\n\n\n<mask token>\nframe.add_button('START', start)\nframe.add_button('STOP', stop)\nframe.add_button('RESET', reset)\nframe.set_draw_handler(draw)\n<mask token>\nframe.start()\n",
"step-5": "# template for \"Stopwatch: The Game\"\nimport math\nimport simplegui\n\n\n# define global variables\nsuccesscount = 0;\ntotalstopcount = 0;\ncount = 0;\nT = True;\nF = True;\n\n\n# define helper function format that converts time\n# in tenths of seconds into formatted string A:BC.D\ndef format(t):\n A = str(t // 600);\n tem = (t // 10);\n tem = (tem) % 60;\n B = str(tem // 10);\n C = str(tem % 10);\n D = str(t % 10);\n return A + \":\" + B + C + \".\" + D;\n\n\n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\ndef stop():\n global successcount, totalstopcount, T;\n timer.stop();\n if (T == True):\n if (F == False):\n totalstopcount = totalstopcount + 1;\n T = False;\n if ((count % 10 == 0) and (count != 0)):\n successcount = successcount + 1;\n\n\ndef start():\n global T, F;\n T = True;\n F = False;\n timer.start();\n\n\ndef reset():\n global successcount, totalstopcount, count, F;\n count = 0;\n successcount = 0;\n totalstopcount = 0;\n F = True;\n\n\n# define event handler for timer with 0.1 sec interval\ndef tick():\n global count;\n count = count + 1;\n\n\n# define draw handler\ndef draw(canvas):\n global count;\n canvas.draw_text(format(count), [250, 250], 40, \"red\");\n canvas.draw_text(str(successcount) + \"/\" + str(totalstopcount), [400, 100], 30, \"orange\");\n\n\n# create frame\nframe = simplegui.create_frame(\"Stopwatch\", 500, 500);\nframe.add_button(\"START\", start);\nframe.add_button(\"STOP\", stop);\nframe.add_button(\"RESET\", reset);\n\n# register event handlers\nframe.set_draw_handler(draw);\ntimer = simplegui.create_timer(100, tick)\n\n# start frame\nframe.start();\n\n# Please remember to review the grading rubric\n\n",
"step-ids": [
4,
5,
6,
7,
10
]
}
|
[
4,
5,
6,
7,
10
] |
<|reserved_special_token_0|>
def load_all_vectors(num_chunks):
all_vectors = []
meta_data = []
for chunk_id in range(num_chunks):
logging.info('Processing file %s', chunk_id)
t = time.time()
vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %
chunk_id).astype(np.float32)
with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,
'rb') as fin:
meta = pickle.load(fin)
vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)
vector_norms[vector_norms == 0] = 1.0
vectors /= vector_norms
all_vectors.append(vectors)
meta_data.extend(meta)
e = time.time()
logging.info('Finished processing chunk %s in %s seconds', chunk_id,
str(e - t))
all_vec = np.concatenate(all_vectors)
logging.info('Concatenated shape %s' % str(all_vec.shape))
return all_vec, meta_data
def main(argv):
logging.info('Running reduce vecs with args %s', str(argv))
logging.info('Running on %s files', str(FLAGS.num_chunks))
all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)
np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)
with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:
pickle.dump(all_meta, fout)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
flags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')
flags.DEFINE_integer('num_chunks', 36, 'how many files')
flags.DEFINE_string('out_dir', '2020-04-10/', 'out path')
logging.set_verbosity(logging.INFO)
def load_all_vectors(num_chunks):
all_vectors = []
meta_data = []
for chunk_id in range(num_chunks):
logging.info('Processing file %s', chunk_id)
t = time.time()
vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %
chunk_id).astype(np.float32)
with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,
'rb') as fin:
meta = pickle.load(fin)
vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)
vector_norms[vector_norms == 0] = 1.0
vectors /= vector_norms
all_vectors.append(vectors)
meta_data.extend(meta)
e = time.time()
logging.info('Finished processing chunk %s in %s seconds', chunk_id,
str(e - t))
all_vec = np.concatenate(all_vectors)
logging.info('Concatenated shape %s' % str(all_vec.shape))
return all_vec, meta_data
def main(argv):
logging.info('Running reduce vecs with args %s', str(argv))
logging.info('Running on %s files', str(FLAGS.num_chunks))
all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)
np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)
with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:
pickle.dump(all_meta, fout)
if __name__ == '__main__':
app.run(main)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
FLAGS = flags.FLAGS
flags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')
flags.DEFINE_integer('num_chunks', 36, 'how many files')
flags.DEFINE_string('out_dir', '2020-04-10/', 'out path')
logging.set_verbosity(logging.INFO)
def load_all_vectors(num_chunks):
all_vectors = []
meta_data = []
for chunk_id in range(num_chunks):
logging.info('Processing file %s', chunk_id)
t = time.time()
vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %
chunk_id).astype(np.float32)
with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,
'rb') as fin:
meta = pickle.load(fin)
vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)
vector_norms[vector_norms == 0] = 1.0
vectors /= vector_norms
all_vectors.append(vectors)
meta_data.extend(meta)
e = time.time()
logging.info('Finished processing chunk %s in %s seconds', chunk_id,
str(e - t))
all_vec = np.concatenate(all_vectors)
logging.info('Concatenated shape %s' % str(all_vec.shape))
return all_vec, meta_data
def main(argv):
logging.info('Running reduce vecs with args %s', str(argv))
logging.info('Running on %s files', str(FLAGS.num_chunks))
all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)
np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)
with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:
pickle.dump(all_meta, fout)
if __name__ == '__main__':
app.run(main)
<|reserved_special_token_1|>
import pickle
from absl import flags
from absl import app
from absl import logging
import time
import numpy as np
FLAGS = flags.FLAGS
flags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')
flags.DEFINE_integer('num_chunks', 36, 'how many files')
flags.DEFINE_string('out_dir', '2020-04-10/', 'out path')
logging.set_verbosity(logging.INFO)
def load_all_vectors(num_chunks):
all_vectors = []
meta_data = []
for chunk_id in range(num_chunks):
logging.info('Processing file %s', chunk_id)
t = time.time()
vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %
chunk_id).astype(np.float32)
with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,
'rb') as fin:
meta = pickle.load(fin)
vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)
vector_norms[vector_norms == 0] = 1.0
vectors /= vector_norms
all_vectors.append(vectors)
meta_data.extend(meta)
e = time.time()
logging.info('Finished processing chunk %s in %s seconds', chunk_id,
str(e - t))
all_vec = np.concatenate(all_vectors)
logging.info('Concatenated shape %s' % str(all_vec.shape))
return all_vec, meta_data
def main(argv):
logging.info('Running reduce vecs with args %s', str(argv))
logging.info('Running on %s files', str(FLAGS.num_chunks))
all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)
np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)
with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:
pickle.dump(all_meta, fout)
if __name__ == '__main__':
app.run(main)
<|reserved_special_token_1|>
# Merge per-chunk sent2vec outputs into one vector matrix + metadata list.
import pickle
from absl import flags
from absl import app
from absl import logging
import time
import numpy as np
# Command-line flags: where the per-chunk files live, how many chunks
# exist, and where to write the merged output.
FLAGS = flags.FLAGS
flags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')
flags.DEFINE_integer('num_chunks', 36, 'how many files')
flags.DEFINE_string('out_dir', '2020-04-10/', 'out path')
logging.set_verbosity(logging.INFO)
def load_all_vectors(num_chunks):
    """Load, L2-normalize, and concatenate every sentence-vector chunk.

    Returns (vectors, meta): vectors is one float32 array of
    unit-normalized rows; meta is the flat list of per-sentence tuples
    (doc_id, section_id, sentence_id, sentence).
    """
    chunks = []
    meta = []
    for cid in range(num_chunks):
        logging.info('Processing file %s', cid)
        started = time.time()
        vecs = np.load('%s/chunk_%s.vectors.npy' % (FLAGS.sent2vec_dir, cid)).astype(np.float32)
        with open('%s/chunk_%s.sentences.pkl' % (FLAGS.sent2vec_dir, cid), 'rb') as fin:
            meta.extend(pickle.load(fin))
        norms = np.linalg.norm(vecs, axis=1, keepdims=True)
        norms[norms == 0] = 1.0  # avoid dividing zero-vectors by zero
        chunks.append(vecs / norms)
        logging.info('Finished processing chunk %s in %s seconds', cid,
                     str(time.time() - started))
    merged = np.concatenate(chunks)
    logging.info('Concatenated shape %s' % str(merged.shape))
    return merged, meta
def main(argv):
    """Entry point: merge all chunks and persist the combined outputs."""
    logging.info('Running reduce vecs with args %s', str(argv))
    logging.info('Running on %s files', str(FLAGS.num_chunks))
    vectors, meta = load_all_vectors(FLAGS.num_chunks)
    np.save('%s/all.npy' % FLAGS.out_dir, vectors)
    with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:
        pickle.dump(meta, fout)


if __name__ == "__main__":
    app.run(main)
|
flexible
|
{
"blob_id": "8aa35bcaa4e564306125b37c70a8a92f26da736d",
"index": 7418,
"step-1": "<mask token>\n\n\ndef load_all_vectors(num_chunks):\n all_vectors = []\n meta_data = []\n for chunk_id in range(num_chunks):\n logging.info('Processing file %s', chunk_id)\n t = time.time()\n vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %\n chunk_id).astype(np.float32)\n with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,\n 'rb') as fin:\n meta = pickle.load(fin)\n vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)\n vector_norms[vector_norms == 0] = 1.0\n vectors /= vector_norms\n all_vectors.append(vectors)\n meta_data.extend(meta)\n e = time.time()\n logging.info('Finished processing chunk %s in %s seconds', chunk_id,\n str(e - t))\n all_vec = np.concatenate(all_vectors)\n logging.info('Concatenated shape %s' % str(all_vec.shape))\n return all_vec, meta_data\n\n\ndef main(argv):\n logging.info('Running reduce vecs with args %s', str(argv))\n logging.info('Running on %s files', str(FLAGS.num_chunks))\n all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)\n np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)\n with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:\n pickle.dump(all_meta, fout)\n\n\n<mask token>\n",
"step-2": "<mask token>\nflags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')\nflags.DEFINE_integer('num_chunks', 36, 'how many files')\nflags.DEFINE_string('out_dir', '2020-04-10/', 'out path')\nlogging.set_verbosity(logging.INFO)\n\n\ndef load_all_vectors(num_chunks):\n all_vectors = []\n meta_data = []\n for chunk_id in range(num_chunks):\n logging.info('Processing file %s', chunk_id)\n t = time.time()\n vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %\n chunk_id).astype(np.float32)\n with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,\n 'rb') as fin:\n meta = pickle.load(fin)\n vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)\n vector_norms[vector_norms == 0] = 1.0\n vectors /= vector_norms\n all_vectors.append(vectors)\n meta_data.extend(meta)\n e = time.time()\n logging.info('Finished processing chunk %s in %s seconds', chunk_id,\n str(e - t))\n all_vec = np.concatenate(all_vectors)\n logging.info('Concatenated shape %s' % str(all_vec.shape))\n return all_vec, meta_data\n\n\ndef main(argv):\n logging.info('Running reduce vecs with args %s', str(argv))\n logging.info('Running on %s files', str(FLAGS.num_chunks))\n all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)\n np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)\n with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:\n pickle.dump(all_meta, fout)\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-3": "<mask token>\nFLAGS = flags.FLAGS\nflags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')\nflags.DEFINE_integer('num_chunks', 36, 'how many files')\nflags.DEFINE_string('out_dir', '2020-04-10/', 'out path')\nlogging.set_verbosity(logging.INFO)\n\n\ndef load_all_vectors(num_chunks):\n all_vectors = []\n meta_data = []\n for chunk_id in range(num_chunks):\n logging.info('Processing file %s', chunk_id)\n t = time.time()\n vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %\n chunk_id).astype(np.float32)\n with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,\n 'rb') as fin:\n meta = pickle.load(fin)\n vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)\n vector_norms[vector_norms == 0] = 1.0\n vectors /= vector_norms\n all_vectors.append(vectors)\n meta_data.extend(meta)\n e = time.time()\n logging.info('Finished processing chunk %s in %s seconds', chunk_id,\n str(e - t))\n all_vec = np.concatenate(all_vectors)\n logging.info('Concatenated shape %s' % str(all_vec.shape))\n return all_vec, meta_data\n\n\ndef main(argv):\n logging.info('Running reduce vecs with args %s', str(argv))\n logging.info('Running on %s files', str(FLAGS.num_chunks))\n all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)\n np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)\n with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:\n pickle.dump(all_meta, fout)\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-4": "import pickle\nfrom absl import flags\nfrom absl import app\nfrom absl import logging\nimport time\nimport numpy as np\nFLAGS = flags.FLAGS\nflags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')\nflags.DEFINE_integer('num_chunks', 36, 'how many files')\nflags.DEFINE_string('out_dir', '2020-04-10/', 'out path')\nlogging.set_verbosity(logging.INFO)\n\n\ndef load_all_vectors(num_chunks):\n all_vectors = []\n meta_data = []\n for chunk_id in range(num_chunks):\n logging.info('Processing file %s', chunk_id)\n t = time.time()\n vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' %\n chunk_id).astype(np.float32)\n with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id,\n 'rb') as fin:\n meta = pickle.load(fin)\n vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)\n vector_norms[vector_norms == 0] = 1.0\n vectors /= vector_norms\n all_vectors.append(vectors)\n meta_data.extend(meta)\n e = time.time()\n logging.info('Finished processing chunk %s in %s seconds', chunk_id,\n str(e - t))\n all_vec = np.concatenate(all_vectors)\n logging.info('Concatenated shape %s' % str(all_vec.shape))\n return all_vec, meta_data\n\n\ndef main(argv):\n logging.info('Running reduce vecs with args %s', str(argv))\n logging.info('Running on %s files', str(FLAGS.num_chunks))\n all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)\n np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)\n with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:\n pickle.dump(all_meta, fout)\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"step-5": "\nimport pickle\n\nfrom absl import flags\nfrom absl import app\nfrom absl import logging\nimport time\nimport numpy as np\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('sent2vec_dir', '2020-04-10/sent2vec/', 'out path')\nflags.DEFINE_integer('num_chunks', 36, 'how many files')\nflags.DEFINE_string('out_dir', '2020-04-10/', 'out path')\n\nlogging.set_verbosity(logging.INFO)\n\ndef load_all_vectors(num_chunks):\n all_vectors = []\n meta_data = [] # (doc_id, section_id, sentence_id, sentence)\n for chunk_id in range(num_chunks):\n logging.info('Processing file %s', chunk_id)\n t = time.time()\n vectors = np.load(FLAGS.sent2vec_dir + '/chunk_%s.vectors.npy' % chunk_id).astype(np.float32)\n with open(FLAGS.sent2vec_dir + '/chunk_%s.sentences.pkl' % chunk_id, 'rb') as fin:\n meta = pickle.load(fin)\n\n vector_norms = np.linalg.norm(vectors, axis=1, keepdims=True)\n vector_norms[vector_norms == 0] = 1.0\n vectors /= vector_norms\n all_vectors.append(vectors)\n meta_data.extend(meta)\n e = time.time()\n\n logging.info('Finished processing chunk %s in %s seconds', chunk_id, str(e-t))\n all_vec = np.concatenate(all_vectors)\n logging.info('Concatenated shape %s' % str(all_vec.shape))\n return all_vec, meta_data\n\n\ndef main(argv):\n logging.info('Running reduce vecs with args %s', str(argv))\n logging.info('Running on %s files', str(FLAGS.num_chunks))\n all_vecs, all_meta = load_all_vectors(FLAGS.num_chunks)\n np.save('%s/all.npy' % FLAGS.out_dir, all_vecs)\n with open('%s/all.pkl' % FLAGS.out_dir, 'wb') as fout:\n pickle.dump(all_meta, fout)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/python
# -*- coding : utf-8 -*-
"""
@author: Diogenes Augusto Fernandes Herminio <diofeher@gmail.com>
"""
# Director
class Director(object):
    """Drives a builder through the fixed construction sequence."""

    def __init__(self):
        self.builder = None  # concrete Builder assigned by the client

    def construct_building(self):
        """Run the build steps on the attached builder, in order."""
        builder = self.builder
        builder.new_building()
        builder.build_floor()
        builder.build_size()

    def get_building(self):
        """Return the product assembled by the attached builder."""
        return self.builder.building
# Abstract Builder
class Builder(object):
    """Abstract builder: owns the product instance under construction."""

    def __init__(self):
        self.building = None  # no product until new_building() is called

    def new_building(self):
        """Begin a fresh, empty product."""
        self.building = Building()
# Concrete Builder
class BuilderHouse(Builder):
    """Concrete builder: produces a big, single-floor house."""

    def build_floor(self):
        # A house in this example always has exactly one floor.
        self.building.floor = 'One'

    def build_size(self):
        # Houses are the large product variant.
        self.building.size = 'Big'
class BuilderFlat(Builder):
    """Concrete builder: produces a small, multi-floor flat."""

    def build_floor(self):
        # Flats live in multi-storey buildings.
        self.building.floor = 'More than One'

    def build_size(self):
        # Flats are the small product variant.
        self.building.size = 'Small'
# Product
class Building(object):
    """Product assembled step by step by a concrete builder."""

    def __init__(self):
        self.floor = None  # set by a builder's build_floor()
        self.size = None   # set by a builder's build_size()

    def __repr__(self):
        return 'Floor: {} | Size: {}'.format(self.floor, self.size)
#Client
if __name__=="__main__":
    # Demo: direct a house builder and show the finished product.
    director = Director()
    director.builder = BuilderHouse()
    director.construct_building()
    building = director.get_building()
    # print() call form works on both Python 2 and Python 3; the bare
    # `print building` statement is a SyntaxError under Python 3.
    print(building)
|
normal
|
{
"blob_id": "8ee26d181f06a2caf2b2b5a71a6113c245a89c03",
"index": 3322,
"step-1": "#!/usr/bin/python\n# -*- coding : utf-8 -*-\n\"\"\"\n @author: Diogenes Augusto Fernandes Herminio <diofeher@gmail.com>\n\"\"\"\n\n# Director\nclass Director(object):\n def __init__(self):\n self.builder = None\n \n def construct_building(self):\n self.builder.new_building()\n self.builder.build_floor()\n self.builder.build_size()\n \n def get_building(self):\n return self.builder.building\n \n\n# Abstract Builder\nclass Builder(object):\n def __init__(self):\n self.building = None\n \n def new_building(self):\n self.building = Building()\n \n# Concrete Builder\nclass BuilderHouse(Builder): \n def build_floor(self):\n self.building.floor ='One'\n \n def build_size(self):\n self.building.size = 'Big'\n \nclass BuilderFlat(Builder):\n def build_floor(self):\n self.building.floor ='More than One'\n \n def build_size(self):\n self.building.size = 'Small'\n \n \n# Product\nclass Building(object):\n def __init__(self):\n self.floor = None\n self.size = None\n \n def __repr__(self):\n return 'Floor: %s | Size: %s' % (self.floor, self.size)\n\n\n#Client\nif __name__==\"__main__\":\n director = Director()\n director.builder = BuilderHouse()\n director.construct_building()\n building = director.get_building()\n print building",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Node2:
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node2({!r})'.format(self._value)
<|reserved_special_token_0|>
def __iter__(self):
self._idx = 0
return self
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
<|reserved_special_token_0|>
class Node3:
    """Tree node that acts as its own iterator over its children.

    __iter__ rewinds the internal cursor, so every new for-loop starts
    from the first child; two simultaneous loops over the same node
    would share that cursor.
    """

    def __init__(self, value):
        self._value = value
        self._children = []
        self._idx = 0  # cursor consumed by __next__

    def __repr__(self):
        return 'Node3({!r})'.format(self._value)

    def add_child(self, node):
        self._children.append(node)

    def has_children(self):
        """Return True if at least one child has been added."""
        return bool(self._children)

    def __iter__(self):
        self._idx = 0  # rewind so iteration restarts at the first child
        return self

    def __next__(self):
        try:
            child = self._children[self._idx]
        except IndexError:
            raise StopIteration
        self._idx += 1
        return child
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node2:
    """Tree node that implements the iterator protocol on itself.

    __iter__ resets the cursor, so each fresh for-loop begins at the
    first child; nested loops over one node would share the cursor.
    """

    def __init__(self, value):
        self._value = value
        self._children = []
        self._idx = 0  # cursor consumed by __next__

    def __repr__(self):
        return 'Node2({!r})'.format(self._value)

    def add_child(self, node):
        self._children.append(node)

    def __iter__(self):
        self._idx = 0  # restart iteration from the first child
        return self

    def __next__(self):
        try:
            child = self._children[self._idx]
        except IndexError:
            raise StopIteration
        self._idx += 1
        return child
<|reserved_special_token_0|>
class Node3:
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node3({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def has_children(self):
return len(self._children) != 0
def __iter__(self):
self._idx = 0
return self
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
def __init__(self, value):
self._value = value
self._children = []
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Node2:
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node2({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
self._idx = 0
return self
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
<|reserved_special_token_0|>
class Node3:
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node3({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def has_children(self):
return len(self._children) != 0
def __iter__(self):
self._idx = 0
return self
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Node2:
    """Tree node that serves as its own iterator over its children.

    ``__iter__`` resets the shared cursor and returns ``self``, so
    traversals of the same node cannot safely overlap.
    """
    def __init__(self, value):
        self._value = value
        self._children = []
        self._idx = 0  # shared iteration cursor
    def __repr__(self):
        return 'Node2({!r})'.format(self._value)
    def add_child(self, node):
        # Append a child; insertion order is preserved.
        self._children.append(node)
    def __iter__(self):
        self._idx = 0  # rewind so each for-loop starts from the first child
        return self
    def __next__(self):
        if self._idx < len(self._children):
            idx = self._idx
            self._idx += 1
            return self._children[idx]
        raise StopIteration
<|reserved_special_token_0|>
class Node3:
    """Tree node (iterator-is-self variant) with a has_children() helper."""
    def __init__(self, value):
        self._value = value
        self._children = []
        self._idx = 0  # shared iteration cursor
    def __repr__(self):
        return 'Node3({!r})'.format(self._value)
    def add_child(self, node):
        # Append a child; insertion order is preserved.
        self._children.append(node)
    def has_children(self):
        # True when at least one child has been added.
        return len(self._children) != 0
    def __iter__(self):
        self._idx = 0  # rewind so each for-loop starts from the first child
        return self
    def __next__(self):
        if self._idx < len(self._children):
            idx = self._idx
            self._idx += 1
            return self._children[idx]
        raise StopIteration
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Demo of the iterator protocol: iter() calls __iter__(), next() calls __next__().
import random
a = random.sample(range(100), 10)
print("All items: {}".format(a))
it = iter(a) # call a.__iter__()
print("Num01: {}".format(next(it))) # call it.__next__()
print("Num02: {}".format(next(it)))
print("Num03: {}".format(it.__next__()))
# Manual equivalent of a for-loop: pull items until StopIteration is raised.
it = iter(a)
i = 1
while True:
    try:
        x = next(it)
        print("Num{:02d}: {}".format(i, x))
    except StopIteration:
        break
    i += 1
class Node:
    """Tree node that delegates iteration to its children list.

    ``__iter__`` hands back a fresh list iterator, so multiple independent
    traversals of the same node are safe (unlike the self-iterator variants).
    """

    def __init__(self, value):
        self._value = value
        self._children = []

    def __repr__(self):
        return 'Node({!r})'.format(self._value)

    def add_child(self, node):
        """Append *node* as the last child."""
        self._children.append(node)

    def __iter__(self):
        # A list iterator keeps its own cursor, independent of this node.
        return iter(self._children)
# Demo: Node delegates iteration to its children list.
root = Node(0)
root.add_child(Node(1))
root.add_child(Node(2))
for x in root:
    print(x)
class Node2():
    """Tree node that acts as its own iterator over its children."""
    def __init__(self, value):
        self._value = value
        self._children = []
        self._idx = 0  # iteration cursor shared by all loops over this node
    def __repr__(self):
        return 'Node2({!r})'.format(self._value)
    def add_child(self, node):
        self._children.append(node)
    def __iter__(self):
        self._idx = 0
        return self # return self: this node is its own iterator, so it must implement __next__()
    def __next__(self):
        if self._idx < len(self._children):
            idx = self._idx
            self._idx += 1
            return self._children[idx]
        raise StopIteration
# Demo: Node2 is its own iterator; __iter__ resets the cursor each time.
root = Node2(10)
root.add_child(Node2(11))
root.add_child(Node2(22))
for x in root:
    print(x)
class Node3():
    """Self-iterating tree node with a has_children() helper for recursion."""
    def __init__(self, value):
        self._value = value
        self._children = []
        self._idx = 0  # iteration cursor shared by all loops over this node
    def __repr__(self):
        return 'Node3({!r})'.format(self._value)
    def add_child(self, node):
        self._children.append(node)
    def has_children(self):
        # True when at least one child has been added.
        return len(self._children) != 0
    def __iter__(self):
        self._idx = 0
        return self # return self: this node is its own iterator, so it must implement __next__()
    def __next__(self):
        if self._idx < len(self._children):
            idx = self._idx
            self._idx += 1
            return self._children[idx]
        raise StopIteration
def recur_show(root):
    """Print the tree rooted at *root* in pre-order (parent before children)."""
    print(root)
    if not root.has_children():
        return
    for child in root:
        recur_show(child)
def recur_show2(root):
    """Print the tree rooted at *root* in post-order (children before parent)."""
    children = list(root) if root.has_children() else []
    for child in children:
        recur_show2(child)
    print(root)
# Tree under test: root 0 with children 10, 20, 30;
# 10 has children 11 and 12; 30 has child 31.
# 0
#
# 10 20 30
#
# 11 12 31
root = Node3(0)
c1 = Node3(10)
c2 = Node3(20)
c3 = Node3(30)
c11 = Node3(11)
c12 = Node3(12)
c31 = Node3(31)
root.add_child(c1)
root.add_child(c2)
root.add_child(c3)
c1.add_child(c11)
c1.add_child(c12)
c3.add_child(c31)
print("==================")
recur_show(root)  # pre-order: each parent printed before its children
print("==================")
recur_show2(root)  # post-order: children printed before their parent
|
flexible
|
{
"blob_id": "f5513bea4ca5f4c2ac80c4bf537a264a4052d1e9",
"index": 8866,
"step-1": "<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n <mask token>\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n\n def __repr__(self):\n return 'Node({!r})'.format(self._value)\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport random\n\na = random.sample(range(100), 10)\nprint(\"All items: {}\".format(a))\n\nit = iter(a) # call a.__iter__()\n\nprint(\"Num01: {}\".format(next(it))) # call it.__next__()\nprint(\"Num02: {}\".format(next(it)))\nprint(\"Num03: {}\".format(it.__next__()))\n\nit = iter(a)\ni = 1\nwhile True:\n try:\n x = next(it)\n print(\"Num{:02d}: {}\".format(i, x))\n except StopIteration:\n break\n i += 1\n\n\nclass Node():\n def __init__(self, value):\n self._value = value\n self._children = []\n\n def __repr__(self):\n return 'Node({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n return iter(self._children)\n \nroot = Node(0)\nroot.add_child(Node(1))\nroot.add_child(Node(2))\n\nfor x in root:\n print(x)\n\nclass Node2():\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self # 返回自己, 说明自己是迭代器,须实现__next__()\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\nroot = Node2(10)\nroot.add_child(Node2(11))\nroot.add_child(Node2(22))\n\nfor x in root:\n print(x)\n\nclass Node3():\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self # 返回自己, 说明自己是迭代器,须实现__next__()\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\ndef recur_show(root):\n print(root)\n if 
root.has_children():\n for node in root:\n recur_show(node)\n\ndef recur_show2(root):\n if root.has_children():\n for node in root:\n recur_show2(node)\n print(root)\n\n# 0\n# \n# 10 20 30\n# \n# 11 12 31\n\nroot = Node3(0)\nc1 = Node3(10)\nc2 = Node3(20)\nc3 = Node3(30)\nc11 = Node3(11)\nc12 = Node3(12)\nc31 = Node3(31)\nroot.add_child(c1)\nroot.add_child(c2)\nroot.add_child(c3)\nc1.add_child(c11)\nc1.add_child(c12)\nc3.add_child(c31)\n\nprint(\"==================\")\nrecur_show(root)\nprint(\"==================\")\nrecur_show2(root)\n",
"step-ids": [
12,
13,
15,
16,
24
]
}
|
[
12,
13,
15,
16,
24
] |
<|reserved_special_token_0|>
class Logger(models.Model):
Facinet = models.ForeignKey('Facinet', null=False, blank=False,
related_name='Loggers')
loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')
name = models.TextField(db_column='Name')
online = models.IntegerField(db_column='Online')
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class LoggerMeasurement(models.Model):
Logger = models.ForeignKey('Logger', null=False, blank=False,
related_name='Measurement')
timestamp = models.DateTimeField()
measurement = models.DecimalField(max_digits=12, decimal_places=4)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Facinet(models.Model):
Building = models.ForeignKey('Building', null=False, blank=False,
related_name='FacinetNodes')
location = models.IntegerField(unique=True, db_column='Location')
name = models.TextField(db_column='Name')
connectionstring = models.TextField(db_column='ConnectionString')
tapidevice = models.TextField(db_column='TapiDevice', blank=True)
synctime = models.CharField(max_length=3, db_column='SyncTime')
online = models.CharField(max_length=3, db_column='Online')
onlineall = models.CharField(max_length=3, db_column='OnlineAll')
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class Logger(models.Model):
Facinet = models.ForeignKey('Facinet', null=False, blank=False,
related_name='Loggers')
loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')
name = models.TextField(db_column='Name')
online = models.IntegerField(db_column='Online')
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class LoggerMeasurement(models.Model):
Logger = models.ForeignKey('Logger', null=False, blank=False,
related_name='Measurement')
timestamp = models.DateTimeField()
measurement = models.DecimalField(max_digits=12, decimal_places=4)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BuildingPoint(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Facinet(models.Model):
Building = models.ForeignKey('Building', null=False, blank=False,
related_name='FacinetNodes')
location = models.IntegerField(unique=True, db_column='Location')
name = models.TextField(db_column='Name')
connectionstring = models.TextField(db_column='ConnectionString')
tapidevice = models.TextField(db_column='TapiDevice', blank=True)
synctime = models.CharField(max_length=3, db_column='SyncTime')
online = models.CharField(max_length=3, db_column='Online')
onlineall = models.CharField(max_length=3, db_column='OnlineAll')
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class Logger(models.Model):
Facinet = models.ForeignKey('Facinet', null=False, blank=False,
related_name='Loggers')
loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')
name = models.TextField(db_column='Name')
online = models.IntegerField(db_column='Online')
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class LoggerMeasurement(models.Model):
Logger = models.ForeignKey('Logger', null=False, blank=False,
related_name='Measurement')
timestamp = models.DateTimeField()
measurement = models.DecimalField(max_digits=12, decimal_places=4)
<|reserved_special_token_1|>
from django.db import models
class Building(models.Model):
Number = models.CharField(max_length=60)
Description = models.CharField(max_length=120)
OSMWAYID = models.DecimalField(decimal_places=0, max_digits=15)
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class BuildingPoint(models.Model):
parent = models.ForeignKey('Building', null=False, blank=False,
related_name='points')
OSMNODEID = models.DecimalField(decimal_places=0, max_digits=15)
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class Facinet(models.Model):
Building = models.ForeignKey('Building', null=False, blank=False,
related_name='FacinetNodes')
location = models.IntegerField(unique=True, db_column='Location')
name = models.TextField(db_column='Name')
connectionstring = models.TextField(db_column='ConnectionString')
tapidevice = models.TextField(db_column='TapiDevice', blank=True)
synctime = models.CharField(max_length=3, db_column='SyncTime')
online = models.CharField(max_length=3, db_column='Online')
onlineall = models.CharField(max_length=3, db_column='OnlineAll')
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class Logger(models.Model):
Facinet = models.ForeignKey('Facinet', null=False, blank=False,
related_name='Loggers')
loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')
name = models.TextField(db_column='Name')
online = models.IntegerField(db_column='Online')
Lat = models.CharField(max_length=20)
Lon = models.CharField(max_length=20)
class LoggerMeasurement(models.Model):
Logger = models.ForeignKey('Logger', null=False, blank=False,
related_name='Measurement')
timestamp = models.DateTimeField()
measurement = models.DecimalField(max_digits=12, decimal_places=4)
<|reserved_special_token_1|>
from django.db import models
class Building(models.Model):
    # A building identified by an OpenStreetMap way, displayed at its centre.
    Number = models.CharField(max_length=60)
    Description = models.CharField(max_length=120)
    OSMWAYID = models.DecimalField(decimal_places=0, max_digits=15) # the OSM way id of the building outline
    Lat = models.CharField(max_length=20) # latitude of the centre of the building
    Lon = models.CharField(max_length=20) # longitude of the centre of the building
class BuildingPoint(models.Model):
    # One outline point of a Building, identified by an OSM node.
    parent = models.ForeignKey('Building', null=False, blank=False, related_name='points')
    OSMNODEID = models.DecimalField(decimal_places=0, max_digits=15) # the OSM node id of this point
    Lat = models.CharField(max_length=20) # latitude of this point (original comment said 'center' — presumably copy-paste; confirm)
    Lon = models.CharField(max_length=20) # longitude of this point
class Facinet(models.Model):
    ## A Facinet collector node attached to a Building (domain meaning assumed — confirm).
    Building = models.ForeignKey('Building', null=False, blank=False, related_name='FacinetNodes')
    location = models.IntegerField(unique=True, db_column='Location') # unique location code of this node
    name = models.TextField(db_column='Name') # human-readable node name
    connectionstring = models.TextField(db_column='ConnectionString') # presumably how to reach the node — confirm
    tapidevice = models.TextField(db_column='TapiDevice', blank=True) # optional device name (may be empty)
    synctime = models.CharField(max_length=3, db_column='SyncTime') # stored as a short char value, not a time type
    online = models.CharField(max_length=3, db_column='Online') # online flag stored as char (contrast Logger.online: int)
    onlineall = models.CharField(max_length=3, db_column='OnlineAll') # aggregate online flag, also char
    ## location for display
    Lat = models.CharField(max_length=20) # latitude of the facinet collector (map display)
    Lon = models.CharField(max_length=20) # longitude of the facinet collector (map display)
class Logger(models.Model):
    # A measurement logger attached to a Facinet node.
    Facinet = models.ForeignKey('Facinet', null=False, blank=False, related_name='Loggers')
    loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex') # globally unique index of the logger
    name = models.TextField(db_column='Name') # human-readable logger name
    online = models.IntegerField(db_column='Online') # online state as int (Facinet stores its flag as char)
    ## location for display
    Lat = models.CharField(max_length=20) # latitude of the logger (map display)
    Lon = models.CharField(max_length=20) # longitude of the logger (map display)
class LoggerMeasurement(models.Model):
    # One timestamped reading taken by a Logger.
    Logger = models.ForeignKey('Logger', null=False, blank=False, related_name='Measurement')
    timestamp = models.DateTimeField()
    measurement = models.DecimalField(max_digits=12, decimal_places=4)  # fixed-point value, 4 decimal places
|
flexible
|
{
"blob_id": "02ddf213cd3f455f8d8fbde8621fc4788124d5a9",
"index": 3714,
"step-1": "<mask token>\n\n\nclass Logger(models.Model):\n Facinet = models.ForeignKey('Facinet', null=False, blank=False,\n related_name='Loggers')\n loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')\n name = models.TextField(db_column='Name')\n online = models.IntegerField(db_column='Online')\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass LoggerMeasurement(models.Model):\n Logger = models.ForeignKey('Logger', null=False, blank=False,\n related_name='Measurement')\n timestamp = models.DateTimeField()\n measurement = models.DecimalField(max_digits=12, decimal_places=4)\n",
"step-2": "<mask token>\n\n\nclass Facinet(models.Model):\n Building = models.ForeignKey('Building', null=False, blank=False,\n related_name='FacinetNodes')\n location = models.IntegerField(unique=True, db_column='Location')\n name = models.TextField(db_column='Name')\n connectionstring = models.TextField(db_column='ConnectionString')\n tapidevice = models.TextField(db_column='TapiDevice', blank=True)\n synctime = models.CharField(max_length=3, db_column='SyncTime')\n online = models.CharField(max_length=3, db_column='Online')\n onlineall = models.CharField(max_length=3, db_column='OnlineAll')\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass Logger(models.Model):\n Facinet = models.ForeignKey('Facinet', null=False, blank=False,\n related_name='Loggers')\n loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')\n name = models.TextField(db_column='Name')\n online = models.IntegerField(db_column='Online')\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass LoggerMeasurement(models.Model):\n Logger = models.ForeignKey('Logger', null=False, blank=False,\n related_name='Measurement')\n timestamp = models.DateTimeField()\n measurement = models.DecimalField(max_digits=12, decimal_places=4)\n",
"step-3": "<mask token>\n\n\nclass BuildingPoint(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Facinet(models.Model):\n Building = models.ForeignKey('Building', null=False, blank=False,\n related_name='FacinetNodes')\n location = models.IntegerField(unique=True, db_column='Location')\n name = models.TextField(db_column='Name')\n connectionstring = models.TextField(db_column='ConnectionString')\n tapidevice = models.TextField(db_column='TapiDevice', blank=True)\n synctime = models.CharField(max_length=3, db_column='SyncTime')\n online = models.CharField(max_length=3, db_column='Online')\n onlineall = models.CharField(max_length=3, db_column='OnlineAll')\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass Logger(models.Model):\n Facinet = models.ForeignKey('Facinet', null=False, blank=False,\n related_name='Loggers')\n loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')\n name = models.TextField(db_column='Name')\n online = models.IntegerField(db_column='Online')\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass LoggerMeasurement(models.Model):\n Logger = models.ForeignKey('Logger', null=False, blank=False,\n related_name='Measurement')\n timestamp = models.DateTimeField()\n measurement = models.DecimalField(max_digits=12, decimal_places=4)\n",
"step-4": "from django.db import models\n\n\nclass Building(models.Model):\n Number = models.CharField(max_length=60)\n Description = models.CharField(max_length=120)\n OSMWAYID = models.DecimalField(decimal_places=0, max_digits=15)\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass BuildingPoint(models.Model):\n parent = models.ForeignKey('Building', null=False, blank=False,\n related_name='points')\n OSMNODEID = models.DecimalField(decimal_places=0, max_digits=15)\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass Facinet(models.Model):\n Building = models.ForeignKey('Building', null=False, blank=False,\n related_name='FacinetNodes')\n location = models.IntegerField(unique=True, db_column='Location')\n name = models.TextField(db_column='Name')\n connectionstring = models.TextField(db_column='ConnectionString')\n tapidevice = models.TextField(db_column='TapiDevice', blank=True)\n synctime = models.CharField(max_length=3, db_column='SyncTime')\n online = models.CharField(max_length=3, db_column='Online')\n onlineall = models.CharField(max_length=3, db_column='OnlineAll')\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass Logger(models.Model):\n Facinet = models.ForeignKey('Facinet', null=False, blank=False,\n related_name='Loggers')\n loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex')\n name = models.TextField(db_column='Name')\n online = models.IntegerField(db_column='Online')\n Lat = models.CharField(max_length=20)\n Lon = models.CharField(max_length=20)\n\n\nclass LoggerMeasurement(models.Model):\n Logger = models.ForeignKey('Logger', null=False, blank=False,\n related_name='Measurement')\n timestamp = models.DateTimeField()\n measurement = models.DecimalField(max_digits=12, decimal_places=4)\n",
"step-5": "from django.db import models\n\n\nclass Building(models.Model):\n Number = models.CharField(max_length=60)\n Description = models.CharField(max_length=120)\n OSMWAYID = models.DecimalField(decimal_places=0, max_digits=15) # the osm way id\n Lat = models.CharField(max_length=20) #lat/lon of then center\n Lon = models.CharField(max_length=20) # lat/lon of the center of the building\n\n\nclass BuildingPoint(models.Model):\n parent = models.ForeignKey('Building', null=False, blank=False, related_name='points')\n OSMNODEID = models.DecimalField(decimal_places=0, max_digits=15) # the osm id\n Lat = models.CharField(max_length=20) #lat/lon of then center\n Lon = models.CharField(max_length=20) # lat/lon of the center of the building\n\n\nclass Facinet(models.Model):\n ##\n Building = models.ForeignKey('Building', null=False, blank=False, related_name='FacinetNodes')\n location = models.IntegerField(unique=True, db_column='Location') # \n name = models.TextField(db_column='Name') # \n connectionstring = models.TextField(db_column='ConnectionString') # \n tapidevice = models.TextField(db_column='TapiDevice', blank=True) # \n synctime = models.CharField(max_length=3, db_column='SyncTime') # \n online = models.CharField(max_length=3, db_column='Online') # \n onlineall = models.CharField(max_length=3, db_column='OnlineAll') # \n ## location for display\n Lat = models.CharField(max_length=20) #lat/lon of facinet collector\n Lon = models.CharField(max_length=20) # lat/lon of facinet collector\n\n\nclass Logger(models.Model):\n Facinet = models.ForeignKey('Facinet', null=False, blank=False, related_name='Loggers')\n loggerindex = models.IntegerField(unique=True, db_column='LoggerIndex') # \n name = models.TextField(db_column='Name') # \n online = models.IntegerField(db_column='Online') # \n ## location for display\n Lat = models.CharField(max_length=20) #lat/lon of the logger\n Lon = models.CharField(max_length=20) # lat/lon of the logger\n\nclass 
LoggerMeasurement(models.Model):\n Logger = models.ForeignKey('Logger', null=False, blank=False, related_name='Measurement')\n timestamp = models.DateTimeField()\n measurement = models.DecimalField(max_digits=12, decimal_places=4)\n",
"step-ids": [
4,
6,
7,
11,
12
]
}
|
[
4,
6,
7,
11,
12
] |
<|reserved_special_token_0|>
class TestInterpreter(unittest.TestCase):
<|reserved_special_token_0|>
def test_HelloWorld(self):
result = run_program(
"""
++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.
+++++++..+++.>++.<<+++++++++++++++.>.+++.------.-
-------.>+.>."""
)
self.assertEquals(result, 'Hello World!')
<|reserved_special_token_0|>
def test_ROT13(self):
result = run_program(
"""
-,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]
>>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++
++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[
-]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]"""
, 'applesauce')
self.assertEquals(result, 'nccyrfnhpr')
def test_Clean(self):
self.assertRaises(Exception, brainfuck.clean, '[[]')
self.assertRaises(Exception, brainfuck.clean, '][')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestInterpreter(unittest.TestCase):
def setUp(self):
brainfuck.set_cell_size()
def test_HelloWorld(self):
result = run_program(
"""
++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.
+++++++..+++.>++.<<+++++++++++++++.>.+++.------.-
-------.>+.>."""
)
self.assertEquals(result, 'Hello World!')
def test_Squares(self):
result = run_program(
"""
++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>
-]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+
+>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<
-]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]"""
)
expected_result = '\n'.join([str(x ** 2) for x in range(101)])
self.assertEquals(result, expected_result)
def test_ROT13(self):
result = run_program(
"""
-,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]
>>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++
++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[
-]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]"""
, 'applesauce')
self.assertEquals(result, 'nccyrfnhpr')
def test_Clean(self):
self.assertRaises(Exception, brainfuck.clean, '[[]')
self.assertRaises(Exception, brainfuck.clean, '][')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_program(program, input=None):
old_stdout = sys.stdout
old_stdin = sys.stdin
try:
out = StringIO()
sys.stdout = out
if input is not None:
input = StringIO(input)
sys.stdin = input
brainfuck.brainfuck(program)
finally:
sys.stdout = old_stdout
sys.stdin = old_stdin
return out.getvalue().strip()
class TestInterpreter(unittest.TestCase):
def setUp(self):
brainfuck.set_cell_size()
def test_HelloWorld(self):
result = run_program(
"""
++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.
+++++++..+++.>++.<<+++++++++++++++.>.+++.------.-
-------.>+.>."""
)
self.assertEquals(result, 'Hello World!')
def test_Squares(self):
result = run_program(
"""
++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>
-]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+
+>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<
-]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]"""
)
expected_result = '\n'.join([str(x ** 2) for x in range(101)])
self.assertEquals(result, expected_result)
def test_ROT13(self):
result = run_program(
"""
-,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]
>>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++
++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[
-]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]"""
, 'applesauce')
self.assertEquals(result, 'nccyrfnhpr')
def test_Clean(self):
self.assertRaises(Exception, brainfuck.clean, '[[]')
self.assertRaises(Exception, brainfuck.clean, '][')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import unittest
import brainfuck
import sys
from StringIO import StringIO
def run_program(program, input=None):
old_stdout = sys.stdout
old_stdin = sys.stdin
try:
out = StringIO()
sys.stdout = out
if input is not None:
input = StringIO(input)
sys.stdin = input
brainfuck.brainfuck(program)
finally:
sys.stdout = old_stdout
sys.stdin = old_stdin
return out.getvalue().strip()
class TestInterpreter(unittest.TestCase):
def setUp(self):
brainfuck.set_cell_size()
def test_HelloWorld(self):
result = run_program(
"""
++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.
+++++++..+++.>++.<<+++++++++++++++.>.+++.------.-
-------.>+.>."""
)
self.assertEquals(result, 'Hello World!')
def test_Squares(self):
result = run_program(
"""
++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>
-]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+
+>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<
-]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]"""
)
expected_result = '\n'.join([str(x ** 2) for x in range(101)])
self.assertEquals(result, expected_result)
def test_ROT13(self):
result = run_program(
"""
-,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]
>>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++
++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[
-]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]"""
, 'applesauce')
self.assertEquals(result, 'nccyrfnhpr')
def test_Clean(self):
self.assertRaises(Exception, brainfuck.clean, '[[]')
self.assertRaises(Exception, brainfuck.clean, '][')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
import brainfuck
import sys
from StringIO import StringIO
def run_program(program, input=None):
    """Run *program* through the brainfuck interpreter, capturing stdout.

    If *input* is given, it is served to the interpreter as stdin.
    Returns the captured output with surrounding whitespace stripped.
    """
    saved = (sys.stdout, sys.stdin)
    out = StringIO()
    try:
        sys.stdout = out
        if input is not None:
            sys.stdin = StringIO(input)
        brainfuck.brainfuck(program)
    finally:
        # Always restore the real streams, even if the interpreter raises.
        sys.stdout, sys.stdin = saved
    return out.getvalue().strip()
class TestInterpreter(unittest.TestCase):
    """End-to-end tests for the brainfuck interpreter via run_program."""

    def setUp(self):
        # Reset the interpreter to its default cell size before each test.
        brainfuck.set_cell_size()

    def test_HelloWorld(self):
        """The canonical hello-world program prints 'Hello World!'."""
        result = run_program("""
        ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.
        +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-
        -------.>+.>.""")
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual throughout.
        self.assertEqual(result, "Hello World!")

    def test_Squares(self):
        """Program printing the squares of 0..100, one per line."""
        result = run_program("""
        ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>
        -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+
        +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<
        -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]""")
        expected_result = "\n".join([str(x**2) for x in range(101)])
        self.assertEqual(result, expected_result)

    def test_ROT13(self):
        """ROT13 program transforms characters read from stdin."""
        result = run_program("""
        -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]
        >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++
        ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[
        -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]""", "applesauce")
        self.assertEqual(result, "nccyrfnhpr")

    def test_Clean(self):
        """clean() must reject programs with unbalanced brackets."""
        self.assertRaises(Exception, brainfuck.clean, "[[]")
        self.assertRaises(Exception, brainfuck.clean, "][")
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
flexible
|
{
"blob_id": "19ab44cec863560513aadd88b5fd4bb40f75e371",
"index": 2579,
"step-1": "<mask token>\n\n\nclass TestInterpreter(unittest.TestCase):\n <mask token>\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n <mask token>\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestInterpreter(unittest.TestCase):\n\n def setUp(self):\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n\n def test_Squares(self):\n result = run_program(\n \"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\"\n )\n expected_result = '\\n'.join([str(x ** 2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_program(program, input=None):\n old_stdout = sys.stdout\n old_stdin = sys.stdin\n try:\n out = StringIO()\n sys.stdout = out\n if input is not None:\n input = StringIO(input)\n sys.stdin = input\n brainfuck.brainfuck(program)\n finally:\n sys.stdout = old_stdout\n sys.stdin = old_stdin\n return out.getvalue().strip()\n\n\nclass TestInterpreter(unittest.TestCase):\n\n def setUp(self):\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n\n def test_Squares(self):\n result = run_program(\n \"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\"\n )\n expected_result = '\\n'.join([str(x ** 2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\n<mask token>\n",
"step-4": "import unittest\nimport brainfuck\nimport sys\nfrom StringIO import StringIO\n\n\ndef run_program(program, input=None):\n old_stdout = sys.stdout\n old_stdin = sys.stdin\n try:\n out = StringIO()\n sys.stdout = out\n if input is not None:\n input = StringIO(input)\n sys.stdin = input\n brainfuck.brainfuck(program)\n finally:\n sys.stdout = old_stdout\n sys.stdin = old_stdin\n return out.getvalue().strip()\n\n\nclass TestInterpreter(unittest.TestCase):\n\n def setUp(self):\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\n \"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\"\n )\n self.assertEquals(result, 'Hello World!')\n\n def test_Squares(self):\n result = run_program(\n \"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\"\n )\n expected_result = '\\n'.join([str(x ** 2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\n \"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\"\n , 'applesauce')\n self.assertEquals(result, 'nccyrfnhpr')\n\n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, '[[]')\n self.assertRaises(Exception, brainfuck.clean, '][')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport brainfuck\nimport sys\nfrom StringIO import StringIO\n\n\ndef run_program(program, input = None):\n old_stdout = sys.stdout\n old_stdin = sys.stdin\n try:\n out = StringIO()\n sys.stdout = out\n if input is not None:\n input = StringIO(input) \n sys.stdin = input\n brainfuck.brainfuck(program)\n finally:\n sys.stdout = old_stdout\n sys.stdin = old_stdin\n\n return out.getvalue().strip()\n\nclass TestInterpreter(unittest.TestCase):\n def setUp(self):\n\n brainfuck.set_cell_size()\n\n def test_HelloWorld(self):\n result = run_program(\"\"\"\n ++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.\n +++++++..+++.>++.<<+++++++++++++++.>.+++.------.-\n -------.>+.>.\"\"\")\n self.assertEquals(result, \"Hello World!\")\n def test_Squares(self):\n result = run_program(\"\"\"\n ++++[>+++++<-]>[<+++++>-]+<+[>[>+>+<<-]++>>[<<+>>\n -]>>>[-]++>[-]+>>>+[[-]++++++>>>]<<<[[<++++++++<+\n +>>-]+<.<[>----<-]<]<<[>>>>>[>>>[-]+++++++++<[>-<\n -]+++++++++>[-[<->-]+[<<<]]<[>+<-]>]<<-]<<-]\"\"\")\n expected_result = \"\\n\".join([str(x**2) for x in range(101)])\n self.assertEquals(result, expected_result)\n\n def test_ROT13(self):\n result = run_program(\"\"\"\n -,+[-[>>++++[>++++++++<-]<+<-[>+>+>-[>>>]<[[>+<-]\n >>+>]<<<<<-]]>>>[-]+>--[-[<->+++[-]]]<[++++++++++\n ++<[>-[>+>>]>[+[<+>-]>+>>]<<<<<-]>>[<+>-]>[-[-<<[\n -]>>]<<[<<->>-]>>]<<[<<+>>-]]<[-]<.[-]<-,+]\"\"\", \"applesauce\")\n self.assertEquals(result, \"nccyrfnhpr\")\n \n def test_Clean(self):\n self.assertRaises(Exception, brainfuck.clean, \"[[]\")\n self.assertRaises(Exception, brainfuck.clean, \"][\")\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
6,
7,
9,
10
]
}
|
[
4,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def drawSquare():
for i in range(4):
forward(100)
left(90)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def drawSquare():
for i in range(4):
forward(100)
left(90)
if __name__ == '__main__':
drawSquare()
up()
forward(200)
down()
drawSquare()
mainloop()
<|reserved_special_token_1|>
from turtle import *
def drawSquare():
    """Trace a square with 100-unit sides using the global turtle."""
    side_count = 4
    for _ in range(side_count):
        forward(100)  # advance one side
        left(90)      # turn toward the next side
if __name__ == '__main__':
    # Bug fix: previously only the first drawSquare() was inside the
    # guard; the pen moves, second square, and mainloop() ran at import
    # time. The whole demo now runs only when executed as a script.
    drawSquare()
    up()            # pen up: travel without drawing
    forward(200)
    down()          # pen down: resume drawing
    drawSquare()
    mainloop()      # keep the turtle window open until the user closes it
|
flexible
|
{
"blob_id": "1ce5b97148885950983e39b7e99d0cdfafe4bc16",
"index": 5382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\nif __name__ == '__main__':\n drawSquare()\nup()\nforward(200)\ndown()\ndrawSquare()\nmainloop()\n",
"step-4": "from turtle import *\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\nif __name__ == '__main__':\n drawSquare()\nup()\nforward(200)\ndown()\ndrawSquare()\nmainloop()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# This is the template file for Lab #5, Task #1
import numpy
import lab5
def digitize(samples,threshold):
    """Hard-slice sampled voltages into bits: 1 where strictly above
    threshold, 0 otherwise (elementwise over a numpy array)."""
    above = samples > threshold
    return above * 1
class ViterbiDecoder:
    """Hard-decision Viterbi decoder for a rate-1/r convolutional code.

    States are named by (k-1)-bit integers matching the encoder shift
    register. decode() fills a path-metric trellis (self.PM) and a
    predecessor table (self.Predecessor) column by column, then traces
    the minimum-metric path back to recover the message bits.
    Written for Python 2 (xrange, print statements).
    """
    # given the constraint length and a list of parity generator
    # functions, do the initial set up for the decoder. The
    # following useful instance variables are created:
    # self.k
    # self.nstates
    # self.r
    # self.predecessor_states
    # self.expected_parity
    def __init__(self,k,glist):
        self.k = k # constraint length
        self.nstates = 2**(k-1) # number of states in state machine
        # number of parity bits transmitted for each message bit
        self.r = len(glist)
        # States are named using (k-1)-bit integers in the range 0 to
        # nstates-1. The bit representation of the integer corresponds
        # to state label in the transition diagram. So state 10 is
        # named with the integer 2, state 00 is named with the
        # integer 0.
        # for each state s, figure out the two states in the diagram
        # that have transitions ending at state s. Record these two
        # states as a two-element tuple.
        self.predecessor_states = \
            [((2*s+0) % self.nstates,(2*s+1) % self.nstates)
            for s in xrange(self.nstates)]
        # this is a 2D table implemented as a list of lists.
        # self.expected_parity[s1][s2] returns the r-bit sequence
        # of parity bits the encoder transmitted when make the
        # state transition from s1 to s2.
        self.expected_parity = \
            [[lab5.expected_parity(s1,s2,k,glist) \
              if s1 in self.predecessor_states[s2] else None
              for s2 in xrange(self.nstates)]
             for s1 in xrange(self.nstates)]

    # expected is an r-element list of the expected parity bits
    # (or you can also think of them as voltages given how we send
    # bits down the channel). received is an r-element list of
    # actual sampled voltages for the incoming parity bits.
    # This is a hard-decision branch metric, so, as described in
    # lab write up, digitize the received voltages to get bits and
    # then compute the Hamming distance between the expected sequence
    # and the received sequences, return that as the branch metric.
    # Consider using lab5.hamming(seq1,seq2) which computes the
    # Hamming distance between two binary sequences.
    def branch_metric(self,expected,received):
        """Hamming distance between expected parity bits and digitized samples."""
        assert len(expected) == len(received)	# they must be the same length
        # 0.5 V slicing threshold: bits are sent as 0/1 volt levels.
        vTh = 0.5
        dSamples = digitize(received,vTh)
        return lab5.hamming(expected,dSamples)

    # compute self.PM[...,n] from the batch of r parity bits and
    # the path metrics for self.PM[...,n-1] computed on the previous
    # iteration. Follow the algorithm described in the lab
    # write up. In addition to making an entry for self.PM[n,s] for
    # each state s, keep track of the most-likely predecessor
    # for each state in the self.Predecessor array. You'll probably
    # find the following instance variables and methods useful:
    # self.predecessor_states
    # self.expected_parity
    # self.branch_metric()
    def viterbi_step(self,n,received_voltages):
        """Fill column n of self.PM / self.Predecessor from r received voltages."""
        for state in xrange(self.nstates):
            (alpha,beta) = self.predecessor_states[state]
            (pAlpha,pBeta) = (self.expected_parity[alpha][state],self.expected_parity[beta][state])
            bmAlpha = self.branch_metric(pAlpha,received_voltages)
            bmBeta = self.branch_metric(pBeta,received_voltages)
            pmAlpha = self.PM[alpha][n-1]+bmAlpha
            pmBeta = self.PM[beta][n-1]+bmBeta
            # keep the smaller candidate path metric; ties go to alpha
            if pmAlpha <= pmBeta:
                self.PM[state][n] = pmAlpha
                self.Predecessor[state][n] = alpha
            else:
                self.PM[state][n] = pmBeta
                self.Predecessor[state][n] = beta

    # Identify the most-likely ending state of the encoder by
    # finding the state s which has the mimimum value of PM[s,n]
    # where n points to the last column of the trellis. If there
    # are several states with the same minimum value, the end of
    # the message has been corrupted by errors, so decrement n
    # and repeat the search. Keep doing this until a unique s is
    # found. Return the tuple (s,n).
    def most_likely_state(self,n):
        """Return (s,n) for the unique minimum-metric state, backing n up on ties."""
        minState = [0]
        minValue = self.PM[0,n]
        for state in range(1,self.nstates):
            if self.PM[state][n] < minValue:
                minState = [state]
                minValue = self.PM[state,n]
            elif self.PM[state][n] == minValue:
                minState.append(state)
        if len(minState) > 1:	# message is corrupted by errors
            # recurse on the previous column until the minimum is unique
            return self.most_likely_state(n-1)
        else:
            return (minState[0],n)

    # starting at state s at time n, use the Predecessor
    # array to find all the states on the most-likely
    # path. Each state contributes a message bit...
    def traceback(self,s,n):
        """Walk the Predecessor table back from (s,n); return the decoded bit list."""
        message = []
        while n > 0:
            # message bit that caused transition to
            # state s is also the high-order bit of
            # the state name
            message.append(s >> (self.k-2))
            # back to the next earlier state along the path
            s = self.Predecessor[s,n]
            n -= 1
        message.reverse()
        return message

    # figure out what the transmitter sent from info in the
    # received voltages
    def decode(self,received_voltages,debug=False):
        """Decode a flat array of received parity voltages into message bits."""
        # figure out how many columns they'll be in the trellis
        nreceived = len(received_voltages)
        max_n = (nreceived/2) + 1
        # this is the path metric trellis itself, organized as a
        # 2D array: rows are the states, columns are the time points.
        # PM[s,n] is the metric for the most-likely path through the
        # trellis arriving at state s at time n.
        # NOTE(review): numpy.float / numpy.int below were removed in
        # NumPy >= 1.24; fine for the py2-era numpy this code targets.
        self.PM = numpy.zeros((self.nstates,max_n),dtype=numpy.float)
        # at time 0, the starting state is the most likely, the other
        # states are "infinitely" worse.
        self.PM[1:self.nstates,0] = 1000000
        # a 2D array: rows are the states, columns are the time
        # points, contents indicate the predecessor state for each
        # current state.
        self.Predecessor = numpy.zeros((self.nstates,max_n),
                                       dtype=numpy.int)
        # use the Viterbi algorithm to compute PM
        # incrementally from the received parity bits.
        n = 0
        for i in xrange(0,nreceived,self.r):
            n += 1
            # Fill in the next columns of PM, Predecessor based
            # on info in the next r incoming parity bits
            self.viterbi_step(n,received_voltages[i:i+self.r])
            # print out what was just added to the trellis state
            if debug:
                print self.PM[:,n],self.Predecessor[:,n]
        # find the most-likely ending state from the last row
        # of the trellis
        s,n = self.most_likely_state(n)
        # reconstruct message by tracing the most likely path
        # back through the matrix using self.Predecessor.
        return self.traceback(s,n)

    # print out final path metrics
    def dump_state(self):
        """Print the final column of path metrics (debug aid)."""
        print self.PM[:,-1]
# Smoke test: decode a short hard-coded parity stream with a k=3, r=2 code.
if __name__=='__main__':
    d = ViterbiDecoder(3,(7,6))
    received = numpy.array([1,1,1,0,1,1,0,0,0,1,1,0,0,0])
    message = d.decode(received,debug=True)
    print "decoded message =",message
|
normal
|
{
"blob_id": "19221823f14cf06a55d445fc241fc04e64e5873c",
"index": 8323,
"step-1": "# This is the template file for Lab #5, Task #1\nimport numpy\nimport lab5\n\ndef digitize(samples,threshold):\n\treturn 1*(samples > threshold)\n\nclass ViterbiDecoder:\n # given the constraint length and a list of parity generator\n # functions, do the initial set up for the decoder. The\n # following useful instance variables are created:\n # self.k\n # self.nstates\n # self.r\n # self.predecessor_states\n # self.expected_parity\n def __init__(self,k,glist):\n self.k = k # constraint length\n self.nstates = 2**(k-1) # number of states in state machine\n\n # number of parity bits transmitted for each message bit\n self.r = len(glist) \n\n # States are named using (k-1)-bit integers in the range 0 to\n # nstates-1. The bit representation of the integer corresponds\n # to state label in the transition diagram. So state 10 is\n # named with the integer 2, state 00 is named with the\n # integer 0.\n\n # for each state s, figure out the two states in the diagram\n # that have transitions ending at state s. Record these two\n # states as a two-element tuple.\n self.predecessor_states = \\\n [((2*s+0) % self.nstates,(2*s+1) % self.nstates)\n for s in xrange(self.nstates)]\n\n # this is a 2D table implemented as a list of lists.\n # self.expected_parity[s1][s2] returns the r-bit sequence\n # of parity bits the encoder transmitted when make the\n # state transition from s1 to s2.\n self.expected_parity = \\\n [[lab5.expected_parity(s1,s2,k,glist) \\\n if s1 in self.predecessor_states[s2] else None\n for s2 in xrange(self.nstates)]\n for s1 in xrange(self.nstates)]\n\n # expected is an r-element list of the expected parity bits\n # (or you can also think of them as voltages given how we send\n # bits down the channel). 
received is an r-element list of\n # actual sampled voltages for the incoming parity bits.\n # This is a hard-decision branch metric, so, as described in\n # lab write up, digitize the received voltages to get bits and\n # then compute the Hamming distance between the expected sequence\n # and the received sequences, return that as the branch metric.\n # Consider using lab5.hamming(seq1,seq2) which computes the\n # Hamming distance between two binary sequences.\n def branch_metric(self,expected,received):\n assert len(expected) == len(received)\t# they must be the same length\n vTh = 0.5\n dSamples = digitize(received,vTh)\n return lab5.hamming(expected,dSamples)\n \n\n # compute self.PM[...,n] from the batch of r parity bits and\n # the path metrics for self.PM[...,n-1] computed on the previous\n # iteration. Follow the algorithm described in the lab\n # write up. In addition to making an entry for self.PM[n,s] for\n # each state s, keep track of the most-likely predecessor\n # for each state in the self.Predecessor array. 
You'll probably\n # find the following instance variables and methods useful:\n # self.predecessor_states\n # self.expected_parity\n # self.branch_metric()\n def viterbi_step(self,n,received_voltages):\n\t\tfor state in xrange(self.nstates):\n\t\t\t(alpha,beta) = self.predecessor_states[state]\n\t\t\t(pAlpha,pBeta) = (self.expected_parity[alpha][state],self.expected_parity[beta][state])\n\t\t\tbmAlpha = self.branch_metric(pAlpha,received_voltages)\n\t\t\tbmBeta = self.branch_metric(pBeta,received_voltages)\n\t\t\tpmAlpha = self.PM[alpha][n-1]+bmAlpha\n\t\t\tpmBeta = self.PM[beta][n-1]+bmBeta\n\t\t\tif pmAlpha <= pmBeta:\n\t\t\t\tself.PM[state][n] = pmAlpha\n\t\t\t\tself.Predecessor[state][n] = alpha\n\t\t\telse:\n\t\t\t\tself.PM[state][n] = pmBeta\n\t\t\t\tself.Predecessor[state][n] = beta\n\n # Identify the most-likely ending state of the encoder by\n # finding the state s which has the mimimum value of PM[s,n]\n # where n points to the last column of the trellis. If there\n # are several states with the same minimum value, the end of\n # the message has been corrupted by errors, so decrement n\n # and repeat the search. Keep doing this until a unique s is\n # found. Return the tuple (s,n).\n def most_likely_state(self,n):\n \tminState = [0]\n \tminValue = self.PM[0,n]\n for state in range(1,self.nstates):\n \tif self.PM[state][n] < minValue:\n \t\tminState = [state]\n \t\tminValue = self.PM[state,n]\n \telif self.PM[state][n] == minValue:\n \t\tminState.append(state)\n if len(minState) > 1:\t# message is corrupted by errors\n \treturn self.most_likely_state(n-1)\n else:\n \treturn (minState[0],n)\n\n # starting at state s at time n, use the Predecessor\n # array to find all the states on the most-likely\n # path. 
Each state contributes a message bit...\n def traceback(self,s,n):\n message = []\n while n > 0:\n # message bit that caused transition to\n # state s is also the high-order bit of\n # the state name\n message.append(s >> (self.k-2))\n # back to the next earlier state along the path\n s = self.Predecessor[s,n]\n n -= 1\n message.reverse()\n return message\n\n # figure out what the transmitter sent from info in the\n # received voltages\n def decode(self,received_voltages,debug=False):\n # figure out how many columns they'll be in the trellis\n nreceived = len(received_voltages)\n max_n = (nreceived/2) + 1\n\n # this is the path metric trellis itself, organized as a\n # 2D array: rows are the states, columns are the time points.\n # PM[s,n] is the metric for the most-likely path through the\n # trellis arriving at state s at time n.\n self.PM = numpy.zeros((self.nstates,max_n),dtype=numpy.float)\n\n # at time 0, the starting state is the most likely, the other\n # states are \"infinitely\" worse.\n self.PM[1:self.nstates,0] = 1000000\n\n # a 2D array: rows are the states, columns are the time\n # points, contents indicate the predecessor state for each\n # current state.\n self.Predecessor = numpy.zeros((self.nstates,max_n),\n dtype=numpy.int)\n\n # use the Viterbi algorithm to compute PM\n # incrementally from the received parity bits.\n n = 0\n for i in xrange(0,nreceived,self.r):\n n += 1\n\n # Fill in the next columns of PM, Predecessor based\n # on info in the next r incoming parity bits\n self.viterbi_step(n,received_voltages[i:i+self.r])\n\n # print out what was just added to the trellis state\n if debug:\n print self.PM[:,n],self.Predecessor[:,n]\n\n # find the most-likely ending state from the last row\n # of the trellis\n s,n = self.most_likely_state(n)\n\n # reconstruct message by tracing the most likely path\n # back through the matrix using self.Predecessor.\n return self.traceback(s,n)\n\n # print out final path metrics\n def dump_state(self):\n print 
self.PM[:,-1]\n\nif __name__=='__main__':\n d = ViterbiDecoder(3,(7,6))\n received = numpy.array([1,1,1,0,1,1,0,0,0,1,1,0,0,0]) \n message = d.decode(received,debug=True)\n print \"decoded message =\",message",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Moving Averages Code
# Load the necessary packages and modules
import pandas as pd
import matplotlib.pyplot as plt
import data.stock as st
# Simple Moving Average
def SMA(data, ndays):
    """Return *data* joined with an 'SMA' column: the ndays-window
    simple moving average of the 'close' column (leading values NaN)."""
    rolling_mean = data['close'].rolling(ndays).mean()
    sma_column = pd.Series(rolling_mean, name='SMA')
    return data.join(sma_column)
# Exponentially-weighted Moving Average
def EWMA(data, ndays):
    """Return *data* joined with an 'EWMA' column: the span-ndays
    exponentially weighted moving average of 'close' (first ndays-2
    entries are NaN via min_periods)."""
    # Bound Series.ewm call — equivalent to the unbound
    # pd.DataFrame.ewm(series, ...) form, just idiomatic.
    ewm_mean = data['close'].ewm(span=ndays, min_periods=ndays - 1).mean()
    ewma_column = pd.Series(ewm_mean, name='EWMA')
    return data.join(ewma_column)
# Retrieve the Nifty data from Yahoo finance:
# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')
# close = XSHE000002_data['close']
#
# # Compute the 50-day SMA for NIFTY
# n = 50
# SMA_NIFTY = SMA(XSHE000002_data, n)
# SMA_NIFTY = SMA_NIFTY.dropna()
# SMA = SMA_NIFTY['SMA']
def get_sma(stock_code, ndays):
    """Load price history for *stock_code* and return its ndays simple
    moving average as a Series with the NaN warm-up rows dropped."""
    prices = st.get_csv_data(stock_code, 'price')
    with_sma = SMA(prices, ndays).dropna()
    return with_sma['SMA']
def get_ewma(stock_code, ndays):
    """Load price history for *stock_code* and return its span-ndays
    exponentially weighted moving average with NaN warm-up rows dropped."""
    prices = st.get_csv_data(stock_code, 'price')
    with_ewma = EWMA(prices, ndays).dropna()
    return with_ewma['EWMA']
# Compute the 200-day EWMA for NIFTY
# ew = 200
# EWMA_NIFTY = EWMA(XSHE000002_data, ew)
# EWMA_NIFTY = EWMA_NIFTY.dropna()
# EWMA = EWMA_NIFTY['EWMA_200']
# Plotting the NIFTY Price Series chart and Moving Averages below
# plt.figure(figsize=(9, 5))
# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')
# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')
# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')
# plt.legend(loc=2, prop={'size': 11})
# plt.grid(True)
# plt.setp(plt.gca().get_xticklabels(), rotation=30)
# plt.show()
|
normal
|
{
"blob_id": "4c9f2b6fd119daa58b7f1dd7153c90df747e62cb",
"index": 1249,
"step-1": "<mask token>\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\n<mask token>\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-3": "<mask token>\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods\n =ndays - 1).mean(), name='EWMA')\n data = data.join(EMA)\n return data\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport data.stock as st\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods\n =ndays - 1).mean(), name='EWMA')\n data = data.join(EMA)\n return data\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-5": "# Moving Averages Code\n\n# Load the necessary packages and modules\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport data.stock as st\n\n\n# Simple Moving Average \ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n # SMA = pd.Series(pd.rolling_mean(data['close'], ndays), name='SMA')\n data = data.join(SMA)\n return data\n\n\n# Exponentially-weighted Moving Average\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'],\n span=ndays,\n min_periods=ndays - 1).mean(),\n name='EWMA')\n data = data.join(EMA)\n return data\n\n\n# Retrieve the Nifty data from Yahoo finance:\n# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')\n# close = XSHE000002_data['close']\n#\n# # Compute the 50-day SMA for NIFTY\n# n = 50\n# SMA_NIFTY = SMA(XSHE000002_data, n)\n# SMA_NIFTY = SMA_NIFTY.dropna()\n# SMA = SMA_NIFTY['SMA']\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n\n# Compute the 200-day EWMA for NIFTY\n# ew = 200\n# EWMA_NIFTY = EWMA(XSHE000002_data, ew)\n# EWMA_NIFTY = EWMA_NIFTY.dropna()\n# EWMA = EWMA_NIFTY['EWMA_200']\n\n# Plotting the NIFTY Price Series chart and Moving Averages below\n# plt.figure(figsize=(9, 5))\n# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')\n# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')\n# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')\n# plt.legend(loc=2, prop={'size': 11})\n# plt.grid(True)\n# plt.setp(plt.gca().get_xticklabels(), rotation=30)\n# plt.show()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def set_up(args):
    """One-time experiment setup: pin the GPU, create the save directory,
    and seed torch for reproducibility."""
    set_gpu(args.gpu)
    ensure_path(args.save_path)
    torch.manual_seed(args.random_seed)
    # deterministic cuDNN kernels so repeated runs match exactly
    torch.backends.cudnn.deterministic = True
<|reserved_special_token_0|>
def test(args, data, label, reproduce, subject, fold):
    """Evaluate a saved model on (data, label).

    When *reproduce* is true, loads the per-subject/per-fold checkpoint
    saved during training; otherwise loads args.load_path.
    Returns (accuracy, predicted_labels, true_labels).
    """
    seed_all(args.random_seed)
    set_up(args)
    loader = get_dataloader(data, label, args.batch_size, False)
    net = get_model(args)
    if CUDA:
        net = net.cuda()
    criterion = nn.CrossEntropyLoss()
    if reproduce:
        # Rebuild the checkpoint path that training used when saving.
        ckpt_name = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'
        data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,
            args.label_type)
        ckpt_dir = osp.join(args.save_path, data_type)
        ensure_path(ckpt_dir)
        net.load_state_dict(torch.load(osp.join(ckpt_dir, ckpt_name)))
    else:
        net.load_state_dict(torch.load(args.load_path))
    loss, pred, act = predict(data_loader=loader, net=net, loss_fn=criterion)
    acc, f1, cm = get_metrics(y_pred=pred, y_true=act)
    print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))
    return acc, pred, act
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def predict(data_loader, net, loss_fn):
    """Run *net* over *data_loader* without gradients.

    Returns (mean_loss, predicted_labels, true_labels) with the label
    sequences as flat Python lists.
    """
    net.eval()
    predictions = []
    ground_truth = []
    loss_meter = Averager()
    with torch.no_grad():
        for inputs, targets in data_loader:
            if CUDA:
                inputs, targets = inputs.cuda(), targets.cuda()
            logits = net(inputs)
            batch_loss = loss_fn(logits, targets)
            loss_meter.add(batch_loss.item())
            predictions.extend(torch.max(logits, 1)[1].data.tolist())
            ground_truth.extend(targets.data.tolist())
    return loss_meter.item(), predictions, ground_truth
def set_up(args):
    """One-time experiment setup: pin the GPU, create the save directory,
    and seed torch for reproducibility."""
    set_gpu(args.gpu)
    ensure_path(args.save_path)
    torch.manual_seed(args.random_seed)
    # deterministic cuDNN kernels so repeated runs match exactly
    torch.backends.cudnn.deterministic = True
def train(args, data_train, label_train, data_val, label_val, subject, fold):
    """Train one subject/fold and return the best validation accuracy.

    Fits on (data_train, label_train), evaluates every epoch on
    (data_val, label_val), checkpoints the best-accuracy model, and
    saves a per-fold training log under args.save_path/log_train.
    """
    seed_all(args.random_seed)
    save_name = '_sub' + str(subject) + '_trial' + str(fold)
    set_up(args)
    train_loader = get_dataloader(data_train, label_train, args.batch_size)
    val_loader = get_dataloader(data_val, label_val, args.batch_size)
    model = get_model(args)
    para = get_trainable_parameter_num(model)
    print('Model {} size:{}'.format(args.model, para))
    if CUDA:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    loss_fn = nn.CrossEntropyLoss()
    def save_model(name):
        # Remove any stale checkpoint of the same name before saving.
        previous_model = osp.join(args.save_path, '{}.pth'.format(name))
        if os.path.exists(previous_model):
            os.remove(previous_model)
        torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.
            format(name)))
    # trlog accumulates per-epoch curves plus the best validation accuracy.
    trlog = {}
    trlog['args'] = vars(args)
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['train_acc'] = []
    trlog['val_acc'] = []
    trlog['max_acc'] = 0.0
    timer = Timer()
    for epoch in range(1, args.max_epoch + 1):
        loss_train, pred_train, act_train = train_one_epoch(data_loader=
            train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)
        acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=
            act_train)
        print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'.format(epoch,
            loss_train, acc_train, f1_train))
        loss_val, pred_val, act_val = predict(data_loader=val_loader, net=
            model, loss_fn=loss_fn)
        acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)
        print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.format(
            epoch, loss_val, acc_val, f1_val))
        if acc_val > trlog['max_acc']:
            # New best validation accuracy: keep a rolling 'max-acc'
            # checkpoint and (optionally) a per-subject/per-fold copy.
            trlog['max_acc'] = acc_val
            save_model('max-acc')
            if args.save_model:
                model_name_reproduce = 'sub' + str(subject) + '_fold' + str(
                    fold) + '.pth'
                data_type = 'model_{}_{}_{}'.format(args.dataset, args.
                    data_format, args.label_type)
                save_path = osp.join(args.save_path, data_type)
                ensure_path(save_path)
                model_name_reproduce = osp.join(save_path, model_name_reproduce
                    )
                torch.save(model.state_dict(), model_name_reproduce)
        trlog['train_loss'].append(loss_train)
        trlog['train_acc'].append(acc_train)
        trlog['val_loss'].append(loss_val)
        trlog['val_acc'].append(acc_val)
        print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.
            measure(epoch / args.max_epoch), subject, fold))
    save_name_ = 'trlog' + save_name
    ensure_path(osp.join(args.save_path, 'log_train'))
    torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))
    return trlog['max_acc']
def test(args, data, label, reproduce, subject, fold):
    """Evaluate a trained model on held-out data and print loss/acc/F1.

    Args:
        args: experiment configuration (batch_size, save_path, load_path, ...).
        data, label: test samples and their ground-truth labels.
        reproduce: when True, load the per-subject/fold checkpoint written by
            train(); otherwise load the weights from args.load_path.
        subject, fold: identify which training checkpoint to load.

    Returns:
        tuple: (accuracy, predicted labels, actual labels).
    """
    seed_all(args.random_seed)
    set_up(args)
    test_loader = get_dataloader(data, label, args.batch_size, False)
    model = get_model(args)
    if CUDA:
        model = model.cuda()
    loss_fn = nn.CrossEntropyLoss()
    # Resolve which checkpoint to load, then restore weights once below.
    if reproduce:
        # Rebuild the exact path used when saving in train().
        file_name = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'
        data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,
                                            args.label_type)
        model_dir = osp.join(args.save_path, data_type)
        ensure_path(model_dir)
        load_from = osp.join(model_dir, file_name)
    else:
        load_from = args.load_path
    model.load_state_dict(torch.load(load_from))
    loss, pred, act = predict(data_loader=test_loader, net=model,
                              loss_fn=loss_fn)
    acc, f1, cm = get_metrics(y_pred=pred, y_true=act)
    print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))
    return acc, pred, act
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Module-wide flag: when True, models and batches are moved to the GPU below.
CUDA = torch.cuda.is_available()
def train_one_epoch(data_loader, net, loss_fn, optimizer):
    """Train `net` for a single pass over `data_loader`.

    Args:
        data_loader: iterable yielding (x_batch, y_batch) tensor pairs.
        net: model to train (switched to train mode here).
        loss_fn: criterion mapping (logits, labels) -> scalar loss tensor.
        optimizer: optimizer stepping net's parameters.

    Returns:
        tuple: (mean training loss, list of predicted class ids,
        list of ground-truth labels), both lists across the whole epoch.
    """
    net.train()
    tl = Averager()  # running mean of per-batch losses
    pred_train = []
    act_train = []
    for i, (x_batch, y_batch) in enumerate(data_loader):
        if CUDA:
            x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
        out = net(x_batch)
        loss = loss_fn(out, y_batch)
        _, pred = torch.max(out, 1)  # argmax over class logits
        # Fix: accumulate the Python float, not the live tensor. Storing
        # `loss` itself kept each batch's autograd graph reachable through
        # the Averager (unbounded memory growth over the epoch); predict()
        # already uses loss.item() for the same purpose.
        tl.add(loss.item())
        pred_train.extend(pred.data.tolist())
        act_train.extend(y_batch.data.tolist())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return tl.item(), pred_train, act_train
def predict(data_loader, net, loss_fn):
    """Run inference over `data_loader` with gradients disabled.

    Returns:
        tuple: (mean loss, list of predicted class ids, list of true labels).
    """
    net.eval()
    predictions = []
    actuals = []
    running_loss = Averager()
    with torch.no_grad():
        for x_batch, y_batch in data_loader:
            if CUDA:
                x_batch = x_batch.cuda()
                y_batch = y_batch.cuda()
            logits = net(x_batch)
            running_loss.add(loss_fn(logits, y_batch).item())
            # Predicted class = argmax over the logit dimension.
            predictions.extend(torch.max(logits, 1)[1].data.tolist())
            actuals.extend(y_batch.data.tolist())
    return running_loss.item(), predictions, actuals
def set_up(args):
    """Prepare the run environment: GPU selection, output dir, deterministic seeding."""
    set_gpu(args.gpu)
    ensure_path(args.save_path)
    torch.manual_seed(args.random_seed)
    # Force deterministic cuDNN kernels for reproducibility (may slow training).
    torch.backends.cudnn.deterministic = True
def train(args, data_train, label_train, data_val, label_val, subject, fold):
    """Train a model for one subject/fold and return the best validation accuracy.

    Runs args.max_epoch epochs, logging train/val loss and accuracy into a
    `trlog` dict, checkpointing the model whenever validation accuracy
    improves, and finally persisting the log with torch.save.

    Args:
        args: experiment configuration (batch_size, learning_rate, max_epoch,
            save_path, save_model, dataset/data_format/label_type, ...).
        data_train, label_train: training samples and labels.
        data_val, label_val: validation samples and labels.
        subject: subject id, used for file naming and progress printing.
        fold: cross-validation fold id, used for file naming.

    Returns:
        float: highest validation accuracy observed across all epochs.
    """
    seed_all(args.random_seed)
    save_name = '_sub' + str(subject) + '_trial' + str(fold)
    set_up(args)
    train_loader = get_dataloader(data_train, label_train, args.batch_size)
    val_loader = get_dataloader(data_val, label_val, args.batch_size)
    model = get_model(args)
    para = get_trainable_parameter_num(model)
    print('Model {} size:{}'.format(args.model, para))
    if CUDA:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    loss_fn = nn.CrossEntropyLoss()

    def save_model(name):
        # Overwrite any previous checkpoint with the same name in save_path.
        previous_model = osp.join(args.save_path, '{}.pth'.format(name))
        if os.path.exists(previous_model):
            os.remove(previous_model)
        torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.
            format(name)))

    # Training log: per-epoch curves plus the running best validation accuracy.
    trlog = {}
    trlog['args'] = vars(args)
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['train_acc'] = []
    trlog['val_acc'] = []
    trlog['max_acc'] = 0.0
    timer = Timer()
    for epoch in range(1, args.max_epoch + 1):
        loss_train, pred_train, act_train = train_one_epoch(data_loader=
            train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)
        acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=
            act_train)
        print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'.format(epoch,
            loss_train, acc_train, f1_train))
        loss_val, pred_val, act_val = predict(data_loader=val_loader, net=
            model, loss_fn=loss_fn)
        acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)
        print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.format(
            epoch, loss_val, acc_val, f1_val))
        if acc_val > trlog['max_acc']:
            # New best validation accuracy: checkpoint as 'max-acc' and,
            # optionally, as a per-subject/fold file for later reproduction.
            trlog['max_acc'] = acc_val
            save_model('max-acc')
            if args.save_model:
                model_name_reproduce = 'sub' + str(subject) + '_fold' + str(
                    fold) + '.pth'
                data_type = 'model_{}_{}_{}'.format(args.dataset, args.
                    data_format, args.label_type)
                save_path = osp.join(args.save_path, data_type)
                ensure_path(save_path)
                model_name_reproduce = osp.join(save_path, model_name_reproduce
                    )
                torch.save(model.state_dict(), model_name_reproduce)
        trlog['train_loss'].append(loss_train)
        trlog['train_acc'].append(acc_train)
        trlog['val_loss'].append(loss_val)
        trlog['val_acc'].append(acc_val)
        print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.
            measure(epoch / args.max_epoch), subject, fold))
    # Persist the whole training log alongside the checkpoints.
    save_name_ = 'trlog' + save_name
    ensure_path(osp.join(args.save_path, 'log_train'))
    torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))
    return trlog['max_acc']
def test(args, data, label, reproduce, subject, fold):
    """Evaluate a trained model on test data and print loss/acc/F1.

    Args:
        args: experiment configuration (batch_size, save_path, load_path, ...).
        data, label: test samples and ground-truth labels.
        reproduce: when True, load the per-subject/fold checkpoint written by
            train(); otherwise load the weights from args.load_path.
        subject, fold: identify which training checkpoint to load.

    Returns:
        tuple: (accuracy, predicted labels, actual labels).
    """
    seed_all(args.random_seed)
    set_up(args)
    test_loader = get_dataloader(data, label, args.batch_size, False)
    model = get_model(args)
    if CUDA:
        model = model.cuda()
    loss_fn = nn.CrossEntropyLoss()
    if reproduce:
        # Rebuild the exact checkpoint path used when saving in train().
        model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold
            ) + '.pth'
        data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,
            args.label_type)
        save_path = osp.join(args.save_path, data_type)
        ensure_path(save_path)
        model_name_reproduce = osp.join(save_path, model_name_reproduce)
        model.load_state_dict(torch.load(model_name_reproduce))
    else:
        model.load_state_dict(torch.load(args.load_path))
    loss, pred, act = predict(data_loader=test_loader, net=model, loss_fn=
        loss_fn)
    acc, f1, cm = get_metrics(y_pred=pred, y_true=act)
    print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))
    return acc, pred, act
<|reserved_special_token_1|>
from utils import *
import copy
import torch.nn as nn
# Module-wide flag: when True, models and batches are moved to the GPU below.
CUDA = torch.cuda.is_available()
def train_one_epoch(data_loader, net, loss_fn, optimizer):
    """Train `net` for a single pass over `data_loader`.

    Args:
        data_loader: iterable yielding (x_batch, y_batch) tensor pairs.
        net: model to train (switched to train mode here).
        loss_fn: criterion mapping (logits, labels) -> scalar loss tensor.
        optimizer: optimizer stepping net's parameters.

    Returns:
        tuple: (mean training loss, list of predicted class ids,
        list of ground-truth labels), both lists across the whole epoch.
    """
    net.train()
    tl = Averager()  # running mean of per-batch losses
    pred_train = []
    act_train = []
    for i, (x_batch, y_batch) in enumerate(data_loader):
        if CUDA:
            x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
        out = net(x_batch)
        loss = loss_fn(out, y_batch)
        _, pred = torch.max(out, 1)  # argmax over class logits
        # Fix: accumulate the Python float, not the live tensor. Storing
        # `loss` itself kept each batch's autograd graph reachable through
        # the Averager (unbounded memory growth over the epoch); predict()
        # already uses loss.item() for the same purpose.
        tl.add(loss.item())
        pred_train.extend(pred.data.tolist())
        act_train.extend(y_batch.data.tolist())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return tl.item(), pred_train, act_train
def predict(data_loader, net, loss_fn):
    """Run inference over `data_loader` with gradients disabled.

    Returns:
        tuple: (mean loss, list of predicted class ids, list of true labels).
    """
    net.eval()
    pred_val = []
    act_val = []
    vl = Averager()  # running mean of per-batch losses
    with torch.no_grad():
        for i, (x_batch, y_batch) in enumerate(data_loader):
            if CUDA:
                x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
            out = net(x_batch)
            loss = loss_fn(out, y_batch)
            _, pred = torch.max(out, 1)  # argmax over class logits
            vl.add(loss.item())
            pred_val.extend(pred.data.tolist())
            act_val.extend(y_batch.data.tolist())
    return vl.item(), pred_val, act_val
def set_up(args):
    """Prepare the run environment: GPU selection, output dir, deterministic seeding."""
    set_gpu(args.gpu)
    ensure_path(args.save_path)
    torch.manual_seed(args.random_seed)
    # Force deterministic cuDNN kernels for reproducibility (may slow training).
    torch.backends.cudnn.deterministic = True
def train(args, data_train, label_train, data_val, label_val, subject, fold):
    """Train a model for one subject/fold and return the best validation accuracy.

    Runs args.max_epoch epochs, logging train/val loss and accuracy into a
    `trlog` dict, checkpointing the model whenever validation accuracy
    improves, and finally persisting the log with torch.save.

    Args:
        args: experiment configuration (batch_size, learning_rate, max_epoch,
            save_path, save_model, dataset/data_format/label_type, ...).
        data_train, label_train: training samples and labels.
        data_val, label_val: validation samples and labels.
        subject: subject id, used for file naming and progress printing.
        fold: cross-validation fold id, used for file naming.

    Returns:
        float: highest validation accuracy observed across all epochs.
    """
    seed_all(args.random_seed)
    save_name = '_sub' + str(subject) + '_trial' + str(fold)
    set_up(args)
    train_loader = get_dataloader(data_train, label_train, args.batch_size)
    val_loader = get_dataloader(data_val, label_val, args.batch_size)
    model = get_model(args)
    para = get_trainable_parameter_num(model)
    print('Model {} size:{}'.format(args.model, para))
    if CUDA:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    loss_fn = nn.CrossEntropyLoss()

    def save_model(name):
        # Overwrite any previous checkpoint with the same name in save_path.
        previous_model = osp.join(args.save_path, '{}.pth'.format(name))
        if os.path.exists(previous_model):
            os.remove(previous_model)
        torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.
            format(name)))

    # Training log: per-epoch curves plus the running best validation accuracy.
    trlog = {}
    trlog['args'] = vars(args)
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['train_acc'] = []
    trlog['val_acc'] = []
    trlog['max_acc'] = 0.0
    timer = Timer()
    for epoch in range(1, args.max_epoch + 1):
        loss_train, pred_train, act_train = train_one_epoch(data_loader=
            train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)
        acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=
            act_train)
        print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'.format(epoch,
            loss_train, acc_train, f1_train))
        loss_val, pred_val, act_val = predict(data_loader=val_loader, net=
            model, loss_fn=loss_fn)
        acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)
        print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.format(
            epoch, loss_val, acc_val, f1_val))
        if acc_val > trlog['max_acc']:
            # New best validation accuracy: checkpoint as 'max-acc' and,
            # optionally, as a per-subject/fold file for later reproduction.
            trlog['max_acc'] = acc_val
            save_model('max-acc')
            if args.save_model:
                model_name_reproduce = 'sub' + str(subject) + '_fold' + str(
                    fold) + '.pth'
                data_type = 'model_{}_{}_{}'.format(args.dataset, args.
                    data_format, args.label_type)
                save_path = osp.join(args.save_path, data_type)
                ensure_path(save_path)
                model_name_reproduce = osp.join(save_path, model_name_reproduce
                    )
                torch.save(model.state_dict(), model_name_reproduce)
        trlog['train_loss'].append(loss_train)
        trlog['train_acc'].append(acc_train)
        trlog['val_loss'].append(loss_val)
        trlog['val_acc'].append(acc_val)
        print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.
            measure(epoch / args.max_epoch), subject, fold))
    # Persist the whole training log alongside the checkpoints.
    save_name_ = 'trlog' + save_name
    ensure_path(osp.join(args.save_path, 'log_train'))
    torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))
    return trlog['max_acc']
def test(args, data, label, reproduce, subject, fold):
    """Evaluate a trained model on test data and print loss/acc/F1.

    Args:
        args: experiment configuration (batch_size, save_path, load_path, ...).
        data, label: test samples and ground-truth labels.
        reproduce: when True, load the per-subject/fold checkpoint written by
            train(); otherwise load the weights from args.load_path.
        subject, fold: identify which training checkpoint to load.

    Returns:
        tuple: (accuracy, predicted labels, actual labels).
    """
    seed_all(args.random_seed)
    set_up(args)
    test_loader = get_dataloader(data, label, args.batch_size, False)
    model = get_model(args)
    if CUDA:
        model = model.cuda()
    loss_fn = nn.CrossEntropyLoss()
    if reproduce:
        # Rebuild the exact checkpoint path used when saving in train().
        model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold
            ) + '.pth'
        data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,
            args.label_type)
        save_path = osp.join(args.save_path, data_type)
        ensure_path(save_path)
        model_name_reproduce = osp.join(save_path, model_name_reproduce)
        model.load_state_dict(torch.load(model_name_reproduce))
    else:
        model.load_state_dict(torch.load(args.load_path))
    loss, pred, act = predict(data_loader=test_loader, net=model, loss_fn=
        loss_fn)
    acc, f1, cm = get_metrics(y_pred=pred, y_true=act)
    print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))
    return acc, pred, act
<|reserved_special_token_1|>
from utils import *
import copy
import torch.nn as nn
# Module-wide flag: when True, models and batches are moved to the GPU below.
CUDA = torch.cuda.is_available()
def train_one_epoch(data_loader, net, loss_fn, optimizer):
    """Train `net` for a single pass over `data_loader`.

    Args:
        data_loader: iterable yielding (x_batch, y_batch) tensor pairs.
        net: model to train (switched to train mode here).
        loss_fn: criterion mapping (logits, labels) -> scalar loss tensor.
        optimizer: optimizer stepping net's parameters.

    Returns:
        tuple: (mean training loss, list of predicted class ids,
        list of ground-truth labels), both lists across the whole epoch.
    """
    net.train()
    tl = Averager()  # running mean of per-batch losses
    pred_train = []
    act_train = []
    for i, (x_batch, y_batch) in enumerate(data_loader):
        if CUDA:
            x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
        out = net(x_batch)
        loss = loss_fn(out, y_batch)
        _, pred = torch.max(out, 1)  # argmax over class logits
        # Fix: accumulate the Python float, not the live tensor. Storing
        # `loss` itself kept each batch's autograd graph reachable through
        # the Averager (unbounded memory growth over the epoch); predict()
        # already uses loss.item() for the same purpose.
        tl.add(loss.item())
        pred_train.extend(pred.data.tolist())
        act_train.extend(y_batch.data.tolist())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return tl.item(), pred_train, act_train
def predict(data_loader, net, loss_fn):
    """Run inference over `data_loader` with gradients disabled.

    Returns:
        tuple: (mean loss, list of predicted class ids, list of true labels).
    """
    net.eval()
    predictions = []
    actuals = []
    running_loss = Averager()
    with torch.no_grad():
        for x_batch, y_batch in data_loader:
            if CUDA:
                x_batch = x_batch.cuda()
                y_batch = y_batch.cuda()
            logits = net(x_batch)
            running_loss.add(loss_fn(logits, y_batch).item())
            # Predicted class = argmax over the logit dimension.
            predictions.extend(torch.max(logits, 1)[1].data.tolist())
            actuals.extend(y_batch.data.tolist())
    return running_loss.item(), predictions, actuals
def set_up(args):
    """Prepare the run environment: GPU selection, output dir, deterministic seeding."""
    set_gpu(args.gpu)
    ensure_path(args.save_path)
    torch.manual_seed(args.random_seed)
    # Force deterministic cuDNN kernels for reproducibility (may slow training).
    torch.backends.cudnn.deterministic = True
def train(args, data_train, label_train, data_val, label_val, subject, fold):
    """Train a model for one subject/fold and return the best validation accuracy.

    Runs args.max_epoch epochs, logging train/val loss and accuracy into a
    `trlog` dict, checkpointing the model whenever validation accuracy
    improves, and finally persisting the log with torch.save.

    Args:
        args: experiment configuration (batch_size, learning_rate, max_epoch,
            save_path, save_model, dataset/data_format/label_type, ...).
        data_train, label_train: training samples and labels.
        data_val, label_val: validation samples and labels.
        subject: subject id, used for file naming and progress printing.
        fold: cross-validation fold id, used for file naming.

    Returns:
        float: highest validation accuracy observed across all epochs.
    """
    seed_all(args.random_seed)
    save_name = '_sub' + str(subject) + '_trial' + str(fold)
    set_up(args)
    train_loader = get_dataloader(data_train, label_train, args.batch_size)
    val_loader = get_dataloader(data_val, label_val, args.batch_size)
    model = get_model(args)
    para = get_trainable_parameter_num(model)
    print('Model {} size:{}'.format(args.model, para))
    if CUDA:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    loss_fn = nn.CrossEntropyLoss()

    def save_model(name):
        # Overwrite any previous checkpoint with the same name in save_path.
        previous_model = osp.join(args.save_path, '{}.pth'.format(name))
        if os.path.exists(previous_model):
            os.remove(previous_model)
        torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.format(name)))

    # Training log: per-epoch curves plus the running best validation accuracy.
    trlog = {}
    trlog['args'] = vars(args)
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['train_acc'] = []
    trlog['val_acc'] = []
    trlog['max_acc'] = 0.0
    timer = Timer()
    for epoch in range(1, args.max_epoch + 1):
        loss_train, pred_train, act_train = train_one_epoch(
            data_loader=train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)
        acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=act_train)
        print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'
              .format(epoch, loss_train, acc_train, f1_train))
        loss_val, pred_val, act_val = predict(
            data_loader=val_loader, net=model, loss_fn=loss_fn
        )
        acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)
        print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.
              format(epoch, loss_val, acc_val, f1_val))
        if acc_val > trlog['max_acc']:
            # New best validation accuracy: checkpoint as 'max-acc' and,
            # optionally, as a per-subject/fold file for later reproduction.
            trlog['max_acc'] = acc_val
            save_model('max-acc')
            if args.save_model:
                # save model here for reproduce
                model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'
                data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format, args.label_type)
                save_path = osp.join(args.save_path, data_type)
                ensure_path(save_path)
                model_name_reproduce = osp.join(save_path, model_name_reproduce)
                torch.save(model.state_dict(), model_name_reproduce)
        trlog['train_loss'].append(loss_train)
        trlog['train_acc'].append(acc_train)
        trlog['val_loss'].append(loss_val)
        trlog['val_acc'].append(acc_val)
        print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch),
                                                subject, fold))
    # Persist the whole training log alongside the checkpoints.
    save_name_ = 'trlog' + save_name
    ensure_path(osp.join(args.save_path, 'log_train'))
    torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))
    return trlog['max_acc']
def test(args, data, label, reproduce, subject, fold):
    """Evaluate a trained model on one subject/fold and report metrics.

    When `reproduce` is True the checkpoint written by train() for this
    subject/fold is loaded; otherwise the weights at args.load_path are
    used.  Returns (accuracy, predictions, ground-truth labels).
    """
    seed_all(args.random_seed)
    set_up(args)

    test_loader = get_dataloader(data, label, args.batch_size, False)

    net = get_model(args)
    if CUDA:
        net = net.cuda()
    criterion = nn.CrossEntropyLoss()

    if reproduce:
        # Rebuild the exact checkpoint path that train() used when saving.
        model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'
        data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format, args.label_type)
        save_path = osp.join(args.save_path, data_type)
        ensure_path(save_path)
        net.load_state_dict(torch.load(osp.join(save_path, model_name_reproduce)))
    else:
        net.load_state_dict(torch.load(args.load_path))

    loss, pred, act = predict(data_loader=test_loader, net=net, loss_fn=criterion)
    acc, f1, _ = get_metrics(y_pred=pred, y_true=act)
    print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))
    return acc, pred, act
|
flexible
|
{
"blob_id": "6ef78e4308f6e693f50df714a5d7af1785e49d7a",
"index": 7682,
"step-1": "<mask token>\n\n\ndef set_up(args):\n set_gpu(args.gpu)\n ensure_path(args.save_path)\n torch.manual_seed(args.random_seed)\n torch.backends.cudnn.deterministic = True\n\n\n<mask token>\n\n\ndef test(args, data, label, reproduce, subject, fold):\n seed_all(args.random_seed)\n set_up(args)\n test_loader = get_dataloader(data, label, args.batch_size, False)\n model = get_model(args)\n if CUDA:\n model = model.cuda()\n loss_fn = nn.CrossEntropyLoss()\n if reproduce:\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold\n ) + '.pth'\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,\n args.label_type)\n save_path = osp.join(args.save_path, data_type)\n ensure_path(save_path)\n model_name_reproduce = osp.join(save_path, model_name_reproduce)\n model.load_state_dict(torch.load(model_name_reproduce))\n else:\n model.load_state_dict(torch.load(args.load_path))\n loss, pred, act = predict(data_loader=test_loader, net=model, loss_fn=\n loss_fn)\n acc, f1, cm = get_metrics(y_pred=pred, y_true=act)\n print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))\n return acc, pred, act\n",
"step-2": "<mask token>\n\n\ndef predict(data_loader, net, loss_fn):\n net.eval()\n pred_val = []\n act_val = []\n vl = Averager()\n with torch.no_grad():\n for i, (x_batch, y_batch) in enumerate(data_loader):\n if CUDA:\n x_batch, y_batch = x_batch.cuda(), y_batch.cuda()\n out = net(x_batch)\n loss = loss_fn(out, y_batch)\n _, pred = torch.max(out, 1)\n vl.add(loss.item())\n pred_val.extend(pred.data.tolist())\n act_val.extend(y_batch.data.tolist())\n return vl.item(), pred_val, act_val\n\n\ndef set_up(args):\n set_gpu(args.gpu)\n ensure_path(args.save_path)\n torch.manual_seed(args.random_seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef train(args, data_train, label_train, data_val, label_val, subject, fold):\n seed_all(args.random_seed)\n save_name = '_sub' + str(subject) + '_trial' + str(fold)\n set_up(args)\n train_loader = get_dataloader(data_train, label_train, args.batch_size)\n val_loader = get_dataloader(data_val, label_val, args.batch_size)\n model = get_model(args)\n para = get_trainable_parameter_num(model)\n print('Model {} size:{}'.format(args.model, para))\n if CUDA:\n model = model.cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n loss_fn = nn.CrossEntropyLoss()\n\n def save_model(name):\n previous_model = osp.join(args.save_path, '{}.pth'.format(name))\n if os.path.exists(previous_model):\n os.remove(previous_model)\n torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.\n format(name)))\n trlog = {}\n trlog['args'] = vars(args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['max_acc'] = 0.0\n timer = Timer()\n for epoch in range(1, args.max_epoch + 1):\n loss_train, pred_train, act_train = train_one_epoch(data_loader=\n train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)\n acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=\n act_train)\n print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'.format(epoch,\n 
loss_train, acc_train, f1_train))\n loss_val, pred_val, act_val = predict(data_loader=val_loader, net=\n model, loss_fn=loss_fn)\n acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)\n print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.format(\n epoch, loss_val, acc_val, f1_val))\n if acc_val > trlog['max_acc']:\n trlog['max_acc'] = acc_val\n save_model('max-acc')\n if args.save_model:\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(\n fold) + '.pth'\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.\n data_format, args.label_type)\n save_path = osp.join(args.save_path, data_type)\n ensure_path(save_path)\n model_name_reproduce = osp.join(save_path, model_name_reproduce\n )\n torch.save(model.state_dict(), model_name_reproduce)\n trlog['train_loss'].append(loss_train)\n trlog['train_acc'].append(acc_train)\n trlog['val_loss'].append(loss_val)\n trlog['val_acc'].append(acc_val)\n print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.\n measure(epoch / args.max_epoch), subject, fold))\n save_name_ = 'trlog' + save_name\n ensure_path(osp.join(args.save_path, 'log_train'))\n torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))\n return trlog['max_acc']\n\n\ndef test(args, data, label, reproduce, subject, fold):\n seed_all(args.random_seed)\n set_up(args)\n test_loader = get_dataloader(data, label, args.batch_size, False)\n model = get_model(args)\n if CUDA:\n model = model.cuda()\n loss_fn = nn.CrossEntropyLoss()\n if reproduce:\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold\n ) + '.pth'\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,\n args.label_type)\n save_path = osp.join(args.save_path, data_type)\n ensure_path(save_path)\n model_name_reproduce = osp.join(save_path, model_name_reproduce)\n model.load_state_dict(torch.load(model_name_reproduce))\n else:\n model.load_state_dict(torch.load(args.load_path))\n loss, pred, act = predict(data_loader=test_loader, 
net=model, loss_fn=\n loss_fn)\n acc, f1, cm = get_metrics(y_pred=pred, y_true=act)\n print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))\n return acc, pred, act\n",
"step-3": "<mask token>\nCUDA = torch.cuda.is_available()\n\n\ndef train_one_epoch(data_loader, net, loss_fn, optimizer):\n net.train()\n tl = Averager()\n pred_train = []\n act_train = []\n for i, (x_batch, y_batch) in enumerate(data_loader):\n if CUDA:\n x_batch, y_batch = x_batch.cuda(), y_batch.cuda()\n out = net(x_batch)\n loss = loss_fn(out, y_batch)\n _, pred = torch.max(out, 1)\n tl.add(loss)\n pred_train.extend(pred.data.tolist())\n act_train.extend(y_batch.data.tolist())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return tl.item(), pred_train, act_train\n\n\ndef predict(data_loader, net, loss_fn):\n net.eval()\n pred_val = []\n act_val = []\n vl = Averager()\n with torch.no_grad():\n for i, (x_batch, y_batch) in enumerate(data_loader):\n if CUDA:\n x_batch, y_batch = x_batch.cuda(), y_batch.cuda()\n out = net(x_batch)\n loss = loss_fn(out, y_batch)\n _, pred = torch.max(out, 1)\n vl.add(loss.item())\n pred_val.extend(pred.data.tolist())\n act_val.extend(y_batch.data.tolist())\n return vl.item(), pred_val, act_val\n\n\ndef set_up(args):\n set_gpu(args.gpu)\n ensure_path(args.save_path)\n torch.manual_seed(args.random_seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef train(args, data_train, label_train, data_val, label_val, subject, fold):\n seed_all(args.random_seed)\n save_name = '_sub' + str(subject) + '_trial' + str(fold)\n set_up(args)\n train_loader = get_dataloader(data_train, label_train, args.batch_size)\n val_loader = get_dataloader(data_val, label_val, args.batch_size)\n model = get_model(args)\n para = get_trainable_parameter_num(model)\n print('Model {} size:{}'.format(args.model, para))\n if CUDA:\n model = model.cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n loss_fn = nn.CrossEntropyLoss()\n\n def save_model(name):\n previous_model = osp.join(args.save_path, '{}.pth'.format(name))\n if os.path.exists(previous_model):\n os.remove(previous_model)\n torch.save(model.state_dict(), 
osp.join(args.save_path, '{}.pth'.\n format(name)))\n trlog = {}\n trlog['args'] = vars(args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['max_acc'] = 0.0\n timer = Timer()\n for epoch in range(1, args.max_epoch + 1):\n loss_train, pred_train, act_train = train_one_epoch(data_loader=\n train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)\n acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=\n act_train)\n print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'.format(epoch,\n loss_train, acc_train, f1_train))\n loss_val, pred_val, act_val = predict(data_loader=val_loader, net=\n model, loss_fn=loss_fn)\n acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)\n print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.format(\n epoch, loss_val, acc_val, f1_val))\n if acc_val > trlog['max_acc']:\n trlog['max_acc'] = acc_val\n save_model('max-acc')\n if args.save_model:\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(\n fold) + '.pth'\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.\n data_format, args.label_type)\n save_path = osp.join(args.save_path, data_type)\n ensure_path(save_path)\n model_name_reproduce = osp.join(save_path, model_name_reproduce\n )\n torch.save(model.state_dict(), model_name_reproduce)\n trlog['train_loss'].append(loss_train)\n trlog['train_acc'].append(acc_train)\n trlog['val_loss'].append(loss_val)\n trlog['val_acc'].append(acc_val)\n print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.\n measure(epoch / args.max_epoch), subject, fold))\n save_name_ = 'trlog' + save_name\n ensure_path(osp.join(args.save_path, 'log_train'))\n torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))\n return trlog['max_acc']\n\n\ndef test(args, data, label, reproduce, subject, fold):\n seed_all(args.random_seed)\n set_up(args)\n test_loader = get_dataloader(data, label, args.batch_size, False)\n model = get_model(args)\n if 
CUDA:\n model = model.cuda()\n loss_fn = nn.CrossEntropyLoss()\n if reproduce:\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold\n ) + '.pth'\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,\n args.label_type)\n save_path = osp.join(args.save_path, data_type)\n ensure_path(save_path)\n model_name_reproduce = osp.join(save_path, model_name_reproduce)\n model.load_state_dict(torch.load(model_name_reproduce))\n else:\n model.load_state_dict(torch.load(args.load_path))\n loss, pred, act = predict(data_loader=test_loader, net=model, loss_fn=\n loss_fn)\n acc, f1, cm = get_metrics(y_pred=pred, y_true=act)\n print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))\n return acc, pred, act\n",
"step-4": "from utils import *\nimport copy\nimport torch.nn as nn\nCUDA = torch.cuda.is_available()\n\n\ndef train_one_epoch(data_loader, net, loss_fn, optimizer):\n net.train()\n tl = Averager()\n pred_train = []\n act_train = []\n for i, (x_batch, y_batch) in enumerate(data_loader):\n if CUDA:\n x_batch, y_batch = x_batch.cuda(), y_batch.cuda()\n out = net(x_batch)\n loss = loss_fn(out, y_batch)\n _, pred = torch.max(out, 1)\n tl.add(loss)\n pred_train.extend(pred.data.tolist())\n act_train.extend(y_batch.data.tolist())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return tl.item(), pred_train, act_train\n\n\ndef predict(data_loader, net, loss_fn):\n net.eval()\n pred_val = []\n act_val = []\n vl = Averager()\n with torch.no_grad():\n for i, (x_batch, y_batch) in enumerate(data_loader):\n if CUDA:\n x_batch, y_batch = x_batch.cuda(), y_batch.cuda()\n out = net(x_batch)\n loss = loss_fn(out, y_batch)\n _, pred = torch.max(out, 1)\n vl.add(loss.item())\n pred_val.extend(pred.data.tolist())\n act_val.extend(y_batch.data.tolist())\n return vl.item(), pred_val, act_val\n\n\ndef set_up(args):\n set_gpu(args.gpu)\n ensure_path(args.save_path)\n torch.manual_seed(args.random_seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef train(args, data_train, label_train, data_val, label_val, subject, fold):\n seed_all(args.random_seed)\n save_name = '_sub' + str(subject) + '_trial' + str(fold)\n set_up(args)\n train_loader = get_dataloader(data_train, label_train, args.batch_size)\n val_loader = get_dataloader(data_val, label_val, args.batch_size)\n model = get_model(args)\n para = get_trainable_parameter_num(model)\n print('Model {} size:{}'.format(args.model, para))\n if CUDA:\n model = model.cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\n loss_fn = nn.CrossEntropyLoss()\n\n def save_model(name):\n previous_model = osp.join(args.save_path, '{}.pth'.format(name))\n if os.path.exists(previous_model):\n 
os.remove(previous_model)\n torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.\n format(name)))\n trlog = {}\n trlog['args'] = vars(args)\n trlog['train_loss'] = []\n trlog['val_loss'] = []\n trlog['train_acc'] = []\n trlog['val_acc'] = []\n trlog['max_acc'] = 0.0\n timer = Timer()\n for epoch in range(1, args.max_epoch + 1):\n loss_train, pred_train, act_train = train_one_epoch(data_loader=\n train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)\n acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=\n act_train)\n print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'.format(epoch,\n loss_train, acc_train, f1_train))\n loss_val, pred_val, act_val = predict(data_loader=val_loader, net=\n model, loss_fn=loss_fn)\n acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)\n print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.format(\n epoch, loss_val, acc_val, f1_val))\n if acc_val > trlog['max_acc']:\n trlog['max_acc'] = acc_val\n save_model('max-acc')\n if args.save_model:\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(\n fold) + '.pth'\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.\n data_format, args.label_type)\n save_path = osp.join(args.save_path, data_type)\n ensure_path(save_path)\n model_name_reproduce = osp.join(save_path, model_name_reproduce\n )\n torch.save(model.state_dict(), model_name_reproduce)\n trlog['train_loss'].append(loss_train)\n trlog['train_acc'].append(acc_train)\n trlog['val_loss'].append(loss_val)\n trlog['val_acc'].append(acc_val)\n print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.\n measure(epoch / args.max_epoch), subject, fold))\n save_name_ = 'trlog' + save_name\n ensure_path(osp.join(args.save_path, 'log_train'))\n torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))\n return trlog['max_acc']\n\n\ndef test(args, data, label, reproduce, subject, fold):\n seed_all(args.random_seed)\n set_up(args)\n test_loader = get_dataloader(data, 
label, args.batch_size, False)\n model = get_model(args)\n if CUDA:\n model = model.cuda()\n loss_fn = nn.CrossEntropyLoss()\n if reproduce:\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold\n ) + '.pth'\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format,\n args.label_type)\n save_path = osp.join(args.save_path, data_type)\n ensure_path(save_path)\n model_name_reproduce = osp.join(save_path, model_name_reproduce)\n model.load_state_dict(torch.load(model_name_reproduce))\n else:\n model.load_state_dict(torch.load(args.load_path))\n loss, pred, act = predict(data_loader=test_loader, net=model, loss_fn=\n loss_fn)\n acc, f1, cm = get_metrics(y_pred=pred, y_true=act)\n print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))\n return acc, pred, act\n",
"step-5": "\r\nfrom utils import *\r\nimport copy\r\nimport torch.nn as nn\r\n\r\nCUDA = torch.cuda.is_available()\r\n\r\n\r\ndef train_one_epoch(data_loader, net, loss_fn, optimizer):\r\n net.train()\r\n tl = Averager()\r\n pred_train = []\r\n act_train = []\r\n for i, (x_batch, y_batch) in enumerate(data_loader):\r\n if CUDA:\r\n x_batch, y_batch = x_batch.cuda(), y_batch.cuda()\r\n\r\n out = net(x_batch)\r\n loss = loss_fn(out, y_batch)\r\n _, pred = torch.max(out, 1)\r\n tl.add(loss)\r\n pred_train.extend(pred.data.tolist())\r\n act_train.extend(y_batch.data.tolist())\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n return tl.item(), pred_train, act_train\r\n\r\n\r\ndef predict(data_loader, net, loss_fn):\r\n net.eval()\r\n pred_val = []\r\n act_val = []\r\n vl = Averager()\r\n with torch.no_grad():\r\n for i, (x_batch, y_batch) in enumerate(data_loader):\r\n if CUDA:\r\n x_batch, y_batch = x_batch.cuda(), y_batch.cuda()\r\n\r\n out = net(x_batch)\r\n loss = loss_fn(out, y_batch)\r\n _, pred = torch.max(out, 1)\r\n vl.add(loss.item())\r\n pred_val.extend(pred.data.tolist())\r\n act_val.extend(y_batch.data.tolist())\r\n return vl.item(), pred_val, act_val\r\n\r\n\r\ndef set_up(args):\r\n set_gpu(args.gpu)\r\n ensure_path(args.save_path)\r\n torch.manual_seed(args.random_seed)\r\n torch.backends.cudnn.deterministic = True\r\n\r\n\r\ndef train(args, data_train, label_train, data_val, label_val, subject, fold):\r\n seed_all(args.random_seed)\r\n save_name = '_sub' + str(subject) + '_trial' + str(fold)\r\n set_up(args)\r\n\r\n train_loader = get_dataloader(data_train, label_train, args.batch_size)\r\n\r\n val_loader = get_dataloader(data_val, label_val, args.batch_size)\r\n\r\n model = get_model(args)\r\n para = get_trainable_parameter_num(model)\r\n print('Model {} size:{}'.format(args.model, para))\r\n\r\n if CUDA:\r\n model = model.cuda()\r\n\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)\r\n loss_fn = 
nn.CrossEntropyLoss()\r\n\r\n def save_model(name):\r\n previous_model = osp.join(args.save_path, '{}.pth'.format(name))\r\n if os.path.exists(previous_model):\r\n os.remove(previous_model)\r\n torch.save(model.state_dict(), osp.join(args.save_path, '{}.pth'.format(name)))\r\n\r\n trlog = {}\r\n trlog['args'] = vars(args)\r\n trlog['train_loss'] = []\r\n trlog['val_loss'] = []\r\n trlog['train_acc'] = []\r\n trlog['val_acc'] = []\r\n trlog['max_acc'] = 0.0\r\n\r\n timer = Timer()\r\n\r\n for epoch in range(1, args.max_epoch + 1):\r\n\r\n loss_train, pred_train, act_train = train_one_epoch(\r\n data_loader=train_loader, net=model, loss_fn=loss_fn, optimizer=optimizer)\r\n\r\n acc_train, f1_train, _ = get_metrics(y_pred=pred_train, y_true=act_train)\r\n print('epoch {}, loss={:.4f} acc={:.4f} f1={:.4f}'\r\n .format(epoch, loss_train, acc_train, f1_train))\r\n\r\n loss_val, pred_val, act_val = predict(\r\n data_loader=val_loader, net=model, loss_fn=loss_fn\r\n )\r\n acc_val, f1_val, _ = get_metrics(y_pred=pred_val, y_true=act_val)\r\n print('epoch {}, val, loss={:.4f} acc={:.4f} f1={:.4f}'.\r\n format(epoch, loss_val, acc_val, f1_val))\r\n\r\n\r\n if acc_val > trlog['max_acc']:\r\n trlog['max_acc'] = acc_val\r\n save_model('max-acc')\r\n\r\n if args.save_model:\r\n # save model here for reproduce\r\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'\r\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format, args.label_type)\r\n save_path = osp.join(args.save_path, data_type)\r\n ensure_path(save_path)\r\n model_name_reproduce = osp.join(save_path, model_name_reproduce)\r\n torch.save(model.state_dict(), model_name_reproduce)\r\n\r\n trlog['train_loss'].append(loss_train)\r\n trlog['train_acc'].append(acc_train)\r\n trlog['val_loss'].append(loss_val)\r\n trlog['val_acc'].append(acc_val)\r\n\r\n print('ETA:{}/{} SUB:{} FOLD:{}'.format(timer.measure(), timer.measure(epoch / args.max_epoch),\r\n subject, fold))\r\n save_name_ = 
'trlog' + save_name\r\n ensure_path(osp.join(args.save_path, 'log_train'))\r\n torch.save(trlog, osp.join(args.save_path, 'log_train', save_name_))\r\n\r\n return trlog['max_acc']\r\n\r\n\r\ndef test(args, data, label, reproduce, subject, fold):\r\n seed_all(args.random_seed)\r\n set_up(args)\r\n\r\n test_loader = get_dataloader(data, label, args.batch_size, False)\r\n\r\n model = get_model(args)\r\n if CUDA:\r\n model = model.cuda()\r\n loss_fn = nn.CrossEntropyLoss()\r\n\r\n if reproduce:\r\n model_name_reproduce = 'sub' + str(subject) + '_fold' + str(fold) + '.pth'\r\n data_type = 'model_{}_{}_{}'.format(args.dataset, args.data_format, args.label_type)\r\n save_path = osp.join(args.save_path, data_type)\r\n ensure_path(save_path)\r\n model_name_reproduce = osp.join(save_path, model_name_reproduce)\r\n model.load_state_dict(torch.load(model_name_reproduce))\r\n else:\r\n model.load_state_dict(torch.load(args.load_path))\r\n loss, pred, act = predict(\r\n data_loader=test_loader, net=model, loss_fn=loss_fn\r\n )\r\n acc, f1, cm = get_metrics(y_pred=pred, y_true=act)\r\n print('>>> Test: loss={:.4f} acc={:.4f} f1={:.4f}'.format(loss, acc, f1))\r\n return acc, pred, act\r\n\r\n\r\n",
"step-ids": [
2,
4,
6,
7,
8
]
}
|
[
2,
4,
6,
7,
8
] |
# Enumerate every (n, k) pair with 2 <= n <= 50 and n <= k <= n*n,
# then print the count followed by each pair on its own line.
queries = [(n, k) for n in range(2, 51) for k in range(n, n * n + 1)]
print(len(queries))
for n, k in queries:
    print(n, k)
|
normal
|
{
"blob_id": "798d5c68a0aa2057c28d7f333905f20fef965d70",
"index": 2850,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(2, 51):\n for k in range(n, n * n + 1):\n queries.append((n, k))\nprint(len(queries))\nfor n, k in queries:\n print(n, k)\n",
"step-3": "queries = []\nfor n in range(2, 51):\n for k in range(n, n * n + 1):\n queries.append((n, k))\nprint(len(queries))\nfor n, k in queries:\n print(n, k)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
# Lesson_5 Activity 2 Mailroom Part 2
import os
def page_break():
    """Print a 75-character horizontal rule plus a blank line to separate screens."""
    separator = 75 * "_"
    print(separator + "\n")
def get_amount():
    """Prompt until the user enters a valid donation amount.

    Returns the amount as a float, or the string the user typed when it
    lower-cases to 'exit' so callers can abort the flow.
    """
    while True:
        raw = input("How much did they donate: ")
        if raw.lower() == 'exit':
            return raw
        try:
            return float(raw)
        except ValueError:
            # Not a number and not 'exit': re-prompt.
            print("you have made an invalid choice, try again.")
def get_key(donor_chart):
    """Sort key for a (name, donations) item: the donor's total given."""
    total = sum(donor_chart[1])
    return total
def menu_page():
    """Prompt until the user types an integer menu choice and return it.

    Returns:
        int: the number typed by the user; validity against the actual
        menu options is checked by the caller.
    """
    while True:
        try:
            print("Please choose one of the following options(1,2,3):"
                  "\n1. Send a Thank you. \n2. Create a report"
                  "\n3. Send Letters to Everyone \n4. Quit")
            option = int(input('--->'))
        except ValueError:
            print("You have made an invalid choice, try again.")
            page_break()
            # Bug fix: the original fell through to `return option` with
            # `option` never assigned, raising UnboundLocalError on the
            # first non-integer input; re-prompt instead.
            continue
        return option
def send_thanks():
    """ Send Thanks """
    # Interactive flow: pick an existing donor (or add a new one), record
    # a donation, then print a thank-you note.  Typing 'exit' at any
    # prompt returns to the main menu without recording anything.
    page_break()
    while True:
        # Refresh the name list each pass in case a donor was just added.
        list_names = [item[0] for item in donor_chart.items()]
        try:
            print("To whom would you like to say thank you?\n"
                  "(type \"list\" for a full list of names or"
                  "\"exit\" to return to the menu)")
            name = input("--->")
        except ValueError:
            print("you have made an invalid choice, try again.")
            page_break()
            continue
        if name == 'list':
            # Show every known donor, one per line, then re-prompt.
            print(("{}\n"*len(list_names)).format(*list_names))
            continue
        elif name in list_names:
            amount = get_amount()
            new_donor = False
        elif name.lower() == 'exit':
            break
        else:
            # Unknown name: offer to add it as a new donor.
            addname = input("The name you selected is not in the list,"
                            " would you like to add it(y/n)? ")
            if addname[0].lower() == 'y':
                amount = get_amount()
                new_donor = True
            elif addname.lower() == 'exit':
                break
            else:
                print("\nName was not added, try again\n")
                continue
        # get_amount() returns the string 'exit' (any case, lowered) when
        # the user aborts at the amount prompt.
        if amount == "exit":
            break
        add_donation(name, amount, new_donor)
        print("\nDear {} \nThank you for your generous donation of ${:.2f}!!\n"
              "Now all of the kittens will get "
              "to eat this year".format(name, amount))
        break
def create_report():
    """ Create Report """
    # Print a table of donors sorted by total donated (descending):
    # name | total given | number of gifts | average gift.
    page_break()
    list_names = [item[0] for item in donor_chart.items()]
    # NOTE(review): new_list is built but never used afterwards.
    new_list = []
    for donor in donor_chart.items():
        sum_don = sum(donor[1])
        new_list.append(sum_don)
    col_lab = ["Donor Name", "Total Given", "Num Gifts", "Average Gift"]
    # Size the columns from the longest name and the largest single
    # donation so data rows line up under the header.
    max_name = max([len(x) for x in list_names])
    max_don = []
    for don in donor_chart.items():
        max_don.append(max(don[1]))
    max_donl = len(str(max(max_don)))
    max_gift = len(col_lab[2])
    if max_donl < len(col_lab[1]):
        max_donl = len(col_lab[1])
    # Assemble the header format string piecewise so its widths stay in
    # sync with the data-row format built in the loop below.
    format_col = "\n{:<" + "{}".format(max_name+5) + "}|{:^"
    format_col += "{}".format(max_donl+5)
    format_col += "}|{:^" + "{}".format(max_gift+5)
    format_col += "}|{:>" + "{}".format(max_donl+5) + "}"
    print(format_col.format(*col_lab))
    print("-"*len(format_col.format(*col_lab)))
    # Sort donors by total donated, largest first (get_key sums the list).
    sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)
    for donor in sorted_list:
        num_gifts = len(donor[1])
        avg_gift = sum(donor[1])/num_gifts
        format_item = "{:<" + "{}".format(max_name+5) + "}${:>"
        format_item += "{}".format(max_donl+5) + ".2f}{:>"
        format_item += "{}".format(max_gift+5) + "d} ${:>"
        format_item += "{}".format(max_donl+5) + ".2f}"
        print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))
def send_letters():
    """Write a thank-you letter file for every donor.

    Prompts for a target directory (falling back to the current working
    directory when the path does not exist) and writes one '<name>.txt'
    per donor containing that donor's total.  Typing 'exit' aborts
    without writing anything.
    """
    while True:
        try:
            dir_path = input("Please type the desired directory "
                             "to save the letters: ")
            letter_form = ("Dear {},\n\n\tThank you for your very "
                           "kind donation of ${:.2f}!")
            letter_form += ("\n\n\tNow all of the kittens will "
                            "get to eat this year!")
            letter_form += ("\n\n\t\t\t\t Cheers! \n\t\t\t\t "
                            "-The Team")
            # Bug fix: the original compared dir_path.lower() == "Exit",
            # which can never be true for a lower-cased string, so typing
            # 'exit' did not abort.  Compare against 'exit' instead.
            if dir_path.lower() == "exit":
                break
            if not os.path.exists(dir_path):
                print("That is not a valid directory, using working directory")
                dir_path = os.getcwd()
            for name, donation in donor_chart.items():
                file_name = ("{}.txt".format(name))
                path_name = dir_path + "/" + file_name
                with open(path_name, 'w') as file:
                    file.write(letter_form.format(name, sum(donation)))
            break
        except ValueError:
            print("\nsomething went wrong please try again: ")
def add_donation(name, amount, donor_bool):
    """Record a donation in the module-level donor_chart.

    Args:
        name: donor name, used as the donor_chart key.
        amount: donation amount in dollars.
        donor_bool: True when `name` is a brand-new donor, False when the
            donor already exists.
    """
    if donor_bool is False:
        # Bug fix: the original did donor_chart.get(list_names.index(name), [1])
        # -- `list_names` is local to send_thanks() and not visible here
        # (NameError), and donor_chart is keyed by name, not by position.
        # Append to the existing donor's donation list directly.
        donor_chart[name].append(amount)
    else:
        donor_chart.update({name: [amount]})
    return
def menu_quit():
    """Menu action for option 4: signal the main loop to terminate."""
    return "Quit"
if __name__ == '__main__':
    # Seed data: donor name -> list of individual donation amounts.
    donor_chart = {"Justin Thyme": [1, 1, 1],
                   "Beau Andarrow": [207.121324, 400.321234, 12345.001234],
                   "Crystal Clearwater": [80082],
                   "Harry Shins": [1.00, 2.00, 3.00],
                   "Bob Zuruncle": [0.53, 7.00],
                   "Al Kaseltzer": [1010101, 666.00],
                   "Joe Somebody": [25]}
    # Map menu choices 1-4 to their handler functions.
    options = range(1, 5)
    menus = (send_thanks, create_report, send_letters, menu_quit)
    menu_dict = dict(zip(options, menus))
    option = 0
    # Main loop: dispatch the chosen handler until menu_quit signals Quit.
    while True:
        page_break()
        try:
            option = menu_page()
            if menu_dict[option]() == "Quit":
                break
        except KeyError:
            # An integer outside 1-4 is not a key in menu_dict.
            print("You have made an invalid choice, try again.")
            page_break()
|
normal
|
{
"blob_id": "8a192fc08a65c80b8733a9d07374156c09f36598",
"index": 2823,
"step-1": "<mask token>\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? 
'\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\n<mask token>\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = (\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\n<mask token>\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? 
'\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\n<mask token>\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = (\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef page_break():\n \"\"\" Print a separator to distinguish new 'pages'\"\"\"\n print('_' * 75 + '\\n')\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? 
'\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\n<mask token>\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = (\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef page_break():\n \"\"\" Print a separator to distinguish new 'pages'\"\"\"\n print('_' * 75 + '\\n')\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input('How much did they donate: ')\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print('you have made an invalid choice, try again.')\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return sum(donor_chart[1])\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\n \"\"\"Please choose one of the following options(1,2,3):\n1. Send a Thank you. \n2. Create a report\n3. Send Letters to Everyone \n4. Quit\"\"\"\n )\n option = int(input('--->'))\n except ValueError:\n print('You have made an invalid choice, try again.')\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\n \"\"\"To whom would you like to say thank you?\n(type \"list\" for a full list of names or\"exit\" to return to the menu)\"\"\"\n )\n name = input('--->')\n except ValueError:\n print('you have made an invalid choice, try again.')\n page_break()\n continue\n if name == 'list':\n print(('{}\\n' * len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\n 'The name you selected is not in the list, would you like to add it(y/n)? 
'\n )\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print('\\nName was not added, try again\\n')\n continue\n if amount == 'exit':\n break\n add_donation(name, amount, new_donor)\n print(\n \"\"\"\nDear {} \nThank you for your generous donation of ${:.2f}!!\nNow all of the kittens will get to eat this year\"\"\"\n .format(name, amount))\n break\n\n\ndef create_report():\n \"\"\" Create Report \"\"\"\n page_break()\n list_names = [item[0] for item in donor_chart.items()]\n new_list = []\n for donor in donor_chart.items():\n sum_don = sum(donor[1])\n new_list.append(sum_don)\n col_lab = ['Donor Name', 'Total Given', 'Num Gifts', 'Average Gift']\n max_name = max([len(x) for x in list_names])\n max_don = []\n for don in donor_chart.items():\n max_don.append(max(don[1]))\n max_donl = len(str(max(max_don)))\n max_gift = len(col_lab[2])\n if max_donl < len(col_lab[1]):\n max_donl = len(col_lab[1])\n format_col = '\\n{:<' + '{}'.format(max_name + 5) + '}|{:^'\n format_col += '{}'.format(max_donl + 5)\n format_col += '}|{:^' + '{}'.format(max_gift + 5)\n format_col += '}|{:>' + '{}'.format(max_donl + 5) + '}'\n print(format_col.format(*col_lab))\n print('-' * len(format_col.format(*col_lab)))\n sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)\n for donor in sorted_list:\n num_gifts = len(donor[1])\n avg_gift = sum(donor[1]) / num_gifts\n format_item = '{:<' + '{}'.format(max_name + 5) + '}${:>'\n format_item += '{}'.format(max_donl + 5) + '.2f}{:>'\n format_item += '{}'.format(max_gift + 5) + 'd} ${:>'\n format_item += '{}'.format(max_donl + 5) + '.2f}'\n print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\n 'Please type the desired directory to save the letters: ')\n letter_form = 
(\n 'Dear {},\\n\\n\\tThank you for your very kind donation of ${:.2f}!'\n )\n letter_form += (\n '\\n\\n\\tNow all of the kittens will get to eat this year!')\n letter_form += '\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t -The Team'\n if dir_path.lower() == 'Exit':\n break\n if not os.path.exists(dir_path):\n print('That is not a valid directory, using working directory')\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = '{}.txt'.format(name)\n path_name = dir_path + '/' + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print('\\nsomething went wrong please try again: ')\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return 'Quit'\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n\n# Lesson_5 Activity 2 Mailroom Part 2\n\nimport os\n\n\ndef page_break():\n \"\"\" Print a separator to distinguish new 'pages'\"\"\"\n print(\"_\"*75+\"\\n\")\n\n\ndef get_amount():\n \"\"\"Get valid donation amount from user\"\"\"\n while True:\n try:\n amount = input(\"How much did they donate: \")\n if str(amount).lower() == 'exit':\n return amount\n else:\n return float(amount)\n except ValueError:\n print(\"you have made an invalid choice, try again.\")\n\n\ndef get_key(donor_chart):\n \"\"\" Return key for sorted function \"\"\"\n return(sum(donor_chart[1]))\n\n\ndef menu_page():\n \"\"\" Return valid menu option from user \"\"\"\n while True:\n try:\n print(\"Please choose one of the following options(1,2,3):\"\n \"\\n1. Send a Thank you. \\n2. Create a report\"\n \"\\n3. Send Letters to Everyone \\n4. Quit\")\n option = int(input('--->'))\n except ValueError:\n print(\"You have made an invalid choice, try again.\")\n page_break()\n return option\n\n\ndef send_thanks():\n \"\"\" Send Thanks \"\"\"\n page_break()\n while True:\n list_names = [item[0] for item in donor_chart.items()]\n try:\n print(\"To whom would you like to say thank you?\\n\"\n \"(type \\\"list\\\" for a full list of names or\"\n \"\\\"exit\\\" to return to the menu)\")\n name = input(\"--->\")\n except ValueError:\n print(\"you have made an invalid choice, try again.\")\n page_break()\n continue\n if name == 'list':\n print((\"{}\\n\"*len(list_names)).format(*list_names))\n continue\n elif name in list_names:\n amount = get_amount()\n new_donor = False\n elif name.lower() == 'exit':\n break\n else:\n addname = input(\"The name you selected is not in the list,\"\n \" would you like to add it(y/n)? 
\")\n if addname[0].lower() == 'y':\n amount = get_amount()\n new_donor = True\n elif addname.lower() == 'exit':\n break\n else:\n print(\"\\nName was not added, try again\\n\")\n continue\n if amount == \"exit\":\n break\n add_donation(name, amount, new_donor)\n print(\"\\nDear {} \\nThank you for your generous donation of ${:.2f}!!\\n\"\n \"Now all of the kittens will get \"\n \"to eat this year\".format(name, amount))\n break\n\n\ndef create_report():\n \"\"\" Create Report \"\"\"\n page_break()\n list_names = [item[0] for item in donor_chart.items()]\n new_list = []\n for donor in donor_chart.items():\n sum_don = sum(donor[1])\n new_list.append(sum_don)\n col_lab = [\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\"]\n max_name = max([len(x) for x in list_names])\n max_don = []\n for don in donor_chart.items():\n max_don.append(max(don[1]))\n max_donl = len(str(max(max_don)))\n max_gift = len(col_lab[2])\n if max_donl < len(col_lab[1]):\n max_donl = len(col_lab[1])\n format_col = \"\\n{:<\" + \"{}\".format(max_name+5) + \"}|{:^\"\n format_col += \"{}\".format(max_donl+5)\n format_col += \"}|{:^\" + \"{}\".format(max_gift+5)\n format_col += \"}|{:>\" + \"{}\".format(max_donl+5) + \"}\"\n print(format_col.format(*col_lab))\n print(\"-\"*len(format_col.format(*col_lab)))\n sorted_list = sorted(donor_chart.items(), key=get_key, reverse=True)\n for donor in sorted_list:\n num_gifts = len(donor[1])\n avg_gift = sum(donor[1])/num_gifts\n format_item = \"{:<\" + \"{}\".format(max_name+5) + \"}${:>\"\n format_item += \"{}\".format(max_donl+5) + \".2f}{:>\"\n format_item += \"{}\".format(max_gift+5) + \"d} ${:>\"\n format_item += \"{}\".format(max_donl+5) + \".2f}\"\n print(format_item.format(donor[0], sum(donor[1]), num_gifts, avg_gift))\n\n\ndef send_letters():\n \"\"\" Write letters to each donor in the donor chart and\n save them in a user specified directory \"\"\"\n while True:\n try:\n dir_path = input(\"Please type the desired directory \"\n \"to 
save the letters: \")\n letter_form = (\"Dear {},\\n\\n\\tThank you for your very \"\n \"kind donation of ${:.2f}!\")\n letter_form += (\"\\n\\n\\tNow all of the kittens will \"\n \"get to eat this year!\")\n letter_form += (\"\\n\\n\\t\\t\\t\\t Cheers! \\n\\t\\t\\t\\t \"\n \"-The Team\")\n if dir_path.lower() == \"Exit\":\n break\n if not os.path.exists(dir_path):\n print(\"That is not a valid directory, using working directory\")\n dir_path = os.getcwd()\n for name, donation in donor_chart.items():\n file_name = (\"{}.txt\".format(name))\n path_name = dir_path + \"/\" + file_name\n with open(path_name, 'w') as file:\n file.write(letter_form.format(name, sum(donation)))\n break\n except ValueError:\n print(\"\\nsomething went wrong please try again: \")\n\n\ndef add_donation(name, amount, donor_bool):\n \"\"\" add a donation for a new or existing donor \"\"\"\n if donor_bool is False:\n donor_chart.get(list_names.index(name), [1]).append(amount)\n else:\n donor_chart.update({name: [amount]})\n return\n\n\ndef menu_quit():\n \"\"\" return quit for menus \"\"\"\n return \"Quit\"\n\nif __name__ == '__main__':\n donor_chart = {\"Justin Thyme\": [1, 1, 1],\n \"Beau Andarrow\": [207.121324, 400.321234, 12345.001234],\n \"Crystal Clearwater\": [80082],\n \"Harry Shins\": [1.00, 2.00, 3.00],\n \"Bob Zuruncle\": [0.53, 7.00],\n \"Al Kaseltzer\": [1010101, 666.00],\n \"Joe Somebody\": [25]}\n\n options = range(1, 5)\n menus = (send_thanks, create_report, send_letters, menu_quit)\n menu_dict = dict(zip(options, menus))\n\n option = 0\n while True:\n page_break()\n try:\n option = menu_page()\n if menu_dict[option]() == \"Quit\":\n break\n except KeyError:\n print(\"You have made an invalid choice, try again.\")\n page_break()\n",
"step-ids": [
6,
7,
8,
9,
12
]
}
|
[
6,
7,
8,
9,
12
] |
class Solution:

    def calculate(self, s: str) -> int:
        """Evaluate a basic arithmetic expression over non-negative integers.

        Supports '+', '-', '*', '/' and spaces. '*' and '/' bind tighter
        than '+' and '-', and division truncates toward zero.
        """
        operands = []
        operators = []

        def apply_top():
            # Pop one operator and its two operands, push the result.
            rhs = operands.pop()
            lhs = operands.pop()
            op = operators.pop()
            if op == '+':
                operands.append(lhs + rhs)
            elif op == '-':
                operands.append(lhs - rhs)
            elif op == '*':
                operands.append(lhs * rhs)
            else:
                # int() on the float quotient truncates toward zero.
                operands.append(int(lhs / rhs))

        pos, length = 0, len(s)
        while pos < length:
            ch = s[pos]
            if ch == ' ':
                pos += 1
            elif ch.isdigit():
                # Scan the full multi-digit number.
                start = pos
                while pos < length and s[pos].isdigit():
                    pos += 1
                operands.append(int(s[start:pos]))
            else:
                if ch in '+-':
                    # Lowest precedence: everything pending can be applied.
                    while operators:
                        apply_top()
                else:
                    # '*' or '/': apply only pending ops of equal precedence.
                    while operators and operators[-1] in '*/':
                        apply_top()
                operators.append(ch)
                pos += 1
        while operators:
            apply_top()
        return operands[-1]
|
normal
|
{
"blob_id": "0ff8743e54509a76e9a7add4be9da279bdee82a6",
"index": 5032,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def calculate(self, s: str) ->int:\n nums = []\n ops = []\n\n def cal():\n a = nums.pop()\n b = nums.pop()\n c = ops.pop()\n if c == '+':\n nums.append(b + a)\n elif c == '-':\n nums.append(b - a)\n elif c == '*':\n nums.append(b * a)\n else:\n nums.append(int(b / a))\n i = 0\n while i < len(s):\n if s[i] == ' ':\n i += 1\n continue\n elif s[i].isdigit():\n t = ''\n while i < len(s) and s[i].isdigit():\n t += s[i]\n i += 1\n nums.append(int(t))\n elif not ops:\n ops.append(s[i])\n i += 1\n elif s[i] == '+' or s[i] == '-':\n while ops:\n cal()\n ops.append(s[i])\n i += 1\n else:\n while ops and (ops[-1] == '*' or ops[-1] == '/'):\n cal()\n ops.append(s[i])\n i += 1\n while ops:\n cal()\n return nums[-1]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from rest_framework import serializers
from dailytasks.models import Tasks
class TasksSerializer(serializers.ModelSerializer):
    """Serializer exposing Tasks records through the REST API."""
    # Read-only: the owner is rendered as their username and can never be
    # set from client input.
    user = serializers.ReadOnlyField(source='user.username')

    class Meta:
        model = Tasks
        fields = ['id', 'created', 'title', 'description', 'status', 'user']
|
normal
|
{
"blob_id": "3fa1736fd87448ec0da4649153521d0aba048ccf",
"index": 3689,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TasksSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = Tasks\n fields = ['id', 'created', 'title', 'description', 'status', 'user']\n",
"step-3": "<mask token>\n\n\nclass TasksSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source='user.username')\n\n\n class Meta:\n model = Tasks\n fields = ['id', 'created', 'title', 'description', 'status', 'user']\n",
"step-4": "from rest_framework import serializers\nfrom dailytasks.models import Tasks\n\n\nclass TasksSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source='user.username')\n\n\n class Meta:\n model = Tasks\n fields = ['id', 'created', 'title', 'description', 'status', 'user']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def curve():
for i in range(200):
pen.right(1)
pen.forward(1)
def heart():
pen.fillcolor('yellow')
pen.begin_fill()
pen.left(140)
pen.forward(113)
curve()
pen.left(120)
curve()
pen.forward(112)
pen.end_fill()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def curve():
for i in range(200):
pen.right(1)
pen.forward(1)
def heart():
pen.fillcolor('yellow')
pen.begin_fill()
pen.left(140)
pen.forward(113)
curve()
pen.left(120)
curve()
pen.forward(112)
pen.end_fill()
heart()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pen = turtle.Turtle()
def curve():
for i in range(200):
pen.right(1)
pen.forward(1)
def heart():
pen.fillcolor('yellow')
pen.begin_fill()
pen.left(140)
pen.forward(113)
curve()
pen.left(120)
curve()
pen.forward(112)
pen.end_fill()
heart()
<|reserved_special_token_1|>
import turtle
# Single shared turtle used by both drawing helpers below.
pen = turtle.Turtle()
def curve():
    """Trace one rounded lobe of the heart: a 200-step, 200-degree arc."""
    for i in range(200):
        pen.right(1)
        pen.forward(1)
def heart():
    """Draw a yellow filled heart with the module-level pen."""
    pen.fillcolor('yellow')
    pen.begin_fill()
    pen.left(140)
    pen.forward(113)  # first straight edge
    curve()  # first lobe
    pen.left(120)
    curve()  # second lobe
    pen.forward(112)  # second straight edge, closing the outline
    pen.end_fill()
heart()
|
flexible
|
{
"blob_id": "fa925d0ef4f9df3fdf9a51c7fcc88933609bc9e3",
"index": 3980,
"step-1": "<mask token>\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n",
"step-3": "<mask token>\npen = turtle.Turtle()\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n",
"step-4": "import turtle\npen = turtle.Turtle()\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import sys
import time
from abc import ABC, abstractmethod
from PySide6.QtGui import QPixmap
from PySide6.QtWidgets import QApplication
import inupdater.resource
from inupdater.splash import SplashScreen
class UserInterface(ABC):
    """Abstract interface for the updater's user-facing progress display.

    Concrete implementations (console, Qt splash screen, ...) must provide
    message display, progress reporting and teardown.
    """

    def __init__(self) -> None:
        # Current progress step; advanced by set_state() implementations.
        self.state = 0

    @abstractmethod
    def show_message(self, msg: str):
        """Show a message to the user."""

    @abstractmethod
    def set_state(self, state: int):
        """Set the program progress by a state value."""

    @abstractmethod
    def close(self):
        """Close the updater UI."""
class CmdUI(UserInterface):
    """Plain command-line implementation of the updater UI."""

    def __init__(self) -> None:
        super().__init__()

    def show_message(self, msg: str):
        """Print *msg* prefixed with the current state number."""
        print(f"{self.state} {msg}")

    def set_state(self, state: int):
        """Set the program progress by a state value."""
        self.state = state

    def close(self):
        """Nothing to tear down for a console UI."""
        pass
class QtUI(UserInterface):
    """Qt splash-screen implementation of the updater UI."""

    def __init__(self) -> None:
        super().__init__()
        # Keep a reference to the QApplication: left as a local it could be
        # garbage-collected when __init__ returns, tearing down the Qt
        # machinery while the splash screen is still in use.
        self._app = QApplication(sys.argv)
        qpix = QPixmap(":/src/inupdater/data/splash.png")
        self.splash = SplashScreen(qpix)
        self.splash.set_progress_max(10)
        self.splash.show()

    def show_message(self, msg: str):
        """Display *msg* on the splash screen."""
        self.splash.set_message(msg)

    def set_state(self, state: int):
        """Set the program progress by a state value."""
        # NOTE(review): the bar is updated with the *previous* state before
        # the new one is stored, so the display lags one step — confirm this
        # is intentional before reordering.
        self.splash.set_progress_value(self.state)
        self.state = state
        time.sleep(1)

    def close(self):
        """Close the splash screen."""
        self.splash.close()
|
normal
|
{
"blob_id": "efeb069a7e2aab7262a557236c693752d2973523",
"index": 4169,
"step-1": "<mask token>\n\n\nclass UserInterface(ABC):\n <mask token>\n <mask token>\n\n @abstractmethod\n def show_message(self, msg: str):\n \"\"\"Show a message\"\"\"\n <mask token>\n\n @abstractmethod\n def close(self):\n \"\"\"Close the updtater UI\"\"\"\n\n\nclass CmdUI(UserInterface):\n \"\"\"Commande line UI\"\"\"\n\n def __init__(self) ->None:\n super().__init__()\n\n def show_message(self, msg: str):\n print(self.state, msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.state = state\n\n def close(self):\n pass\n\n\nclass QtUI(UserInterface):\n\n def __init__(self) ->None:\n super().__init__()\n app = QApplication(sys.argv)\n qpix = QPixmap(':/src/inupdater/data/splash.png')\n self.splash = SplashScreen(qpix)\n self.splash.set_progress_max(10)\n self.splash.show()\n\n def show_message(self, msg: str):\n self.splash.set_message(msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.splash.set_progress_value(self.state)\n self.state = state\n time.sleep(1)\n\n def close(self):\n self.splash.close()\n",
"step-2": "<mask token>\n\n\nclass UserInterface(ABC):\n <mask token>\n\n def __init__(self) ->None:\n self.state = 0\n\n @abstractmethod\n def show_message(self, msg: str):\n \"\"\"Show a message\"\"\"\n <mask token>\n\n @abstractmethod\n def close(self):\n \"\"\"Close the updtater UI\"\"\"\n\n\nclass CmdUI(UserInterface):\n \"\"\"Commande line UI\"\"\"\n\n def __init__(self) ->None:\n super().__init__()\n\n def show_message(self, msg: str):\n print(self.state, msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.state = state\n\n def close(self):\n pass\n\n\nclass QtUI(UserInterface):\n\n def __init__(self) ->None:\n super().__init__()\n app = QApplication(sys.argv)\n qpix = QPixmap(':/src/inupdater/data/splash.png')\n self.splash = SplashScreen(qpix)\n self.splash.set_progress_max(10)\n self.splash.show()\n\n def show_message(self, msg: str):\n self.splash.set_message(msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.splash.set_progress_value(self.state)\n self.state = state\n time.sleep(1)\n\n def close(self):\n self.splash.close()\n",
"step-3": "<mask token>\n\n\nclass UserInterface(ABC):\n <mask token>\n\n def __init__(self) ->None:\n self.state = 0\n\n @abstractmethod\n def show_message(self, msg: str):\n \"\"\"Show a message\"\"\"\n\n @abstractmethod\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n\n @abstractmethod\n def close(self):\n \"\"\"Close the updtater UI\"\"\"\n\n\nclass CmdUI(UserInterface):\n \"\"\"Commande line UI\"\"\"\n\n def __init__(self) ->None:\n super().__init__()\n\n def show_message(self, msg: str):\n print(self.state, msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.state = state\n\n def close(self):\n pass\n\n\nclass QtUI(UserInterface):\n\n def __init__(self) ->None:\n super().__init__()\n app = QApplication(sys.argv)\n qpix = QPixmap(':/src/inupdater/data/splash.png')\n self.splash = SplashScreen(qpix)\n self.splash.set_progress_max(10)\n self.splash.show()\n\n def show_message(self, msg: str):\n self.splash.set_message(msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.splash.set_progress_value(self.state)\n self.state = state\n time.sleep(1)\n\n def close(self):\n self.splash.close()\n",
"step-4": "<mask token>\n\n\nclass UserInterface(ABC):\n \"\"\"Interface for GUI element\"\"\"\n\n def __init__(self) ->None:\n self.state = 0\n\n @abstractmethod\n def show_message(self, msg: str):\n \"\"\"Show a message\"\"\"\n\n @abstractmethod\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n\n @abstractmethod\n def close(self):\n \"\"\"Close the updtater UI\"\"\"\n\n\nclass CmdUI(UserInterface):\n \"\"\"Commande line UI\"\"\"\n\n def __init__(self) ->None:\n super().__init__()\n\n def show_message(self, msg: str):\n print(self.state, msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.state = state\n\n def close(self):\n pass\n\n\nclass QtUI(UserInterface):\n\n def __init__(self) ->None:\n super().__init__()\n app = QApplication(sys.argv)\n qpix = QPixmap(':/src/inupdater/data/splash.png')\n self.splash = SplashScreen(qpix)\n self.splash.set_progress_max(10)\n self.splash.show()\n\n def show_message(self, msg: str):\n self.splash.set_message(msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.splash.set_progress_value(self.state)\n self.state = state\n time.sleep(1)\n\n def close(self):\n self.splash.close()\n",
"step-5": "import sys\nimport time\nfrom abc import ABC, abstractmethod\n\nfrom PySide6.QtGui import QPixmap\nfrom PySide6.QtWidgets import QApplication\n\nimport inupdater.resource\nfrom inupdater.splash import SplashScreen\n\n\nclass UserInterface(ABC):\n \"\"\"Interface for GUI element\"\"\"\n\n def __init__(self) -> None:\n self.state = 0\n\n @abstractmethod\n def show_message(self, msg: str):\n \"\"\"Show a message\"\"\"\n\n @abstractmethod\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n\n @abstractmethod\n def close(self):\n \"\"\"Close the updtater UI\"\"\"\n\n\nclass CmdUI(UserInterface):\n \"\"\"Commande line UI\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def show_message(self, msg: str):\n print(self.state, msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.state = state\n\n def close(self):\n pass\n\n\nclass QtUI(UserInterface):\n def __init__(self) -> None:\n super().__init__()\n app = QApplication(sys.argv)\n qpix = QPixmap(\":/src/inupdater/data/splash.png\")\n self.splash = SplashScreen(qpix)\n self.splash.set_progress_max(10)\n self.splash.show()\n\n def show_message(self, msg: str):\n self.splash.set_message(msg)\n\n def set_state(self, state: int):\n \"\"\"Set the program progress by a state value\"\"\"\n self.splash.set_progress_value(self.state)\n self.state = state\n time.sleep(1)\n\n def close(self):\n self.splash.close()\n",
"step-ids": [
14,
15,
16,
17,
19
]
}
|
[
14,
15,
16,
17,
19
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.