code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Package metadata constants; kept free of executable logic so that
# setup.py can safely import/execute this module.
__title__ = 'space_tracer'
__version__ = '4.10.2'
__author__ = 'Don Kirkby'
__author_email__ = 'donkirkby@gmail.com'
__description__ = 'Trade time for space when debugging your code.'
__url__ = 'https://donkirkby.github.io/live-py-plugin/'
<|reserved_special_token_1|>
""" A set of constants to describe the package.
Don't put any code in here, because it must be safe to execute in setup.py. """
__title__ = 'space_tracer' # => name in setup.py
__version__ = '4.10.2'
__author__ = "Don Kirkby"
__author_email__ = "donkirkby@gmail.com"
__description__ = "Trade time for space when debugging your code."
__url__ = "https://donkirkby.github.io/live-py-plugin/"
|
flexible
|
{
"blob_id": "6cb29ebd9c0f2660d0eb868bec87ffd97cf4d198",
"index": 6262,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__title__ = 'space_tracer'\n__version__ = '4.10.2'\n__author__ = 'Don Kirkby'\n__author_email__ = 'donkirkby@gmail.com'\n__description__ = 'Trade time for space when debugging your code.'\n__url__ = 'https://donkirkby.github.io/live-py-plugin/'\n",
"step-3": "\"\"\" A set of constants to describe the package.\n\nDon't put any code in here, because it must be safe to execute in setup.py. \"\"\"\n\n__title__ = 'space_tracer' # => name in setup.py\n__version__ = '4.10.2'\n__author__ = \"Don Kirkby\"\n__author_email__ = \"donkirkby@gmail.com\"\n__description__ = \"Trade time for space when debugging your code.\"\n__url__ = \"https://donkirkby.github.io/live-py-plugin/\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):
    """Load parallel source/target text files and encode them as index sequences.

    Only line pairs where both sides are non-empty and at most ``max_len``
    characters long are kept.  Source lines are tokenized and reversed
    (the usual seq2seq encoder trick); target lines are tokenized in
    order.  Vocabularies capped at ``vocab_size`` entries are built from
    the data and written to ``vocab_x.txt`` / ``vocab_y.txt``, or reloaded
    from disk when any file containing 'vocab' already exists in the
    current directory.  Index 0 is the padding word 'ZERO' and the last
    index is 'UNK' for out-of-vocabulary tokens.

    Returns ``(X, len(X_vocab), X_word_to_ix, X_ix_to_word,
    y, len(y_vocab), y_word_to_ix, y_ix_to_word)``.

    NOTE(review): ``test_source`` and ``test_dist`` are currently unused
    (the train/test merging code was disabled); the parameters are kept
    for interface compatibility.
    """
    # Read the raw parallel corpora.
    with open(train_source, 'r') as f:
        X_data = f.read()
    with open(train_dist, 'r') as f:
        y_data = f.read()
    # Keep only aligned pairs within the length budget (measured in
    # characters, matching the original filter), then tokenize.
    pairs = [(x, y) for x, y in zip(X_data.split('\n'), y_data.split('\n'))
             if 0 < len(x) <= max_len and 0 < len(y) <= max_len]
    X = [text_to_word_sequence(x)[::-1] for x, y in pairs]
    y = [text_to_word_sequence(y) for x, y in pairs]
    vocab_files = [f for f in os.listdir('.') if 'vocab' in f]
    x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')
    y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')
    if len(vocab_files) == 0:
        X_vocab = _build_vocab(X, vocab_size, x_vocab_file)
        y_vocab = _build_vocab(y, vocab_size, y_vocab_file)
    else:
        # Bug fix: mode 'a+' positions the stream at end-of-file under
        # Python 3, so read() previously returned '' here; rewind first,
        # and close the handles (they were leaked on this path before).
        x_vocab_file.seek(0)
        X_vocab = x_vocab_file.read().splitlines()
        x_vocab_file.close()
        y_vocab_file.seek(0)
        y_vocab = y_vocab_file.read().splitlines()
        y_vocab_file.close()
    # Index <-> word tables.  X_ix_to_word deliberately aliases X_vocab,
    # so the returned len(X_vocab) includes the added 'ZERO' and 'UNK'
    # entries, exactly as the original code behaved.
    X_ix_to_word = X_vocab
    X_ix_to_word.insert(0, 'ZERO')
    X_ix_to_word.append('UNK')
    X_word_to_ix = {word: ix for ix, word in enumerate(X_ix_to_word)}
    _encode_in_place(X, X_word_to_ix)
    y_ix_to_word = y_vocab
    y_ix_to_word.insert(0, 'ZERO')
    y_ix_to_word.append('UNK')
    y_word_to_ix = {word: ix for ix, word in enumerate(y_ix_to_word)}
    _encode_in_place(y, y_word_to_ix)
    return (X, len(X_vocab), X_word_to_ix, X_ix_to_word,
            y, len(y_vocab), y_word_to_ix, y_ix_to_word)


def _build_vocab(sequences, vocab_size, out_file):
    """Count token frequencies over *sequences*, keep the ``vocab_size``
    most frequent, replace the sentinel token 'newlinechar' with '-',
    write one token per line to *out_file* (closing it), and return the
    vocabulary list."""
    counts = {}
    for tokens in sequences:
        for token in tokens:
            counts[token] = counts.get(token, 0) + 1
    vocab = sorted(counts, key=counts.get, reverse=True)[:vocab_size]
    vocab = ['-' if token == 'newlinechar' else token for token in vocab]
    # Bug fix: the original used the Python 2-only `print >> file`
    # statement, which is a SyntaxError under Python 3.
    for token in vocab:
        out_file.write(token + '\n')
    out_file.close()
    return vocab


def _encode_in_place(sentences, word_to_ix):
    """Replace every token in *sentences* with its vocabulary index,
    falling back to the 'UNK' index for unknown tokens."""
    for sentence in sentences:
        for j, word in enumerate(sentence):
            sentence[j] = word_to_ix[word] if word in word_to_ix else word_to_ix['UNK']
def load_test_data(source, X_word_to_ix, max_len):
    """Read *source*, tokenize every non-empty line of at most ``max_len``
    characters (reversed, matching the encoder-side convention used at
    training time), and map each token to its index, substituting the
    'UNK' index for out-of-vocabulary words."""
    with open(source, 'r') as handle:
        raw_lines = handle.read().split('\n')
    X = [text_to_word_sequence(line)[::-1]
         for line in raw_lines if 0 < len(line) <= max_len]
    for tokens in X:
        for col, token in enumerate(tokens):
            tokens[col] = (X_word_to_ix[token] if token in X_word_to_ix
                           else X_word_to_ix['UNK'])
    return X
<|reserved_special_token_0|>
def process_data(word_sentences, max_len, word_to_ix):
    """One-hot encode index sequences into a 3-D tensor of shape
    (num_sentences, max_len, vocab_size); positions past the end of a
    sentence remain all-zero."""
    one_hot = np.zeros((len(word_sentences), max_len, len(word_to_ix)))
    for row, indices in enumerate(word_sentences):
        for col, word_ix in enumerate(indices):
            one_hot[row, col, word_ix] = 1.0
    return one_hot
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):
    """Load parallel source/target text files and encode them as index sequences.

    Only line pairs where both sides are non-empty and at most ``max_len``
    characters long are kept.  Source lines are tokenized and reversed
    (the usual seq2seq encoder trick); target lines are tokenized in
    order.  Vocabularies capped at ``vocab_size`` entries are built from
    the data and written to ``vocab_x.txt`` / ``vocab_y.txt``, or reloaded
    from disk when any file containing 'vocab' already exists in the
    current directory.  Index 0 is the padding word 'ZERO' and the last
    index is 'UNK' for out-of-vocabulary tokens.

    Returns ``(X, len(X_vocab), X_word_to_ix, X_ix_to_word,
    y, len(y_vocab), y_word_to_ix, y_ix_to_word)``.

    NOTE(review): ``test_source`` and ``test_dist`` are currently unused
    (the train/test merging code was disabled); the parameters are kept
    for interface compatibility.
    """
    # Read the raw parallel corpora.
    with open(train_source, 'r') as f:
        X_data = f.read()
    with open(train_dist, 'r') as f:
        y_data = f.read()
    # Keep only aligned pairs within the length budget (measured in
    # characters, matching the original filter), then tokenize.
    pairs = [(x, y) for x, y in zip(X_data.split('\n'), y_data.split('\n'))
             if 0 < len(x) <= max_len and 0 < len(y) <= max_len]
    X = [text_to_word_sequence(x)[::-1] for x, y in pairs]
    y = [text_to_word_sequence(y) for x, y in pairs]
    vocab_files = [f for f in os.listdir('.') if 'vocab' in f]
    x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')
    y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')
    if len(vocab_files) == 0:
        X_vocab = _build_vocab(X, vocab_size, x_vocab_file)
        y_vocab = _build_vocab(y, vocab_size, y_vocab_file)
    else:
        # Bug fix: mode 'a+' positions the stream at end-of-file under
        # Python 3, so read() previously returned '' here; rewind first,
        # and close the handles (they were leaked on this path before).
        x_vocab_file.seek(0)
        X_vocab = x_vocab_file.read().splitlines()
        x_vocab_file.close()
        y_vocab_file.seek(0)
        y_vocab = y_vocab_file.read().splitlines()
        y_vocab_file.close()
    # Index <-> word tables.  X_ix_to_word deliberately aliases X_vocab,
    # so the returned len(X_vocab) includes the added 'ZERO' and 'UNK'
    # entries, exactly as the original code behaved.
    X_ix_to_word = X_vocab
    X_ix_to_word.insert(0, 'ZERO')
    X_ix_to_word.append('UNK')
    X_word_to_ix = {word: ix for ix, word in enumerate(X_ix_to_word)}
    _encode_in_place(X, X_word_to_ix)
    y_ix_to_word = y_vocab
    y_ix_to_word.insert(0, 'ZERO')
    y_ix_to_word.append('UNK')
    y_word_to_ix = {word: ix for ix, word in enumerate(y_ix_to_word)}
    _encode_in_place(y, y_word_to_ix)
    return (X, len(X_vocab), X_word_to_ix, X_ix_to_word,
            y, len(y_vocab), y_word_to_ix, y_ix_to_word)


def _build_vocab(sequences, vocab_size, out_file):
    """Count token frequencies over *sequences*, keep the ``vocab_size``
    most frequent, replace the sentinel token 'newlinechar' with '-',
    write one token per line to *out_file* (closing it), and return the
    vocabulary list."""
    counts = {}
    for tokens in sequences:
        for token in tokens:
            counts[token] = counts.get(token, 0) + 1
    vocab = sorted(counts, key=counts.get, reverse=True)[:vocab_size]
    vocab = ['-' if token == 'newlinechar' else token for token in vocab]
    # Bug fix: the original used the Python 2-only `print >> file`
    # statement, which is a SyntaxError under Python 3.
    for token in vocab:
        out_file.write(token + '\n')
    out_file.close()
    return vocab


def _encode_in_place(sentences, word_to_ix):
    """Replace every token in *sentences* with its vocabulary index,
    falling back to the 'UNK' index for unknown tokens."""
    for sentence in sentences:
        for j, word in enumerate(sentence):
            sentence[j] = word_to_ix[word] if word in word_to_ix else word_to_ix['UNK']
def load_test_data(source, X_word_to_ix, max_len):
    """Read *source*, tokenize every non-empty line of at most ``max_len``
    characters (reversed, matching the encoder-side convention used at
    training time), and map each token to its index, substituting the
    'UNK' index for out-of-vocabulary words."""
    with open(source, 'r') as handle:
        raw_lines = handle.read().split('\n')
    X = [text_to_word_sequence(line)[::-1]
         for line in raw_lines if 0 < len(line) <= max_len]
    for tokens in X:
        for col, token in enumerate(tokens):
            tokens[col] = (X_word_to_ix[token] if token in X_word_to_ix
                           else X_word_to_ix['UNK'])
    return X
def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,
    hidden_size, num_layers):
    """Build and compile the sequence-to-sequence network.

    Encoder: an embedding over the source vocabulary followed by a single
    LSTM that summarizes the input.  The summary is repeated once per
    output timestep, then decoded by ``num_layers`` stacked LSTMs and a
    time-distributed softmax over the target vocabulary.
    """
    net = Sequential()
    # Encoder side.
    net.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True))
    net.add(LSTM(hidden_size))
    # Bridge: one copy of the encoder state per decoder timestep.
    net.add(RepeatVector(y_max_len))
    # Decoder side.
    for _ in range(num_layers):
        net.add(LSTM(hidden_size, return_sequences=True))
    net.add(TimeDistributed(Dense(y_vocab_len)))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                metrics=['accuracy'])
    return net
def process_data(word_sentences, max_len, word_to_ix):
    """One-hot encode index sequences into a 3-D tensor of shape
    (num_sentences, max_len, vocab_size); positions past the end of a
    sentence remain all-zero."""
    one_hot = np.zeros((len(word_sentences), max_len, len(word_to_ix)))
    for row, indices in enumerate(word_sentences):
        for col, word_ix in enumerate(indices):
            one_hot[row, col, word_ix] = 1.0
    return one_hot
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):
    """Load parallel source/target text files and encode them as index sequences.

    Only line pairs where both sides are non-empty and at most ``max_len``
    characters long are kept.  Source lines are tokenized and reversed
    (the usual seq2seq encoder trick); target lines are tokenized in
    order.  Vocabularies capped at ``vocab_size`` entries are built from
    the data and written to ``vocab_x.txt`` / ``vocab_y.txt``, or reloaded
    from disk when any file containing 'vocab' already exists in the
    current directory.  Index 0 is the padding word 'ZERO' and the last
    index is 'UNK' for out-of-vocabulary tokens.

    Returns ``(X, len(X_vocab), X_word_to_ix, X_ix_to_word,
    y, len(y_vocab), y_word_to_ix, y_ix_to_word)``.

    NOTE(review): ``test_source`` and ``test_dist`` are currently unused
    (the train/test merging code was disabled); the parameters are kept
    for interface compatibility.
    """
    # Read the raw parallel corpora.
    with open(train_source, 'r') as f:
        X_data = f.read()
    with open(train_dist, 'r') as f:
        y_data = f.read()
    # Keep only aligned pairs within the length budget (measured in
    # characters, matching the original filter), then tokenize.
    pairs = [(x, y) for x, y in zip(X_data.split('\n'), y_data.split('\n'))
             if 0 < len(x) <= max_len and 0 < len(y) <= max_len]
    X = [text_to_word_sequence(x)[::-1] for x, y in pairs]
    y = [text_to_word_sequence(y) for x, y in pairs]
    vocab_files = [f for f in os.listdir('.') if 'vocab' in f]
    x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')
    y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')
    if len(vocab_files) == 0:
        X_vocab = _build_vocab(X, vocab_size, x_vocab_file)
        y_vocab = _build_vocab(y, vocab_size, y_vocab_file)
    else:
        # Bug fix: mode 'a+' positions the stream at end-of-file under
        # Python 3, so read() previously returned '' here; rewind first,
        # and close the handles (they were leaked on this path before).
        x_vocab_file.seek(0)
        X_vocab = x_vocab_file.read().splitlines()
        x_vocab_file.close()
        y_vocab_file.seek(0)
        y_vocab = y_vocab_file.read().splitlines()
        y_vocab_file.close()
    # Index <-> word tables.  X_ix_to_word deliberately aliases X_vocab,
    # so the returned len(X_vocab) includes the added 'ZERO' and 'UNK'
    # entries, exactly as the original code behaved.
    X_ix_to_word = X_vocab
    X_ix_to_word.insert(0, 'ZERO')
    X_ix_to_word.append('UNK')
    X_word_to_ix = {word: ix for ix, word in enumerate(X_ix_to_word)}
    _encode_in_place(X, X_word_to_ix)
    y_ix_to_word = y_vocab
    y_ix_to_word.insert(0, 'ZERO')
    y_ix_to_word.append('UNK')
    y_word_to_ix = {word: ix for ix, word in enumerate(y_ix_to_word)}
    _encode_in_place(y, y_word_to_ix)
    return (X, len(X_vocab), X_word_to_ix, X_ix_to_word,
            y, len(y_vocab), y_word_to_ix, y_ix_to_word)


def _build_vocab(sequences, vocab_size, out_file):
    """Count token frequencies over *sequences*, keep the ``vocab_size``
    most frequent, replace the sentinel token 'newlinechar' with '-',
    write one token per line to *out_file* (closing it), and return the
    vocabulary list."""
    counts = {}
    for tokens in sequences:
        for token in tokens:
            counts[token] = counts.get(token, 0) + 1
    vocab = sorted(counts, key=counts.get, reverse=True)[:vocab_size]
    vocab = ['-' if token == 'newlinechar' else token for token in vocab]
    # Bug fix: the original used the Python 2-only `print >> file`
    # statement, which is a SyntaxError under Python 3.
    for token in vocab:
        out_file.write(token + '\n')
    out_file.close()
    return vocab


def _encode_in_place(sentences, word_to_ix):
    """Replace every token in *sentences* with its vocabulary index,
    falling back to the 'UNK' index for unknown tokens."""
    for sentence in sentences:
        for j, word in enumerate(sentence):
            sentence[j] = word_to_ix[word] if word in word_to_ix else word_to_ix['UNK']
def load_test_data(source, X_word_to_ix, max_len):
    """Read *source*, tokenize every non-empty line of at most ``max_len``
    characters (reversed, matching the encoder-side convention used at
    training time), and map each token to its index, substituting the
    'UNK' index for out-of-vocabulary words."""
    with open(source, 'r') as handle:
        raw_lines = handle.read().split('\n')
    X = [text_to_word_sequence(line)[::-1]
         for line in raw_lines if 0 < len(line) <= max_len]
    for tokens in X:
        for col, token in enumerate(tokens):
            tokens[col] = (X_word_to_ix[token] if token in X_word_to_ix
                           else X_word_to_ix['UNK'])
    return X
def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,
    hidden_size, num_layers):
    """Build and compile the sequence-to-sequence network.

    Encoder: an embedding over the source vocabulary followed by a single
    LSTM that summarizes the input.  The summary is repeated once per
    output timestep, then decoded by ``num_layers`` stacked LSTMs and a
    time-distributed softmax over the target vocabulary.
    """
    net = Sequential()
    # Encoder side.
    net.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True))
    net.add(LSTM(hidden_size))
    # Bridge: one copy of the encoder state per decoder timestep.
    net.add(RepeatVector(y_max_len))
    # Decoder side.
    for _ in range(num_layers):
        net.add(LSTM(hidden_size, return_sequences=True))
    net.add(TimeDistributed(Dense(y_vocab_len)))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                metrics=['accuracy'])
    return net
def process_data(word_sentences, max_len, word_to_ix):
    """One-hot encode index sequences into a 3-D tensor of shape
    (num_sentences, max_len, vocab_size); positions past the end of a
    sentence remain all-zero."""
    one_hot = np.zeros((len(word_sentences), max_len, len(word_to_ix)))
    for row, indices in enumerate(word_sentences):
        for col, word_ix in enumerate(indices):
            one_hot[row, col, word_ix] = 1.0
    return one_hot
def find_checkpoint_file(folder):
    """Return the name of the most recently modified file in *folder*
    whose name contains 'checkpoint', or an empty list when none exists
    (callers test the result with ``len(...)``)."""
    checkpoint_files = [f for f in os.listdir(folder) if 'checkpoint' in f]
    if len(checkpoint_files) == 0:
        return []
    # Bug fix: getmtime needs the full path; the bare file name only
    # resolved when *folder* happened to be the current directory.
    modified_time = [os.path.getmtime(os.path.join(folder, f))
                     for f in checkpoint_files]
    return checkpoint_files[np.argmax(modified_time)]
<|reserved_special_token_1|>
from keras.preprocessing.text import text_to_word_sequence
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam, RMSprop
import numpy as np
import os
import datetime
import re
def load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):
    """Load parallel source/target text files and encode them as index sequences.

    Only line pairs where both sides are non-empty and at most ``max_len``
    characters long are kept.  Source lines are tokenized and reversed
    (the usual seq2seq encoder trick); target lines are tokenized in
    order.  Vocabularies capped at ``vocab_size`` entries are built from
    the data and written to ``vocab_x.txt`` / ``vocab_y.txt``, or reloaded
    from disk when any file containing 'vocab' already exists in the
    current directory.  Index 0 is the padding word 'ZERO' and the last
    index is 'UNK' for out-of-vocabulary tokens.

    Returns ``(X, len(X_vocab), X_word_to_ix, X_ix_to_word,
    y, len(y_vocab), y_word_to_ix, y_ix_to_word)``.

    NOTE(review): ``test_source`` and ``test_dist`` are currently unused
    (the train/test merging code was disabled); the parameters are kept
    for interface compatibility.
    """
    # Read the raw parallel corpora.
    with open(train_source, 'r') as f:
        X_data = f.read()
    with open(train_dist, 'r') as f:
        y_data = f.read()
    # Keep only aligned pairs within the length budget (measured in
    # characters, matching the original filter), then tokenize.
    pairs = [(x, y) for x, y in zip(X_data.split('\n'), y_data.split('\n'))
             if 0 < len(x) <= max_len and 0 < len(y) <= max_len]
    X = [text_to_word_sequence(x)[::-1] for x, y in pairs]
    y = [text_to_word_sequence(y) for x, y in pairs]
    vocab_files = [f for f in os.listdir('.') if 'vocab' in f]
    x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')
    y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')
    if len(vocab_files) == 0:
        X_vocab = _build_vocab(X, vocab_size, x_vocab_file)
        y_vocab = _build_vocab(y, vocab_size, y_vocab_file)
    else:
        # Bug fix: mode 'a+' positions the stream at end-of-file under
        # Python 3, so read() previously returned '' here; rewind first,
        # and close the handles (they were leaked on this path before).
        x_vocab_file.seek(0)
        X_vocab = x_vocab_file.read().splitlines()
        x_vocab_file.close()
        y_vocab_file.seek(0)
        y_vocab = y_vocab_file.read().splitlines()
        y_vocab_file.close()
    # Index <-> word tables.  X_ix_to_word deliberately aliases X_vocab,
    # so the returned len(X_vocab) includes the added 'ZERO' and 'UNK'
    # entries, exactly as the original code behaved.
    X_ix_to_word = X_vocab
    X_ix_to_word.insert(0, 'ZERO')
    X_ix_to_word.append('UNK')
    X_word_to_ix = {word: ix for ix, word in enumerate(X_ix_to_word)}
    _encode_in_place(X, X_word_to_ix)
    y_ix_to_word = y_vocab
    y_ix_to_word.insert(0, 'ZERO')
    y_ix_to_word.append('UNK')
    y_word_to_ix = {word: ix for ix, word in enumerate(y_ix_to_word)}
    _encode_in_place(y, y_word_to_ix)
    return (X, len(X_vocab), X_word_to_ix, X_ix_to_word,
            y, len(y_vocab), y_word_to_ix, y_ix_to_word)


def _build_vocab(sequences, vocab_size, out_file):
    """Count token frequencies over *sequences*, keep the ``vocab_size``
    most frequent, replace the sentinel token 'newlinechar' with '-',
    write one token per line to *out_file* (closing it), and return the
    vocabulary list."""
    counts = {}
    for tokens in sequences:
        for token in tokens:
            counts[token] = counts.get(token, 0) + 1
    vocab = sorted(counts, key=counts.get, reverse=True)[:vocab_size]
    vocab = ['-' if token == 'newlinechar' else token for token in vocab]
    # Bug fix: the original used the Python 2-only `print >> file`
    # statement, which is a SyntaxError under Python 3.
    for token in vocab:
        out_file.write(token + '\n')
    out_file.close()
    return vocab


def _encode_in_place(sentences, word_to_ix):
    """Replace every token in *sentences* with its vocabulary index,
    falling back to the 'UNK' index for unknown tokens."""
    for sentence in sentences:
        for j, word in enumerate(sentence):
            sentence[j] = word_to_ix[word] if word in word_to_ix else word_to_ix['UNK']
def load_test_data(source, X_word_to_ix, max_len):
    """Read *source*, tokenize every non-empty line of at most ``max_len``
    characters (reversed, matching the encoder-side convention used at
    training time), and map each token to its index, substituting the
    'UNK' index for out-of-vocabulary words."""
    with open(source, 'r') as handle:
        raw_lines = handle.read().split('\n')
    X = [text_to_word_sequence(line)[::-1]
         for line in raw_lines if 0 < len(line) <= max_len]
    for tokens in X:
        for col, token in enumerate(tokens):
            tokens[col] = (X_word_to_ix[token] if token in X_word_to_ix
                           else X_word_to_ix['UNK'])
    return X
def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,
    hidden_size, num_layers):
    """Build and compile the sequence-to-sequence network.

    Encoder: an embedding over the source vocabulary followed by a single
    LSTM that summarizes the input.  The summary is repeated once per
    output timestep, then decoded by ``num_layers`` stacked LSTMs and a
    time-distributed softmax over the target vocabulary.
    """
    net = Sequential()
    # Encoder side.
    net.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True))
    net.add(LSTM(hidden_size))
    # Bridge: one copy of the encoder state per decoder timestep.
    net.add(RepeatVector(y_max_len))
    # Decoder side.
    for _ in range(num_layers):
        net.add(LSTM(hidden_size, return_sequences=True))
    net.add(TimeDistributed(Dense(y_vocab_len)))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                metrics=['accuracy'])
    return net
def process_data(word_sentences, max_len, word_to_ix):
    """One-hot encode index sequences into a 3-D tensor of shape
    (num_sentences, max_len, vocab_size); positions past the end of a
    sentence remain all-zero."""
    one_hot = np.zeros((len(word_sentences), max_len, len(word_to_ix)))
    for row, indices in enumerate(word_sentences):
        for col, word_ix in enumerate(indices):
            one_hot[row, col, word_ix] = 1.0
    return one_hot
def find_checkpoint_file(folder):
    """Return the name of the most recently modified file in *folder*
    whose name contains 'checkpoint', or an empty list when none exists
    (callers test the result with ``len(...)``)."""
    checkpoint_files = [f for f in os.listdir(folder) if 'checkpoint' in f]
    if len(checkpoint_files) == 0:
        return []
    # Bug fix: getmtime needs the full path; the bare file name only
    # resolved when *folder* happened to be the current directory.
    modified_time = [os.path.getmtime(os.path.join(folder, f))
                     for f in checkpoint_files]
    return checkpoint_files[np.argmax(modified_time)]
<|reserved_special_token_1|>
from keras.preprocessing.text import text_to_word_sequence
from keras.models import Sequential
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam, RMSprop
#from nltk import FreqDist
import numpy as np
import os
import datetime
import re
def load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):
    """Load parallel source/target text files and encode them as index sequences.

    Only line pairs where both sides are non-empty and at most ``max_len``
    characters long are kept.  Source lines are tokenized and reversed
    (the usual seq2seq encoder trick); target lines are tokenized in
    order.  Vocabularies capped at ``vocab_size`` entries are built from
    the data and written to ``vocab_x.txt`` / ``vocab_y.txt``, or reloaded
    from disk when any file containing 'vocab' already exists in the
    current directory.  Index 0 is the padding word 'ZERO' and the last
    index is 'UNK' for out-of-vocabulary tokens.

    Returns ``(X, len(X_vocab), X_word_to_ix, X_ix_to_word,
    y, len(y_vocab), y_word_to_ix, y_ix_to_word)``.

    NOTE(review): ``test_source`` and ``test_dist`` are currently unused
    (the train/test merging code was disabled); the parameters are kept
    for interface compatibility.
    """
    # Read the raw parallel corpora.
    with open(train_source, 'r') as f:
        X_data = f.read()
    with open(train_dist, 'r') as f:
        y_data = f.read()
    # Keep only aligned pairs within the length budget (measured in
    # characters, matching the original filter), then tokenize.
    pairs = [(x, y) for x, y in zip(X_data.split('\n'), y_data.split('\n'))
             if 0 < len(x) <= max_len and 0 < len(y) <= max_len]
    X = [text_to_word_sequence(x)[::-1] for x, y in pairs]
    y = [text_to_word_sequence(y) for x, y in pairs]
    vocab_files = [f for f in os.listdir('.') if 'vocab' in f]
    x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')
    y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')
    if len(vocab_files) == 0:
        X_vocab = _build_vocab(X, vocab_size, x_vocab_file)
        y_vocab = _build_vocab(y, vocab_size, y_vocab_file)
    else:
        # Bug fix: mode 'a+' positions the stream at end-of-file under
        # Python 3, so read() previously returned '' here; rewind first,
        # and close the handles (they were leaked on this path before).
        x_vocab_file.seek(0)
        X_vocab = x_vocab_file.read().splitlines()
        x_vocab_file.close()
        y_vocab_file.seek(0)
        y_vocab = y_vocab_file.read().splitlines()
        y_vocab_file.close()
    # Index <-> word tables.  X_ix_to_word deliberately aliases X_vocab,
    # so the returned len(X_vocab) includes the added 'ZERO' and 'UNK'
    # entries, exactly as the original code behaved.
    X_ix_to_word = X_vocab
    X_ix_to_word.insert(0, 'ZERO')
    X_ix_to_word.append('UNK')
    X_word_to_ix = {word: ix for ix, word in enumerate(X_ix_to_word)}
    _encode_in_place(X, X_word_to_ix)
    y_ix_to_word = y_vocab
    y_ix_to_word.insert(0, 'ZERO')
    y_ix_to_word.append('UNK')
    y_word_to_ix = {word: ix for ix, word in enumerate(y_ix_to_word)}
    _encode_in_place(y, y_word_to_ix)
    return (X, len(X_vocab), X_word_to_ix, X_ix_to_word,
            y, len(y_vocab), y_word_to_ix, y_ix_to_word)


def _build_vocab(sequences, vocab_size, out_file):
    """Count token frequencies over *sequences*, keep the ``vocab_size``
    most frequent, replace the sentinel token 'newlinechar' with '-',
    write one token per line to *out_file* (closing it), and return the
    vocabulary list."""
    counts = {}
    for tokens in sequences:
        for token in tokens:
            counts[token] = counts.get(token, 0) + 1
    vocab = sorted(counts, key=counts.get, reverse=True)[:vocab_size]
    vocab = ['-' if token == 'newlinechar' else token for token in vocab]
    # Bug fix: the original used the Python 2-only `print >> file`
    # statement, which is a SyntaxError under Python 3.
    for token in vocab:
        out_file.write(token + '\n')
    out_file.close()
    return vocab


def _encode_in_place(sentences, word_to_ix):
    """Replace every token in *sentences* with its vocabulary index,
    falling back to the 'UNK' index for unknown tokens."""
    for sentence in sentences:
        for j, word in enumerate(sentence):
            sentence[j] = word_to_ix[word] if word in word_to_ix else word_to_ix['UNK']
def load_test_data(source, X_word_to_ix, max_len):
    """Read *source*, tokenize every non-empty line of at most ``max_len``
    characters (reversed, matching the encoder-side convention used at
    training time), and map each token to its index, substituting the
    'UNK' index for out-of-vocabulary words."""
    with open(source, 'r') as handle:
        raw_lines = handle.read().split('\n')
    X = [text_to_word_sequence(line)[::-1]
         for line in raw_lines if 0 < len(line) <= max_len]
    for tokens in X:
        for col, token in enumerate(tokens):
            tokens[col] = (X_word_to_ix[token] if token in X_word_to_ix
                           else X_word_to_ix['UNK'])
    return X
def create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len, hidden_size, num_layers):
    """Build and compile the sequence-to-sequence network.

    Encoder: an embedding over the source vocabulary followed by a single
    LSTM that summarizes the input.  The summary is repeated once per
    output timestep, then decoded by ``num_layers`` stacked LSTMs and a
    time-distributed softmax over the target vocabulary.
    """
    net = Sequential()
    # Encoder side.
    net.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True))
    net.add(LSTM(hidden_size))
    # Bridge: one copy of the encoder state per decoder timestep.
    net.add(RepeatVector(y_max_len))
    # Decoder side.
    for _ in range(num_layers):
        net.add(LSTM(hidden_size, return_sequences=True))
    net.add(TimeDistributed(Dense(y_vocab_len)))
    net.add(Activation('softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                metrics=['accuracy'])
    return net
def process_data(word_sentences, max_len, word_to_ix):
    """One-hot encode index sequences into a 3-D tensor of shape
    (num_sentences, max_len, vocab_size); positions past the end of a
    sentence remain all-zero."""
    one_hot = np.zeros((len(word_sentences), max_len, len(word_to_ix)))
    for row, indices in enumerate(word_sentences):
        for col, word_ix in enumerate(indices):
            one_hot[row, col, word_ix] = 1.0
    return one_hot
def find_checkpoint_file(folder):
    """Return the name of the most recently modified checkpoint file in *folder*.

    A checkpoint file is any directory entry whose name contains the
    substring 'checkpoint'.  Returns an empty list when none exists
    (callers rely on the falsy result), otherwise the bare filename
    (not joined with *folder*) of the newest one.
    """
    checkpoint_files = [f for f in os.listdir(folder) if 'checkpoint' in f]
    if len(checkpoint_files) == 0:
        return []
    # Bug fix: getmtime needs the full path -- the bare filename only worked
    # when the current working directory happened to be *folder*.
    modified_time = [os.path.getmtime(os.path.join(folder, f)) for f in checkpoint_files]
    return checkpoint_files[np.argmax(modified_time)]
|
flexible
|
{
"blob_id": "2962ef1d7ecd4e8d472b9dc36664e4e8745391fd",
"index": 3616,
"step-1": "<mask token>\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n 
X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\n<mask token>\n\n\ndef process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n 
X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,\n hidden_size, num_layers):\n model = Sequential()\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len,\n mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\n\ndef process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n 
X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,\n hidden_size, num_layers):\n model = Sequential()\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len,\n mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\n\ndef process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\ndef find_checkpoint_file(folder):\n checkpoint_file = [f for f in os.listdir(folder) if 
'checkpoint' in f]\n if len(checkpoint_file) == 0:\n return []\n modified_time = [os.path.getmtime(f) for f in checkpoint_file]\n return checkpoint_file[np.argmax(modified_time)]\n",
"step-4": "from keras.preprocessing.text import text_to_word_sequence\nfrom keras.models import Sequential\nfrom keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding\nfrom keras.layers.recurrent import LSTM\nfrom keras.optimizers import Adam, RMSprop\nimport numpy as np\nimport os\nimport datetime\nimport re\n\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len,\n vocab_size):\n \"\"\"\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n \"\"\"\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'),\n y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <=\n max_len and len(y) <= max_len]\n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0:\n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:vocab_size]\n for i, item in enumerate(X_vocab):\n if item == 'newlinechar':\n X_vocab[i] = '-'\n for item in X_vocab:\n print >> x_vocab_file, item\n x_vocab_file.close()\n vocab_y = {}\n for line in y:\n for token in line:\n if not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = 
y_vocab[0:vocab_size]\n for i, item in enumerate(y_vocab):\n if item == 'newlinechar':\n y_vocab[i] = '-'\n for item in y_vocab:\n print >> y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n X_ix_to_word = X_vocab\n X_ix_to_word.insert(0, 'ZERO')\n X_ix_to_word.append('UNK')\n X_word_to_ix = dict(map(reversed, enumerate(X_ix_to_word)))\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n y_word_to_ix = dict(map(reversed, enumerate(y_ix_to_word)))\n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n return X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab\n ), y_word_to_ix, y_ix_to_word\n\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = f.read()\n f.close()\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(\n x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len,\n hidden_size, num_layers):\n model = Sequential()\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len,\n mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\n\ndef 
process_data(word_sentences, max_len, word_to_ix):\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.0\n return sequences\n\n\ndef find_checkpoint_file(folder):\n checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f]\n if len(checkpoint_file) == 0:\n return []\n modified_time = [os.path.getmtime(f) for f in checkpoint_file]\n return checkpoint_file[np.argmax(modified_time)]\n",
"step-5": "from keras.preprocessing.text import text_to_word_sequence\nfrom keras.models import Sequential\nfrom keras.layers import Activation, TimeDistributed, Dense, RepeatVector, recurrent, Embedding\nfrom keras.layers.recurrent import LSTM\nfrom keras.optimizers import Adam, RMSprop\n#from nltk import FreqDist\nimport numpy as np\nimport os\nimport datetime\nimport re\n\ndef load_data(train_source, train_dist, test_source, test_dist, max_len, vocab_size):\n '''\n fin = open(test_source, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_source, \"a\")\n fout.write(data2)\n fout.close()\n\n fin = open(test_dist, \"r\")\n data2 = fin.read()\n fin.close()\n fout = open(train_dist, \"a\")\n fout.write(data2)\n fout.close()\n '''\n \n # Reading raw text from source and destination files\n f = open(train_source, 'r')\n X_data = f.read()\n f.close()\n f = open(train_dist, 'r')\n y_data = f.read()\n f.close()\n\n # Splitting raw text into array of sequences\n X = [text_to_word_sequence(x)[::-1] for x, y in zip(X_data.split('\\n'), y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len]\n y = [text_to_word_sequence(y) for x, y in zip(X_data.split('\\n'), y_data.split('\\n')) if len(x) > 0 and len(y) > 0 and len(x) <= max_len and len(y) <= max_len]\n\n #Check or Create Vocab \n vocab_files = [f for f in os.listdir('.') if 'vocab' in f]\n x_vocab_file = open(os.path.join('', 'vocab_x.txt'), 'a+')\n y_vocab_file = open(os.path.join('', 'vocab_y.txt'), 'a+')\n if len(vocab_files) == 0: \n vocab_x = {}\n for line in X:\n for token in line:\n if not token in vocab_x:\n vocab_x[token] = 0\n vocab_x[token] += 1\n\n X_vocab = sorted(vocab_x, key=vocab_x.get, reverse=True)\n X_vocab = X_vocab[0:(vocab_size)]\n for (i, item) in enumerate(X_vocab):\n if item == \"newlinechar\":\n X_vocab[i] = \"-\"\n for item in X_vocab:\n print>>x_vocab_file, item\n x_vocab_file.close()\n\n vocab_y = {}\n for line in y:\n for token in line:\n if 
not token in vocab_y:\n vocab_y[token] = 0\n vocab_y[token] += 1\n\n y_vocab = sorted(vocab_y, key=vocab_y.get, reverse=True)\n y_vocab = y_vocab[0:(vocab_size)]\n for (i, item) in enumerate(y_vocab):\n if item == \"newlinechar\":\n y_vocab[i] = \"-\"\n for item in y_vocab:\n print>>y_vocab_file, item\n y_vocab_file.close()\n else:\n X_vocab = x_vocab_file.read().splitlines()\n y_vocab = y_vocab_file.read().splitlines()\n \n # Creating the vocabulary set with the most common words\n #dist = FreqDist(np.hstack(X))\n #X_vocab = dist.most_common(vocab_size-1)\n #dist = FreqDist(np.hstack(y))\n #y_vocab = dist.most_common(vocab_size-1)\n\n # Creating an array of words from the vocabulary set, we will use this array as index-to-word dictionary\n X_ix_to_word = X_vocab\n # Adding the word \"ZERO\" to the beginning of the array\n X_ix_to_word.insert(0, 'ZERO')\n # Adding the word 'UNK' to the end of the array (stands for UNKNOWN words)\n X_ix_to_word.append('UNK')\n\n # Creating the word-to-index dictionary from the array created above\n #X_word_to_ix = {word:ix for ix, word in enumerate(X_ix_to_word)}\n \n X_word_to_ix = dict((map(reversed, enumerate(X_ix_to_word))))\n\n # Converting each word to its index value\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n\n y_ix_to_word = y_vocab\n y_ix_to_word.insert(0, 'ZERO')\n y_ix_to_word.append('UNK')\n \n #y_word_to_ix = {word:ix for ix, word in enumerate(y_ix_to_word)}\n \n y_word_to_ix = dict((map(reversed, enumerate(y_ix_to_word))))\n \n for i, sentence in enumerate(y):\n for j, word in enumerate(sentence):\n if word in y_word_to_ix:\n y[i][j] = y_word_to_ix[word]\n else:\n y[i][j] = y_word_to_ix['UNK']\n \n return (X, len(X_vocab), X_word_to_ix, X_ix_to_word, y, len(y_vocab), y_word_to_ix, y_ix_to_word)\n\ndef load_test_data(source, X_word_to_ix, max_len):\n f = open(source, 'r')\n X_data = 
f.read()\n f.close()\n\n X = [text_to_word_sequence(x)[::-1] for x in X_data.split('\\n') if len(x) > 0 and len(x) <= max_len]\n for i, sentence in enumerate(X):\n for j, word in enumerate(sentence):\n if word in X_word_to_ix:\n X[i][j] = X_word_to_ix[word]\n else:\n X[i][j] = X_word_to_ix['UNK']\n return X\n\ndef create_model(X_vocab_len, X_max_len, y_vocab_len, y_max_len, hidden_size, num_layers):\n model = Sequential()\n\n # Creating encoder network\n model.add(Embedding(X_vocab_len, 1000, input_length=X_max_len, mask_zero=True))\n model.add(LSTM(hidden_size))\n model.add(RepeatVector(y_max_len))\n\n # Creating decoder network\n for _ in range(num_layers):\n model.add(LSTM(hidden_size, return_sequences=True))\n model.add(TimeDistributed(Dense(y_vocab_len)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n return model\n\ndef process_data(word_sentences, max_len, word_to_ix):\n # Vectorizing each element in each sequence\n sequences = np.zeros((len(word_sentences), max_len, len(word_to_ix)))\n for i, sentence in enumerate(word_sentences):\n for j, word in enumerate(sentence):\n sequences[i, j, word] = 1.\n return sequences\n\ndef find_checkpoint_file(folder):\n checkpoint_file = [f for f in os.listdir(folder) if 'checkpoint' in f]\n if len(checkpoint_file) == 0:\n return []\n modified_time = [os.path.getmtime(f) for f in checkpoint_file]\n return checkpoint_file[np.argmax(modified_time)]\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write(
"""==========================
Unsupported Python Version
==========================
This version of MDSANIMA requires Python {}.{}
but you're trying to install it on Python {}.{}
"""
.format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
<|reserved_special_token_0|>
setup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,
long_description=LONG_DESCRIPTION, long_description_content_type=
LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=
AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=
find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',
'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',
'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=
KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console', 'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License', 'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Operating System :: MacOS', 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: 3D Modeling'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
HERE = pathlib.Path(__file__).parent
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = 3, 6
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write(
"""==========================
Unsupported Python Version
==========================
This version of MDSANIMA requires Python {}.{}
but you're trying to install it on Python {}.{}
"""
.format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
VERSION = '0.2.0'
PACKAGE_NAME = 'mdsanima'
AUTHOR = 'Marcin Rozewski'
AUTHOR_EMAIL = 'marcinrozewski@gmail.com'
URL = 'https://github.com/mdsanima/mdsanima'
LICENSE = 'MIT'
DESCRIPTION = (
'The package contains modules that will help in calculating rendering time.'
)
LONG_DESCRIPTION = (HERE / 'README.rst').read_text()
LONG_DESC_TYPE = 'text/x-rst'
INSTALL_REQUIRES = ['humanfriendly']
KEYWORDS = ['mdsanima', 'render time', 'calculator render time', 'blender',
'blener3d', 'rendering', 'houdini', 'sidefx', 'vfx', 'cinema4d',
'cycles', 'redshift', 'render engine', 'octane render', 'mantra',
'vray', 'clarisse ifx']
setup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,
long_description=LONG_DESCRIPTION, long_description_content_type=
LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=
AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=
find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',
'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',
'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=
KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console', 'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License', 'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Operating System :: MacOS', 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: 3D Modeling'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = 3, 6
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write(
"""==========================
Unsupported Python Version
==========================
This version of MDSANIMA requires Python {}.{}
but you're trying to install it on Python {}.{}
"""
.format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
VERSION = '0.2.0'
PACKAGE_NAME = 'mdsanima'
AUTHOR = 'Marcin Rozewski'
AUTHOR_EMAIL = 'marcinrozewski@gmail.com'
URL = 'https://github.com/mdsanima/mdsanima'
LICENSE = 'MIT'
DESCRIPTION = (
'The package contains modules that will help in calculating rendering time.'
)
LONG_DESCRIPTION = (HERE / 'README.rst').read_text()
LONG_DESC_TYPE = 'text/x-rst'
INSTALL_REQUIRES = ['humanfriendly']
KEYWORDS = ['mdsanima', 'render time', 'calculator render time', 'blender',
'blener3d', 'rendering', 'houdini', 'sidefx', 'vfx', 'cinema4d',
'cycles', 'redshift', 'render engine', 'octane render', 'mantra',
'vray', 'clarisse ifx']
setup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,
long_description=LONG_DESCRIPTION, long_description_content_type=
LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=
AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=
find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',
'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',
'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=
KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console', 'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License', 'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Operating System :: MacOS', 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: 3D Modeling'])
<|reserved_special_token_1|>
'''
MDSANIMA Setup

Packaging configuration for the ``mdsanima`` distribution: enforces the
minimum supported interpreter version, then hands all package metadata to
setuptools.
'''
import sys
import pathlib
from setuptools import setup, find_packages

# Directory containing this setup.py; used to locate README.rst below.
HERE = pathlib.Path(__file__).parent

CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)

# This check and everything above must remain compatible with Python 2.7.
if CURRENT_PYTHON < REQUIRED_PYTHON:
    sys.stderr.write("""==========================
Unsupported Python Version
==========================
This version of MDSANIMA requires Python {}.{}
but you're trying to install it on Python {}.{}
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
    sys.exit(1)

# Core package metadata.
VERSION = '0.2.0'
PACKAGE_NAME = 'mdsanima'
AUTHOR = 'Marcin Rozewski'
AUTHOR_EMAIL = 'marcinrozewski@gmail.com'
URL = 'https://github.com/mdsanima/mdsanima'
LICENSE = 'MIT'
DESCRIPTION = 'The package contains modules that will help in calculating rendering time.'
# Long description is the reStructuredText README shown on PyPI.
# NOTE(review): read at import time -- sdist must ship README.rst.
LONG_DESCRIPTION = (HERE / "README.rst").read_text()
LONG_DESC_TYPE = "text/x-rst"
# Runtime dependencies installed alongside the package.
INSTALL_REQUIRES = [
    'humanfriendly'
]
# Search keywords published to the package index.
KEYWORDS = [
    'mdsanima',
    'render time',
    'calculator render time',
    'blender',
    'blener3d',
    'rendering',
    'houdini',
    'sidefx',
    'vfx',
    'cinema4d',
    'cycles',
    'redshift',
    'render engine',
    'octane render',
    'mantra',
    'vray',
    'clarisse ifx'
]
setup(name=PACKAGE_NAME,
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      long_description_content_type=LONG_DESC_TYPE,
      author=AUTHOR,
      license=LICENSE,
      author_email=AUTHOR_EMAIL,
      url=URL,
      install_requires=INSTALL_REQUIRES,
      packages=find_packages(),
      # Extra dependency group for building the Sphinx documentation:
      # pip install mdsanima[docs]
      extras_require={
          "docs": [
              'sphinx',
              'sphinx-autoapi',
              'sphinx-rtd-theme',
              'sphinx-bootstrap-theme',
              'sphinx-prompt',
              'sphinx-tabs',
              'recommonmark'
          ],
      },
      python_requires='>=3.6',
      keywords=KEYWORDS,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: POSIX :: Linux',
          'Operating System :: Microsoft :: Windows :: Windows 10',
          'Operating System :: MacOS',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Topic :: Multimedia :: Graphics :: 3D Rendering',
          'Topic :: Multimedia :: Graphics :: 3D Modeling',
      ],
      )
|
flexible
|
{
"blob_id": "2827a56c12c1e15a6fe26ce182aa07d76735d77f",
"index": 407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\n \"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\"\n .format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\n<mask token>\nsetup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=\n AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=\n find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',\n 'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',\n 'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=\n KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling'])\n",
"step-3": "<mask token>\nHERE = pathlib.Path(__file__).parent\nCURRENT_PYTHON = sys.version_info[:2]\nREQUIRED_PYTHON = 3, 6\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\n \"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\"\n .format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\nVERSION = '0.2.0'\nPACKAGE_NAME = 'mdsanima'\nAUTHOR = 'Marcin Rozewski'\nAUTHOR_EMAIL = 'marcinrozewski@gmail.com'\nURL = 'https://github.com/mdsanima/mdsanima'\nLICENSE = 'MIT'\nDESCRIPTION = (\n 'The package contains modules that will help in calculating rendering time.'\n )\nLONG_DESCRIPTION = (HERE / 'README.rst').read_text()\nLONG_DESC_TYPE = 'text/x-rst'\nINSTALL_REQUIRES = ['humanfriendly']\nKEYWORDS = ['mdsanima', 'render time', 'calculator render time', 'blender',\n 'blener3d', 'rendering', 'houdini', 'sidefx', 'vfx', 'cinema4d',\n 'cycles', 'redshift', 'render engine', 'octane render', 'mantra',\n 'vray', 'clarisse ifx']\nsetup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=\n AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=\n find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',\n 'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',\n 'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=\n KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS', 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming 
Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling'])\n",
"step-4": "<mask token>\nimport sys\nimport pathlib\nfrom setuptools import setup, find_packages\nHERE = pathlib.Path(__file__).parent\nCURRENT_PYTHON = sys.version_info[:2]\nREQUIRED_PYTHON = 3, 6\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\n \"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\"\n .format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\nVERSION = '0.2.0'\nPACKAGE_NAME = 'mdsanima'\nAUTHOR = 'Marcin Rozewski'\nAUTHOR_EMAIL = 'marcinrozewski@gmail.com'\nURL = 'https://github.com/mdsanima/mdsanima'\nLICENSE = 'MIT'\nDESCRIPTION = (\n 'The package contains modules that will help in calculating rendering time.'\n )\nLONG_DESCRIPTION = (HERE / 'README.rst').read_text()\nLONG_DESC_TYPE = 'text/x-rst'\nINSTALL_REQUIRES = ['humanfriendly']\nKEYWORDS = ['mdsanima', 'render time', 'calculator render time', 'blender',\n 'blener3d', 'rendering', 'houdini', 'sidefx', 'vfx', 'cinema4d',\n 'cycles', 'redshift', 'render engine', 'octane render', 'mantra',\n 'vray', 'clarisse ifx']\nsetup(name=PACKAGE_NAME, version=VERSION, description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n LONG_DESC_TYPE, author=AUTHOR, license=LICENSE, author_email=\n AUTHOR_EMAIL, url=URL, install_requires=INSTALL_REQUIRES, packages=\n find_packages(), extras_require={'docs': ['sphinx', 'sphinx-autoapi',\n 'sphinx-rtd-theme', 'sphinx-bootstrap-theme', 'sphinx-prompt',\n 'sphinx-tabs', 'recommonmark']}, python_requires='>=3.6', keywords=\n KEYWORDS, classifiers=['Development Status :: 5 - Production/Stable',\n 'Environment :: Console', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License', 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS', 'Programming Language :: 
Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling'])\n",
"step-5": "'''\nMDSANIMA Setup\n'''\n\nimport sys\nimport pathlib\nfrom setuptools import setup, find_packages\n\nHERE = pathlib.Path(__file__).parent\n\nCURRENT_PYTHON = sys.version_info[:2]\nREQUIRED_PYTHON = (3, 6)\n\n# This check and everything above must remain compatible with Python 2.7.\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n sys.stderr.write(\"\"\"==========================\nUnsupported Python Version\n==========================\nThis version of MDSANIMA requires Python {}.{}\nbut you're trying to install it on Python {}.{}\n\"\"\".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))\n sys.exit(1)\n\nVERSION = '0.2.0'\nPACKAGE_NAME = 'mdsanima'\nAUTHOR = 'Marcin Rozewski'\nAUTHOR_EMAIL = 'marcinrozewski@gmail.com'\nURL = 'https://github.com/mdsanima/mdsanima'\n\nLICENSE = 'MIT'\nDESCRIPTION = 'The package contains modules that will help in calculating rendering time.'\nLONG_DESCRIPTION = (HERE / \"README.rst\").read_text()\nLONG_DESC_TYPE = \"text/x-rst\"\n\nINSTALL_REQUIRES = [\n 'humanfriendly'\n]\n\nKEYWORDS = [\n 'mdsanima',\n 'render time',\n 'calculator render time',\n 'blender',\n 'blener3d',\n 'rendering',\n 'houdini',\n 'sidefx',\n 'vfx',\n 'cinema4d',\n 'cycles',\n 'redshift',\n 'render engine',\n 'octane render',\n 'mantra',\n 'vray',\n 'clarisse ifx'\n]\n\nsetup(name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=LONG_DESC_TYPE,\n author=AUTHOR,\n license=LICENSE,\n author_email=AUTHOR_EMAIL,\n url=URL,\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n extras_require={\n \"docs\": [\n 'sphinx', \n 'sphinx-autoapi', \n 'sphinx-rtd-theme', \n 'sphinx-bootstrap-theme', \n 'sphinx-prompt', \n 'sphinx-tabs', \n 'recommonmark'\n ],\n },\n python_requires='>=3.6',\n keywords=KEYWORDS,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 
'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Multimedia :: Graphics :: 3D Rendering',\n 'Topic :: Multimedia :: Graphics :: 3D Modeling',\n ],\n )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.test import TestCase, Client
from pdf_crawler.models import Document
from rest_framework.reverse import reverse
class TestCase(TestCase):
client = Client()
def setUp(self):
Document.objects.create(name='First').save()
def test_endpoints(self):
"""
test for endpoints
"""
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-list')).status_code, 200)
self.assertEqual(self.client.get(reverse(
'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)
self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).
status_code, 200)
|
normal
|
{
"blob_id": "0d28ab54f08301d9788ca9a5e46d522e043e9507",
"index": 4474,
"step-1": "<mask token>\n\n\nclass TestCase(TestCase):\n <mask token>\n\n def setUp(self):\n Document.objects.create(name='First').save()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCase(TestCase):\n <mask token>\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n",
"step-3": "<mask token>\n\n\nclass TestCase(TestCase):\n client = Client()\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n",
"step-4": "from django.test import TestCase, Client\nfrom pdf_crawler.models import Document\nfrom rest_framework.reverse import reverse\n\n\nclass TestCase(TestCase):\n client = Client()\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
import sys
from collections import namedtuple
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, \
QHBoxLayout, QStackedWidget, QListWidget, QListWidgetItem
from PyQt5.QtCore import Qt, QSize
from runWidget import RunWidget
from recordWidget import RecordWidget
def QListWidget_qss():
return '''
QListWidget{
outline: 0px;
}
QListWidget {
min-width: 30px;
max-width: 50px;
color: Black;
background: #CCCCCC;
}
QListWidget::Item:selected {
background: #888888;
border-left: 5px solid red;
}
HistoryPanel:hover {
background: rgb(52, 52, 52);
}
'''
class MainCentralWidget(QWidget):
def __init__(self):
super().__init__()
tab_bar = self.getTabBar(('录制', '运行'))
tab_page = self.getTabPage()
tab_bar.currentRowChanged.connect(tab_page.setCurrentIndex)
hbox = QHBoxLayout(spacing=0)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(tab_bar)
hbox.addWidget(tab_page)
self.setLayout(hbox)
def getTabBar(self, names):
tab_bar = QListWidget()
tab_bar.setStyleSheet(QListWidget_qss())
tab_bar.setFrameShape(QListWidget.NoFrame)
tab_bar.setItemAlignment(Qt.AlignCenter)
tab_bar.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
for name in names:
item = QListWidgetItem(name)
item.setTextAlignment(Qt.AlignCenter)
item.setSizeHint(QSize(50, 50))
tab_bar.addItem(item)
tab_bar.setCurrentRow(0)
return tab_bar
def getTabPage(self):
tab_page = QStackedWidget()
tab_page.addWidget(RecordWidget())
tab_page.addWidget(RunWidget())
return tab_page
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setGeometry(50, 50, 900, 300)
self.setWindowTitle('AutoMouse')
self.setCentralWidget(MainCentralWidget())
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
main_window = MainWindow()
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "252a6b97f108b7fdc165ccb2a7f61ce31f129d3d",
"index": 8693,
"step-1": "<mask token>\n\n\nclass MainCentralWidget(QWidget):\n\n def __init__(self):\n super().__init__()\n tab_bar = self.getTabBar(('录制', '运行'))\n tab_page = self.getTabPage()\n tab_bar.currentRowChanged.connect(tab_page.setCurrentIndex)\n hbox = QHBoxLayout(spacing=0)\n hbox.setContentsMargins(0, 0, 0, 0)\n hbox.addWidget(tab_bar)\n hbox.addWidget(tab_page)\n self.setLayout(hbox)\n <mask token>\n\n def getTabPage(self):\n tab_page = QStackedWidget()\n tab_page.addWidget(RecordWidget())\n tab_page.addWidget(RunWidget())\n return tab_page\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setGeometry(50, 50, 900, 300)\n self.setWindowTitle('AutoMouse')\n self.setCentralWidget(MainCentralWidget())\n self.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainCentralWidget(QWidget):\n\n def __init__(self):\n super().__init__()\n tab_bar = self.getTabBar(('录制', '运行'))\n tab_page = self.getTabPage()\n tab_bar.currentRowChanged.connect(tab_page.setCurrentIndex)\n hbox = QHBoxLayout(spacing=0)\n hbox.setContentsMargins(0, 0, 0, 0)\n hbox.addWidget(tab_bar)\n hbox.addWidget(tab_page)\n self.setLayout(hbox)\n\n def getTabBar(self, names):\n tab_bar = QListWidget()\n tab_bar.setStyleSheet(QListWidget_qss())\n tab_bar.setFrameShape(QListWidget.NoFrame)\n tab_bar.setItemAlignment(Qt.AlignCenter)\n tab_bar.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n for name in names:\n item = QListWidgetItem(name)\n item.setTextAlignment(Qt.AlignCenter)\n item.setSizeHint(QSize(50, 50))\n tab_bar.addItem(item)\n tab_bar.setCurrentRow(0)\n return tab_bar\n\n def getTabPage(self):\n tab_page = QStackedWidget()\n tab_page.addWidget(RecordWidget())\n tab_page.addWidget(RunWidget())\n return tab_page\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setGeometry(50, 50, 900, 300)\n self.setWindowTitle('AutoMouse')\n self.setCentralWidget(MainCentralWidget())\n self.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef QListWidget_qss():\n return \"\"\"\n QListWidget{\n outline: 0px;\n }\n\n QListWidget {\n min-width: 30px;\n max-width: 50px;\n color: Black;\n background: #CCCCCC;\n }\n\n QListWidget::Item:selected {\n background: #888888;\n border-left: 5px solid red;\n }\n HistoryPanel:hover {\n background: rgb(52, 52, 52);\n }\n \"\"\"\n\n\nclass MainCentralWidget(QWidget):\n\n def __init__(self):\n super().__init__()\n tab_bar = self.getTabBar(('录制', '运行'))\n tab_page = self.getTabPage()\n tab_bar.currentRowChanged.connect(tab_page.setCurrentIndex)\n hbox = QHBoxLayout(spacing=0)\n hbox.setContentsMargins(0, 0, 0, 0)\n hbox.addWidget(tab_bar)\n hbox.addWidget(tab_page)\n self.setLayout(hbox)\n\n def getTabBar(self, names):\n tab_bar = QListWidget()\n tab_bar.setStyleSheet(QListWidget_qss())\n tab_bar.setFrameShape(QListWidget.NoFrame)\n tab_bar.setItemAlignment(Qt.AlignCenter)\n tab_bar.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n for name in names:\n item = QListWidgetItem(name)\n item.setTextAlignment(Qt.AlignCenter)\n item.setSizeHint(QSize(50, 50))\n tab_bar.addItem(item)\n tab_bar.setCurrentRow(0)\n return tab_bar\n\n def getTabPage(self):\n tab_page = QStackedWidget()\n tab_page.addWidget(RecordWidget())\n tab_page.addWidget(RunWidget())\n return tab_page\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setGeometry(50, 50, 900, 300)\n self.setWindowTitle('AutoMouse')\n self.setCentralWidget(MainCentralWidget())\n self.show()\n\n\n<mask token>\n",
"step-4": "import sys\nfrom collections import namedtuple\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QHBoxLayout, QStackedWidget, QListWidget, QListWidgetItem\nfrom PyQt5.QtCore import Qt, QSize\nfrom runWidget import RunWidget\nfrom recordWidget import RecordWidget\n\n\ndef QListWidget_qss():\n return \"\"\"\n QListWidget{\n outline: 0px;\n }\n\n QListWidget {\n min-width: 30px;\n max-width: 50px;\n color: Black;\n background: #CCCCCC;\n }\n\n QListWidget::Item:selected {\n background: #888888;\n border-left: 5px solid red;\n }\n HistoryPanel:hover {\n background: rgb(52, 52, 52);\n }\n \"\"\"\n\n\nclass MainCentralWidget(QWidget):\n\n def __init__(self):\n super().__init__()\n tab_bar = self.getTabBar(('录制', '运行'))\n tab_page = self.getTabPage()\n tab_bar.currentRowChanged.connect(tab_page.setCurrentIndex)\n hbox = QHBoxLayout(spacing=0)\n hbox.setContentsMargins(0, 0, 0, 0)\n hbox.addWidget(tab_bar)\n hbox.addWidget(tab_page)\n self.setLayout(hbox)\n\n def getTabBar(self, names):\n tab_bar = QListWidget()\n tab_bar.setStyleSheet(QListWidget_qss())\n tab_bar.setFrameShape(QListWidget.NoFrame)\n tab_bar.setItemAlignment(Qt.AlignCenter)\n tab_bar.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n for name in names:\n item = QListWidgetItem(name)\n item.setTextAlignment(Qt.AlignCenter)\n item.setSizeHint(QSize(50, 50))\n tab_bar.addItem(item)\n tab_bar.setCurrentRow(0)\n return tab_bar\n\n def getTabPage(self):\n tab_page = QStackedWidget()\n tab_page.addWidget(RecordWidget())\n tab_page.addWidget(RunWidget())\n return tab_page\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setGeometry(50, 50, 900, 300)\n self.setWindowTitle('AutoMouse')\n self.setCentralWidget(MainCentralWidget())\n self.show()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main_window = MainWindow()\n sys.exit(app.exec_())\n",
"step-5": "import sys\nfrom collections import namedtuple\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, \\\n QHBoxLayout, QStackedWidget, QListWidget, QListWidgetItem\nfrom PyQt5.QtCore import Qt, QSize\n\nfrom runWidget import RunWidget\nfrom recordWidget import RecordWidget\n\n\ndef QListWidget_qss():\n return '''\n QListWidget{\n outline: 0px;\n }\n\n QListWidget {\n min-width: 30px;\n max-width: 50px;\n color: Black;\n background: #CCCCCC;\n }\n\n QListWidget::Item:selected {\n background: #888888;\n border-left: 5px solid red;\n }\n HistoryPanel:hover {\n background: rgb(52, 52, 52);\n }\n '''\n\n\nclass MainCentralWidget(QWidget):\n def __init__(self):\n super().__init__()\n tab_bar = self.getTabBar(('录制', '运行'))\n tab_page = self.getTabPage()\n tab_bar.currentRowChanged.connect(tab_page.setCurrentIndex)\n hbox = QHBoxLayout(spacing=0)\n hbox.setContentsMargins(0, 0, 0, 0)\n hbox.addWidget(tab_bar)\n hbox.addWidget(tab_page)\n self.setLayout(hbox)\n \n def getTabBar(self, names):\n tab_bar = QListWidget()\n tab_bar.setStyleSheet(QListWidget_qss())\n tab_bar.setFrameShape(QListWidget.NoFrame)\n tab_bar.setItemAlignment(Qt.AlignCenter)\n tab_bar.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n for name in names:\n item = QListWidgetItem(name)\n item.setTextAlignment(Qt.AlignCenter)\n item.setSizeHint(QSize(50, 50))\n tab_bar.addItem(item)\n tab_bar.setCurrentRow(0)\n return tab_bar\n\n def getTabPage(self):\n tab_page = QStackedWidget()\n tab_page.addWidget(RecordWidget())\n tab_page.addWidget(RunWidget())\n return tab_page\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setGeometry(50, 50, 900, 300)\n self.setWindowTitle('AutoMouse')\n self.setCentralWidget(MainCentralWidget())\n self.show()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n main_window = MainWindow()\n sys.exit(app.exec_())",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
#1.문자열에 홑따옴표 포함기키기 : 쌍따옴표
print("Python's Data Type")
#2.문자열에 쌍따옴표 포함시키기 : 홑따옴표
print('"Python is very easy" he said.')
#멀티라인(여러줄)표현하기
#1. 연속된 쌍따옴표 3개 사용하기
print("""No pain
No gain""")
#2. 연속된 쌍따옴표 3개 사용하기
print('''No pain
No gain''')
#3.이스케이프 코드 \n 삽입하기
print("No pain \n No gain")
"""
이스케이프(escape) 문자
\n :new line. 문자열 안에서 줄을 바꿀 때 사용
\t :tap.문자열 사이에 탭만큼의 간격을 줄 때 사용
\\ :문자 \를 그대로 표현할 때 사용
\' :홑따옴표를 그대로 표현할 때 사용
\" :쌍따옴표를 그대로 표현할 때 사용
"""
print("Ha\tHa\tHa")
print("역슬래시 \\")
print("쌍따옴표 \"")
print("홑따옴표 \'")
|
normal
|
{
"blob_id": "eb81f1825c4ac8e20dde1daefbdad22f588e696e",
"index": 9431,
"step-1": "<mask token>\n",
"step-2": "print(\"Python's Data Type\")\nprint('\"Python is very easy\" he said.')\nprint(\"\"\"No pain\n No gain\"\"\")\nprint(\"\"\"No pain\n No gain\"\"\")\nprint(\"\"\"No pain \n No gain\"\"\")\n<mask token>\nprint('Ha\\tHa\\tHa')\nprint('역슬래시 \\\\')\nprint('쌍따옴표 \"')\nprint(\"홑따옴표 '\")\n",
"step-3": "#1.문자열에 홑따옴표 포함기키기 : 쌍따옴표\nprint(\"Python's Data Type\")\n\n#2.문자열에 쌍따옴표 포함시키기 : 홑따옴표\nprint('\"Python is very easy\" he said.')\n\n#멀티라인(여러줄)표현하기\n#1. 연속된 쌍따옴표 3개 사용하기\nprint(\"\"\"No pain\n No gain\"\"\")\n\n#2. 연속된 쌍따옴표 3개 사용하기\nprint('''No pain\n No gain''')\n\n#3.이스케이프 코드 \\n 삽입하기\nprint(\"No pain \\n No gain\")\n\n\"\"\"\n이스케이프(escape) 문자\n\\n :new line. 문자열 안에서 줄을 바꿀 때 사용\n\\t :tap.문자열 사이에 탭만큼의 간격을 줄 때 사용\n\\\\ :문자 \\를 그대로 표현할 때 사용\n\\' :홑따옴표를 그대로 표현할 때 사용\n\\\" :쌍따옴표를 그대로 표현할 때 사용\n\"\"\"\nprint(\"Ha\\tHa\\tHa\")\nprint(\"역슬래시 \\\\\")\nprint(\"쌍따옴표 \\\"\")\nprint(\"홑따옴표 \\'\")\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def setup(bot):
bot.add_cog(EmbedPeek(bot))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__red_end_user_data_statement__ = (
'This cog does not persistently store data or metadata about users.')
def setup(bot):
bot.add_cog(EmbedPeek(bot))
<|reserved_special_token_1|>
from .embedpeek import EmbedPeek
__red_end_user_data_statement__ = (
'This cog does not persistently store data or metadata about users.')
def setup(bot):
bot.add_cog(EmbedPeek(bot))
<|reserved_special_token_1|>
from .embedpeek import EmbedPeek
__red_end_user_data_statement__ = "This cog does not persistently store data or metadata about users."
def setup(bot):
bot.add_cog(EmbedPeek(bot))
|
flexible
|
{
"blob_id": "b66142e0b674d3920b8e3ad74e0d0b753f0a78c3",
"index": 3471,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef setup(bot):\n bot.add_cog(EmbedPeek(bot))\n",
"step-3": "<mask token>\n__red_end_user_data_statement__ = (\n 'This cog does not persistently store data or metadata about users.')\n\n\ndef setup(bot):\n bot.add_cog(EmbedPeek(bot))\n",
"step-4": "from .embedpeek import EmbedPeek\n__red_end_user_data_statement__ = (\n 'This cog does not persistently store data or metadata about users.')\n\n\ndef setup(bot):\n bot.add_cog(EmbedPeek(bot))\n",
"step-5": "from .embedpeek import EmbedPeek\n\n__red_end_user_data_statement__ = \"This cog does not persistently store data or metadata about users.\"\n\n\ndef setup(bot):\n bot.add_cog(EmbedPeek(bot))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from wordcloud import WordCloud, ImageColorGenerator
import numpy as np
from PIL import Image
def word2cloud(text: str, mask_image: Image=None):
if mask_image == None:
wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode='RGBA',
background_color=None).generate(text)
else:
mask = np.array(mask_image) # 使用mask,最好界限分明对比强烈的图形
image_colors = ImageColorGenerator(mask) # 提取蒙版颜色
wc = WordCloud(mask=mask, color_func=image_colors,
width=800, height=600,
font_path='simhei.ttf', mode='RGBA',
background_color=None).generate(text)
img_res = wc.to_image()
return img_res
# 这个大小只是大概,若要精细化,可用结巴统计词频
# freq=jieba.analyse.extract_tags(text, topK=200, withWeight=True)
# freq={w[0]:w[1] for w in freq}
# WordCloud(...).generate_from_frequencies(freq)
# plt.imshow(wc,interpolation='bilinear') # 插值颜色均匀
# plt.axis('off')
# plt.show()
#wc.to_file('wordcloud.png') # 保存
|
normal
|
{
"blob_id": "f9310aa6c26ec10041dac272fa17ac21f74c21ac",
"index": 9326,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef word2cloud(text: str, mask_image: Image=None):\n if mask_image == None:\n wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode=\n 'RGBA', background_color=None).generate(text)\n else:\n mask = np.array(mask_image)\n image_colors = ImageColorGenerator(mask)\n wc = WordCloud(mask=mask, color_func=image_colors, width=800,\n height=600, font_path='simhei.ttf', mode='RGBA',\n background_color=None).generate(text)\n img_res = wc.to_image()\n return img_res\n",
"step-3": "from wordcloud import WordCloud, ImageColorGenerator\nimport numpy as np\nfrom PIL import Image\n\n\ndef word2cloud(text: str, mask_image: Image=None):\n if mask_image == None:\n wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode=\n 'RGBA', background_color=None).generate(text)\n else:\n mask = np.array(mask_image)\n image_colors = ImageColorGenerator(mask)\n wc = WordCloud(mask=mask, color_func=image_colors, width=800,\n height=600, font_path='simhei.ttf', mode='RGBA',\n background_color=None).generate(text)\n img_res = wc.to_image()\n return img_res\n",
"step-4": "# -*- coding: utf-8 -*-\nfrom wordcloud import WordCloud, ImageColorGenerator\nimport numpy as np\nfrom PIL import Image\n\ndef word2cloud(text: str, mask_image: Image=None):\n if mask_image == None:\n wc = WordCloud(font_path='simhei.ttf', width=800, height=600, mode='RGBA',\n background_color=None).generate(text)\n else:\n mask = np.array(mask_image) # 使用mask,最好界限分明对比强烈的图形\n image_colors = ImageColorGenerator(mask) # 提取蒙版颜色\n wc = WordCloud(mask=mask, color_func=image_colors,\n width=800, height=600,\n font_path='simhei.ttf', mode='RGBA',\n background_color=None).generate(text)\n img_res = wc.to_image()\n return img_res\n\n\n# 这个大小只是大概,若要精细化,可用结巴统计词频\n# freq=jieba.analyse.extract_tags(text, topK=200, withWeight=True)\n# freq={w[0]:w[1] for w in freq}\n# WordCloud(...).generate_from_frequencies(freq)\n\n# plt.imshow(wc,interpolation='bilinear') # 插值颜色均匀\n# plt.axis('off')\n# plt.show()\n\n#wc.to_file('wordcloud.png') # 保存",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def configure_log(log_file, verbose=False):
filename = log_file
if log_file == 'STDOUT':
handler = logging.StreamHandler(sys.stdout)
elif log_file == 'STDERR':
handler = logging.StreamHandler(sys.stderr)
else:
handler = TimedRotatingFileHandler(filename, when='d', interval=7,
backupCount=4)
formatter = logging.Formatter(
'[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.setLevel('DEBUG' if verbose else 'INFO')
logger.addHandler(handler)
<|reserved_special_token_0|>
def config_inotify_setup(skipWatchingNonExistent: bool
) ->inotify.adapters.Inotify:
i = inotify.adapters.Inotify()
watchedDirs = {}
for p in Config.paths:
if os.path.exists(p):
if os.path.isfile(p):
logger.debug('Watching existing config file {}'.format(p))
i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |
inotify.constants.IN_DELETE)
else:
logger.debug('Watching existing config directory {}'.format(p))
i.add_watch(p)
elif not skipWatchingNonExistent:
d, n = os.path.split(p)
while not os.path.exists(d):
d, n = os.path.split(d)
if d not in watchedDirs:
i.add_watch(d, inotify.constants.IN_DELETE | inotify.
constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)
logger.debug('Watching directory {} as base for {}'.format(
d, p))
watchedDirs[d] = True
return i
@threaded
def update_config(skipWatchingNonExistent=True):
i = config_inotify_setup(skipWatchingNonExistent)
while True:
reload = False
for event in i.event_gen(yield_nones=False):
_, mask, dir, file = event
fp = os.path.join(dir, file).rstrip(os.path.sep)
for p in Config.paths:
if p == fp or dir == p:
reload = True
break
if reload:
break
if reload:
logger.debug('Will reload configuration')
Config.reload_data()
i = config_inotify_setup()
MirrorManager.update_mirror_list()
else:
logger.debug('No need to reload configuration')
<|reserved_special_token_0|>
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True), callback=lambda ctx, param, val: Path(val))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.pass_context
def update(ctx, img_dir, streams_dir):
logger.info('Updating server')
img_dir = Path(img_dir).expanduser().resolve()
streams_dir = Path(streams_dir).expanduser().resolve()
images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=
logger)
fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),
str(img_dir.name))]
operations = Operations(fake_events, str(img_dir))
images.update(operations.ops)
images.save()
logger.info('Server updated')
@cli.command()
@click.option('--root_dir', default='/var/www/simplestreams', show_default=True
)
@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,
callback=lambda ctx, param, val: Path(val))
@click.option('--ssl_skip', default=False, is_flag=True)
@click.option('--nginx_skip', default=False, is_flag=True)
@click.pass_context
def init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):
root_dir = Path(root_dir).expanduser().resolve()
if not Path(root_dir).exists():
logger.error('Root directory does not exists')
else:
if nginx_skip:
ssl_skip = True
if not ssl_skip:
if not ssl_dir.exists():
os.makedirs(str(ssl_dir))
if not (ssl_dir / 'nginx.key').exists():
generate_cert(str(ssl_dir))
img_dir = str(Path(root_dir, 'images'))
streams_dir = str(Path(root_dir, 'streams/v1'))
if not Path(img_dir).exists():
os.makedirs(img_dir)
if not Path(streams_dir).exists():
os.makedirs(streams_dir)
if not nginx_skip:
conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')
if not conf_path.exists():
conf_path.symlink_to(
'/etc/nginx/sites-available/simplestreams.conf')
os.system('nginx -s reload')
if not Path(root_dir, 'streams', 'v1', 'images.json').exists():
ctx.invoke(update, img_dir=Path(root_dir, 'images'),
streams_dir=Path(root_dir, 'streams', 'v1'))
fix_permissions(img_dir)
fix_permissions(streams_dir)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
type=click.Path(exists=True, file_okay=False, resolve_path=True),
show_default=True)
@click.option('--skip-watch-config-non-existent', default=False, type=bool,
is_flag=True)
@click.pass_context
def watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):
path_img_dir = str(Path(img_dir).expanduser().resolve())
path_streams_dir = str(Path(streams_dir).expanduser().resolve())
logger.info('Starting watch process')
Config.load_data()
update_config(skip_watch_config_non_existent)
update_metadata(path_img_dir, path_streams_dir)
logger.debug('Watching image directory {}'.format(path_img_dir))
i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |
IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)
while True:
events = i.event_gen(yield_nones=False, timeout_s=15)
files_changed = needs_update(events)
if files_changed:
event_queue.put(files_changed)
def main():
try:
sys.exit(cli())
except Exception:
logger.error(traceback.format_exc())
sys.exit(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def threaded(fn):
def wrapper(*args, **kwargs):
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
def configure_log(log_file, verbose=False):
filename = log_file
if log_file == 'STDOUT':
handler = logging.StreamHandler(sys.stdout)
elif log_file == 'STDERR':
handler = logging.StreamHandler(sys.stderr)
else:
handler = TimedRotatingFileHandler(filename, when='d', interval=7,
backupCount=4)
formatter = logging.Formatter(
'[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.setLevel('DEBUG' if verbose else 'INFO')
logger.addHandler(handler)
def needs_update(events):
modified_files = []
for event in list(events):
if re.match('\\d{8}_\\d{2}:\\d{2}', event[3]) or any(k in event[1] for
k in ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE',
'IN_CLOSE_WRITE')):
logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'.
format(event[2], event[3], event[1]))
modified_files.append(event)
return modified_files
def config_inotify_setup(skipWatchingNonExistent: bool
) ->inotify.adapters.Inotify:
i = inotify.adapters.Inotify()
watchedDirs = {}
for p in Config.paths:
if os.path.exists(p):
if os.path.isfile(p):
logger.debug('Watching existing config file {}'.format(p))
i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |
inotify.constants.IN_DELETE)
else:
logger.debug('Watching existing config directory {}'.format(p))
i.add_watch(p)
elif not skipWatchingNonExistent:
d, n = os.path.split(p)
while not os.path.exists(d):
d, n = os.path.split(d)
if d not in watchedDirs:
i.add_watch(d, inotify.constants.IN_DELETE | inotify.
constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)
logger.debug('Watching directory {} as base for {}'.format(
d, p))
watchedDirs[d] = True
return i
@threaded
def update_config(skipWatchingNonExistent=True):
i = config_inotify_setup(skipWatchingNonExistent)
while True:
reload = False
for event in i.event_gen(yield_nones=False):
_, mask, dir, file = event
fp = os.path.join(dir, file).rstrip(os.path.sep)
for p in Config.paths:
if p == fp or dir == p:
reload = True
break
if reload:
break
if reload:
logger.debug('Will reload configuration')
Config.reload_data()
i = config_inotify_setup()
MirrorManager.update_mirror_list()
else:
logger.debug('No need to reload configuration')
<|reserved_special_token_0|>
def fix_permissions(path):
Path(path).chmod(509)
for root, dirs, files in os.walk(path):
for elem in files:
Path(root, elem).chmod(509)
for elem in dirs:
Path(root, elem).chmod(509)
@click.group()
@click.option('--log-file', default='./lxd-image-server.log', show_default=True
)
@click.option('--verbose', help='Sets log level to debug', is_flag=True,
default=False)
def cli(log_file, verbose):
configure_log(log_file, verbose)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True), callback=lambda ctx, param, val: Path(val))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.pass_context
def update(ctx, img_dir, streams_dir):
logger.info('Updating server')
img_dir = Path(img_dir).expanduser().resolve()
streams_dir = Path(streams_dir).expanduser().resolve()
images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=
logger)
fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),
str(img_dir.name))]
operations = Operations(fake_events, str(img_dir))
images.update(operations.ops)
images.save()
logger.info('Server updated')
@cli.command()
@click.option('--root_dir', default='/var/www/simplestreams', show_default=True
)
@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,
callback=lambda ctx, param, val: Path(val))
@click.option('--ssl_skip', default=False, is_flag=True)
@click.option('--nginx_skip', default=False, is_flag=True)
@click.pass_context
def init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):
root_dir = Path(root_dir).expanduser().resolve()
if not Path(root_dir).exists():
logger.error('Root directory does not exists')
else:
if nginx_skip:
ssl_skip = True
if not ssl_skip:
if not ssl_dir.exists():
os.makedirs(str(ssl_dir))
if not (ssl_dir / 'nginx.key').exists():
generate_cert(str(ssl_dir))
img_dir = str(Path(root_dir, 'images'))
streams_dir = str(Path(root_dir, 'streams/v1'))
if not Path(img_dir).exists():
os.makedirs(img_dir)
if not Path(streams_dir).exists():
os.makedirs(streams_dir)
if not nginx_skip:
conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')
if not conf_path.exists():
conf_path.symlink_to(
'/etc/nginx/sites-available/simplestreams.conf')
os.system('nginx -s reload')
if not Path(root_dir, 'streams', 'v1', 'images.json').exists():
ctx.invoke(update, img_dir=Path(root_dir, 'images'),
streams_dir=Path(root_dir, 'streams', 'v1'))
fix_permissions(img_dir)
fix_permissions(streams_dir)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
type=click.Path(exists=True, file_okay=False, resolve_path=True),
show_default=True)
@click.option('--skip-watch-config-non-existent', default=False, type=bool,
is_flag=True)
@click.pass_context
def watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):
path_img_dir = str(Path(img_dir).expanduser().resolve())
path_streams_dir = str(Path(streams_dir).expanduser().resolve())
logger.info('Starting watch process')
Config.load_data()
update_config(skip_watch_config_non_existent)
update_metadata(path_img_dir, path_streams_dir)
logger.debug('Watching image directory {}'.format(path_img_dir))
i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |
IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)
while True:
events = i.event_gen(yield_nones=False, timeout_s=15)
files_changed = needs_update(events)
if files_changed:
event_queue.put(files_changed)
def main():
try:
sys.exit(cli())
except Exception:
logger.error(traceback.format_exc())
sys.exit(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def threaded(fn):
def wrapper(*args, **kwargs):
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
def configure_log(log_file, verbose=False):
filename = log_file
if log_file == 'STDOUT':
handler = logging.StreamHandler(sys.stdout)
elif log_file == 'STDERR':
handler = logging.StreamHandler(sys.stderr)
else:
handler = TimedRotatingFileHandler(filename, when='d', interval=7,
backupCount=4)
formatter = logging.Formatter(
'[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.setLevel('DEBUG' if verbose else 'INFO')
logger.addHandler(handler)
def needs_update(events):
modified_files = []
for event in list(events):
if re.match('\\d{8}_\\d{2}:\\d{2}', event[3]) or any(k in event[1] for
k in ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE',
'IN_CLOSE_WRITE')):
logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'.
format(event[2], event[3], event[1]))
modified_files.append(event)
return modified_files
def config_inotify_setup(skipWatchingNonExistent: bool
) ->inotify.adapters.Inotify:
i = inotify.adapters.Inotify()
watchedDirs = {}
for p in Config.paths:
if os.path.exists(p):
if os.path.isfile(p):
logger.debug('Watching existing config file {}'.format(p))
i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |
inotify.constants.IN_DELETE)
else:
logger.debug('Watching existing config directory {}'.format(p))
i.add_watch(p)
elif not skipWatchingNonExistent:
d, n = os.path.split(p)
while not os.path.exists(d):
d, n = os.path.split(d)
if d not in watchedDirs:
i.add_watch(d, inotify.constants.IN_DELETE | inotify.
constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)
logger.debug('Watching directory {} as base for {}'.format(
d, p))
watchedDirs[d] = True
return i
@threaded
def update_config(skipWatchingNonExistent=True):
i = config_inotify_setup(skipWatchingNonExistent)
while True:
reload = False
for event in i.event_gen(yield_nones=False):
_, mask, dir, file = event
fp = os.path.join(dir, file).rstrip(os.path.sep)
for p in Config.paths:
if p == fp or dir == p:
reload = True
break
if reload:
break
if reload:
logger.debug('Will reload configuration')
Config.reload_data()
i = config_inotify_setup()
MirrorManager.update_mirror_list()
else:
logger.debug('No need to reload configuration')
@threaded
def update_metadata(img_dir, streams_dir):
MirrorManager.img_dir = img_dir
MirrorManager.update_mirror_list()
while True:
events = event_queue.get()
ops = Operations(events, str(Path(img_dir).resolve()))
if ops:
logger.info('Updating server: %s', ','.join(str(x) for x in ops
.ops))
images = Images(str(Path(streams_dir).resolve()), logger=logger)
images.update(ops.ops)
images.save()
MirrorManager.update()
logger.info('Server updated')
def fix_permissions(path):
Path(path).chmod(509)
for root, dirs, files in os.walk(path):
for elem in files:
Path(root, elem).chmod(509)
for elem in dirs:
Path(root, elem).chmod(509)
@click.group()
@click.option('--log-file', default='./lxd-image-server.log', show_default=True
)
@click.option('--verbose', help='Sets log level to debug', is_flag=True,
default=False)
def cli(log_file, verbose):
configure_log(log_file, verbose)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True), callback=lambda ctx, param, val: Path(val))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.pass_context
def update(ctx, img_dir, streams_dir):
logger.info('Updating server')
img_dir = Path(img_dir).expanduser().resolve()
streams_dir = Path(streams_dir).expanduser().resolve()
images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=
logger)
fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),
str(img_dir.name))]
operations = Operations(fake_events, str(img_dir))
images.update(operations.ops)
images.save()
logger.info('Server updated')
@cli.command()
@click.option('--root_dir', default='/var/www/simplestreams', show_default=True
)
@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,
callback=lambda ctx, param, val: Path(val))
@click.option('--ssl_skip', default=False, is_flag=True)
@click.option('--nginx_skip', default=False, is_flag=True)
@click.pass_context
def init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):
root_dir = Path(root_dir).expanduser().resolve()
if not Path(root_dir).exists():
logger.error('Root directory does not exists')
else:
if nginx_skip:
ssl_skip = True
if not ssl_skip:
if not ssl_dir.exists():
os.makedirs(str(ssl_dir))
if not (ssl_dir / 'nginx.key').exists():
generate_cert(str(ssl_dir))
img_dir = str(Path(root_dir, 'images'))
streams_dir = str(Path(root_dir, 'streams/v1'))
if not Path(img_dir).exists():
os.makedirs(img_dir)
if not Path(streams_dir).exists():
os.makedirs(streams_dir)
if not nginx_skip:
conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')
if not conf_path.exists():
conf_path.symlink_to(
'/etc/nginx/sites-available/simplestreams.conf')
os.system('nginx -s reload')
if not Path(root_dir, 'streams', 'v1', 'images.json').exists():
ctx.invoke(update, img_dir=Path(root_dir, 'images'),
streams_dir=Path(root_dir, 'streams', 'v1'))
fix_permissions(img_dir)
fix_permissions(streams_dir)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
type=click.Path(exists=True, file_okay=False, resolve_path=True),
show_default=True)
@click.option('--skip-watch-config-non-existent', default=False, type=bool,
is_flag=True)
@click.pass_context
def watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):
path_img_dir = str(Path(img_dir).expanduser().resolve())
path_streams_dir = str(Path(streams_dir).expanduser().resolve())
logger.info('Starting watch process')
Config.load_data()
update_config(skip_watch_config_non_existent)
update_metadata(path_img_dir, path_streams_dir)
logger.debug('Watching image directory {}'.format(path_img_dir))
i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |
IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)
while True:
events = i.event_gen(yield_nones=False, timeout_s=15)
files_changed = needs_update(events)
if files_changed:
event_queue.put(files_changed)
def main():
try:
sys.exit(cli())
except Exception:
logger.error(traceback.format_exc())
sys.exit(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger('lxd-image-server')
event_queue = queue.Queue()
def threaded(fn):
def wrapper(*args, **kwargs):
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
return wrapper
def configure_log(log_file, verbose=False):
filename = log_file
if log_file == 'STDOUT':
handler = logging.StreamHandler(sys.stdout)
elif log_file == 'STDERR':
handler = logging.StreamHandler(sys.stderr)
else:
handler = TimedRotatingFileHandler(filename, when='d', interval=7,
backupCount=4)
formatter = logging.Formatter(
'[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.setLevel('DEBUG' if verbose else 'INFO')
logger.addHandler(handler)
def needs_update(events):
modified_files = []
for event in list(events):
if re.match('\\d{8}_\\d{2}:\\d{2}', event[3]) or any(k in event[1] for
k in ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE',
'IN_CLOSE_WRITE')):
logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'.
format(event[2], event[3], event[1]))
modified_files.append(event)
return modified_files
def config_inotify_setup(skipWatchingNonExistent: bool
) ->inotify.adapters.Inotify:
i = inotify.adapters.Inotify()
watchedDirs = {}
for p in Config.paths:
if os.path.exists(p):
if os.path.isfile(p):
logger.debug('Watching existing config file {}'.format(p))
i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |
inotify.constants.IN_DELETE)
else:
logger.debug('Watching existing config directory {}'.format(p))
i.add_watch(p)
elif not skipWatchingNonExistent:
d, n = os.path.split(p)
while not os.path.exists(d):
d, n = os.path.split(d)
if d not in watchedDirs:
i.add_watch(d, inotify.constants.IN_DELETE | inotify.
constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)
logger.debug('Watching directory {} as base for {}'.format(
d, p))
watchedDirs[d] = True
return i
@threaded
def update_config(skipWatchingNonExistent=True):
i = config_inotify_setup(skipWatchingNonExistent)
while True:
reload = False
for event in i.event_gen(yield_nones=False):
_, mask, dir, file = event
fp = os.path.join(dir, file).rstrip(os.path.sep)
for p in Config.paths:
if p == fp or dir == p:
reload = True
break
if reload:
break
if reload:
logger.debug('Will reload configuration')
Config.reload_data()
i = config_inotify_setup()
MirrorManager.update_mirror_list()
else:
logger.debug('No need to reload configuration')
@threaded
def update_metadata(img_dir, streams_dir):
MirrorManager.img_dir = img_dir
MirrorManager.update_mirror_list()
while True:
events = event_queue.get()
ops = Operations(events, str(Path(img_dir).resolve()))
if ops:
logger.info('Updating server: %s', ','.join(str(x) for x in ops
.ops))
images = Images(str(Path(streams_dir).resolve()), logger=logger)
images.update(ops.ops)
images.save()
MirrorManager.update()
logger.info('Server updated')
def fix_permissions(path):
Path(path).chmod(509)
for root, dirs, files in os.walk(path):
for elem in files:
Path(root, elem).chmod(509)
for elem in dirs:
Path(root, elem).chmod(509)
@click.group()
@click.option('--log-file', default='./lxd-image-server.log', show_default=True
)
@click.option('--verbose', help='Sets log level to debug', is_flag=True,
default=False)
def cli(log_file, verbose):
configure_log(log_file, verbose)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True), callback=lambda ctx, param, val: Path(val))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.pass_context
def update(ctx, img_dir, streams_dir):
logger.info('Updating server')
img_dir = Path(img_dir).expanduser().resolve()
streams_dir = Path(streams_dir).expanduser().resolve()
images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=
logger)
fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),
str(img_dir.name))]
operations = Operations(fake_events, str(img_dir))
images.update(operations.ops)
images.save()
logger.info('Server updated')
@cli.command()
@click.option('--root_dir', default='/var/www/simplestreams', show_default=True
)
@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,
callback=lambda ctx, param, val: Path(val))
@click.option('--ssl_skip', default=False, is_flag=True)
@click.option('--nginx_skip', default=False, is_flag=True)
@click.pass_context
def init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):
root_dir = Path(root_dir).expanduser().resolve()
if not Path(root_dir).exists():
logger.error('Root directory does not exists')
else:
if nginx_skip:
ssl_skip = True
if not ssl_skip:
if not ssl_dir.exists():
os.makedirs(str(ssl_dir))
if not (ssl_dir / 'nginx.key').exists():
generate_cert(str(ssl_dir))
img_dir = str(Path(root_dir, 'images'))
streams_dir = str(Path(root_dir, 'streams/v1'))
if not Path(img_dir).exists():
os.makedirs(img_dir)
if not Path(streams_dir).exists():
os.makedirs(streams_dir)
if not nginx_skip:
conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')
if not conf_path.exists():
conf_path.symlink_to(
'/etc/nginx/sites-available/simplestreams.conf')
os.system('nginx -s reload')
if not Path(root_dir, 'streams', 'v1', 'images.json').exists():
ctx.invoke(update, img_dir=Path(root_dir, 'images'),
streams_dir=Path(root_dir, 'streams', 'v1'))
fix_permissions(img_dir)
fix_permissions(streams_dir)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
show_default=True, type=click.Path(exists=True, file_okay=False,
resolve_path=True))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
type=click.Path(exists=True, file_okay=False, resolve_path=True),
show_default=True)
@click.option('--skip-watch-config-non-existent', default=False, type=bool,
is_flag=True)
@click.pass_context
def watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):
path_img_dir = str(Path(img_dir).expanduser().resolve())
path_streams_dir = str(Path(streams_dir).expanduser().resolve())
logger.info('Starting watch process')
Config.load_data()
update_config(skip_watch_config_non_existent)
update_metadata(path_img_dir, path_streams_dir)
logger.debug('Watching image directory {}'.format(path_img_dir))
i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |
IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)
while True:
events = i.event_gen(yield_nones=False, timeout_s=15)
files_changed = needs_update(events)
if files_changed:
event_queue.put(files_changed)
def main():
try:
sys.exit(cli())
except Exception:
logger.error(traceback.format_exc())
sys.exit(1)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import os
import sys
import re
import traceback
import logging
import queue
import threading
from logging.handlers import TimedRotatingFileHandler
from pathlib import Path
import click
import inotify.adapters
from inotify.constants import (IN_ATTRIB, IN_DELETE, IN_MOVED_FROM,
IN_MOVED_TO, IN_CLOSE_WRITE)
from lxd_image_server.simplestreams.images import Images
from lxd_image_server.tools.cert import generate_cert
from lxd_image_server.tools.operation import Operations
from lxd_image_server.tools.mirror import MirrorManager
from lxd_image_server.tools.config import Config
# Module-wide logger; handlers are attached later by configure_log().
logger = logging.getLogger('lxd-image-server')
# Batches of relevant inotify events, produced by the watch() loop and
# consumed by the update_metadata() background thread.
event_queue = queue.Queue()
def threaded(fn):
    """Decorator: run *fn* on a fresh background thread each call.

    The wrapper returns immediately (it does not return the thread or the
    function's result); callers use it for fire-and-forget workers.
    """
    def wrapper(*args, **kwargs):
        worker = threading.Thread(target=fn, args=args, kwargs=kwargs)
        worker.start()
    return wrapper
def configure_log(log_file, verbose=False):
    """Attach a handler to the module logger.

    *log_file* may be the literal 'STDOUT' or 'STDERR' to log to a stream,
    or a filesystem path, in which case output rotates weekly keeping four
    backups. *verbose* switches the level from INFO to DEBUG.
    """
    if log_file == 'STDOUT':
        handler = logging.StreamHandler(sys.stdout)
    elif log_file == 'STDERR':
        handler = logging.StreamHandler(sys.stderr)
    else:
        # Rotate every 7 days; keep roughly a month of history.
        handler = TimedRotatingFileHandler(log_file, when='d',
                                           interval=7, backupCount=4)
    fmt = '[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s'
    handler.setFormatter(logging.Formatter(fmt))
    logger.setLevel('DEBUG' if verbose else 'INFO')
    logger.addHandler(handler)
def needs_update(events):
    """Return the subset of inotify *events* that should trigger a rebuild.

    An event is relevant when its filename looks like a timestamped image
    directory (``YYYYMMDD_HH:MM``) or its type list contains a move,
    delete, or close-write event. Each event is the 4-tuple produced by
    inotify's ``event_gen``: ``(header, type_names, path, filename)``.
    """
    relevant = ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE', 'IN_CLOSE_WRITE')
    modified_files = []
    # Iterate the generator directly; no need to materialize it first.
    for event in events:
        # Raw string for the regex: '\d' in a plain literal only works by
        # accident and warns on modern Python.
        if re.match(r'\d{8}_\d{2}:\d{2}', event[3]) or \
                any(k in event[1] for k in relevant):
            logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'
                         .format(event[2], event[3], event[1]))
            modified_files.append(event)
    return modified_files
def config_inotify_setup(skipWatchingNonExistent: bool = True) -> inotify.adapters.Inotify:
    """Build an inotify watcher covering every configuration path.

    Existing files are watched for close-write/delete; existing directories
    for all events. When *skipWatchingNonExistent* is False, paths that do
    not exist yet are covered by watching their nearest existing ancestor
    directory so their later creation is noticed.

    The default value fixes a crash: update_config() re-arms the watcher by
    calling this function with no argument, which previously raised
    TypeError because the parameter was required.
    """
    i = inotify.adapters.Inotify()
    watchedDirs = {}
    for p in Config.paths:
        if os.path.exists(p):
            if os.path.isfile(p):
                logger.debug("Watching existing config file {}".format(p))
                i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |
                            inotify.constants.IN_DELETE)
            else:
                logger.debug("Watching existing config directory {}".format(p))
                i.add_watch(p)  # SEEME: all events?
        elif not skipWatchingNonExistent:
            # Walk up to the nearest existing ancestor and watch that as a
            # proxy for the missing path.
            (d, n) = os.path.split(p)
            while not os.path.exists(d):
                (d, n) = os.path.split(d)
            if d not in watchedDirs:
                i.add_watch(d, inotify.constants.IN_DELETE |
                            inotify.constants.IN_CLOSE_WRITE |
                            inotify.constants.IN_CREATE)
                logger.debug("Watching directory {} as base for {}".format(d, p))
                watchedDirs[d] = True
    return i
@threaded
def update_config(skipWatchingNonExistent = True):
    """Background worker: reload configuration when a watched path changes.

    Blocks on inotify events; when an event touches one of Config.paths
    (or its parent directory), the configuration and the mirror list are
    reloaded and the watcher is rebuilt.
    """
    i = config_inotify_setup(skipWatchingNonExistent)
    while True:
        reload = False
        for event in i.event_gen(yield_nones=False):
            (_, mask, dir, file) = event
            fp = os.path.join(dir, file).rstrip(os.path.sep)
            for p in Config.paths:
                if p == fp or (dir == p):
                    reload = True
                    break
            if reload:
                break
        if reload:
            logger.debug("Will reload configuration")
            Config.reload_data()
            # Re-arm the watcher, preserving the caller's skip flag. The
            # original called config_inotify_setup() with no argument,
            # which raised TypeError on the first reload and also silently
            # dropped the skipWatchingNonExistent preference.
            i = config_inotify_setup(skipWatchingNonExistent)
            MirrorManager.update_mirror_list()
        else:
            logger.debug("No need to reload configuration")
@threaded
def update_metadata(img_dir, streams_dir):
    """Background worker: rebuild stream metadata for queued image events.

    Seeds the mirror manager with *img_dir*, then loops forever consuming
    batches of inotify events from event_queue and applying the resulting
    operations to the simplestreams index under *streams_dir*.
    """
    MirrorManager.img_dir = img_dir
    MirrorManager.update_mirror_list()
    while True:
        changed = event_queue.get()
        ops = Operations(changed, str(Path(img_dir).resolve()))
        if not ops:
            continue
        logger.info('Updating server: %s',
                    ','.join(str(x) for x in ops.ops))
        images = Images(str(Path(streams_dir).resolve()), logger=logger)
        images.update(ops.ops)
        images.save()
        MirrorManager.update()
        logger.info('Server updated')
def fix_permissions(path):
    """Recursively chmod *path* and everything beneath it to 0o775."""
    mode = 0o775
    Path(path).chmod(mode)
    for root, dirs, files in os.walk(path):
        for name in files:
            Path(root, name).chmod(mode)
        for name in dirs:
            Path(root, name).chmod(mode)
@click.group()
@click.option('--log-file', default='./lxd-image-server.log',
              show_default=True)
@click.option('--verbose', help='Sets log level to debug',
              is_flag=True, default=False)
def cli(log_file, verbose):
    """Root command group: configure logging before any subcommand runs."""
    configure_log(log_file, verbose)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
              show_default=True,
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True),
              callback=lambda ctx, param, val: Path(val))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
              show_default=True,
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True))
@click.pass_context
def update(ctx, img_dir, streams_dir):
    """One-shot rebuild of the simplestreams index from the image tree."""
    logger.info('Updating server')
    img_dir = Path(img_dir).expanduser().resolve()
    streams_dir = Path(streams_dir).expanduser().resolve()
    # rebuild=True discards the existing index and regenerates it in full.
    images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=logger)
    # Generate a fake event to update all tree
    # (a synthetic IN_CREATE on img_dir makes Operations walk everything).
    fake_events = [
        (None, ['IN_ISDIR', 'IN_CREATE'],
         str(img_dir.parent), str(img_dir.name))
    ]
    operations = Operations(fake_events, str(img_dir))
    images.update(operations.ops)
    images.save()
    logger.info('Server updated')
@cli.command()
@click.option('--root_dir', default='/var/www/simplestreams',
              show_default=True)
@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,
              callback=lambda ctx, param, val: Path(val))
@click.option('--ssl_skip', default=False, is_flag=True)
@click.option('--nginx_skip', default=False, is_flag=True)
@click.pass_context
def init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):
    """Bootstrap the server layout: directories, SSL cert, nginx config.

    Creates images/ and streams/v1 under *root_dir*, optionally generates
    a self-signed certificate, enables the nginx site, builds the initial
    index via the `update` command, and normalizes permissions.
    """
    root_dir = Path(root_dir).expanduser().resolve()
    if not Path(root_dir).exists():
        logger.error('Root directory does not exists')
    else:
        # Skipping nginx implies skipping SSL: the cert is only consumed
        # by the nginx site configuration.
        if nginx_skip:
            ssl_skip = True
        if not ssl_skip:
            if not ssl_dir.exists():
                os.makedirs(str(ssl_dir))
            if not (ssl_dir / 'nginx.key').exists():
                generate_cert(str(ssl_dir))
        img_dir = str(Path(root_dir, 'images'))
        streams_dir = str(Path(root_dir, 'streams/v1'))
        if not Path(img_dir).exists():
            os.makedirs(img_dir)
        if not Path(streams_dir).exists():
            os.makedirs(streams_dir)
        if not nginx_skip:
            # Enable the pre-installed site config and reload nginx.
            conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')
            if not conf_path.exists():
                conf_path.symlink_to(
                    '/etc/nginx/sites-available/simplestreams.conf')
            os.system('nginx -s reload')
        # First run: build the index by invoking the `update` subcommand.
        if not Path(root_dir, 'streams', 'v1', 'images.json').exists():
            ctx.invoke(update, img_dir=Path(root_dir, 'images'),
                       streams_dir=Path(root_dir, 'streams', 'v1'))
        fix_permissions(img_dir)
        fix_permissions(streams_dir)
@cli.command()
@click.option('--img_dir', default='/var/www/simplestreams/images',
              show_default=True,
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True))
@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',
              type=click.Path(exists=True, file_okay=False,
                              resolve_path=True), show_default=True)
@click.option('--skip-watch-config-non-existent', default=False, type=bool, is_flag=True)
@click.pass_context
def watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):
    """Long-running watcher: feed image-tree changes to the update worker.

    Spawns the config-reload and metadata-update threads, then blocks in an
    inotify loop over *img_dir*, pushing batches of relevant events onto
    event_queue for update_metadata() to consume.
    """
    path_img_dir = str(Path(img_dir).expanduser().resolve())
    path_streams_dir = str(Path(streams_dir).expanduser().resolve())
    logger.info("Starting watch process")

    Config.load_data()

    # Launch worker threads.
    # SEEME: if an event arrives from the config watcher, there is a race
    # between the update_config thread (which indirectly reads
    # MirrorManager.img_dir) and the update_metadata thread (which sets it).
    # Both threads also call MirrorManager.update_mirror_list().
    update_config(skip_watch_config_non_existent)
    update_metadata(path_img_dir, path_streams_dir)

    logger.debug("Watching image directory {}".format(path_img_dir))
    i = inotify.adapters.InotifyTree(path_img_dir,
                                     mask=(IN_ATTRIB | IN_DELETE |
                                           IN_MOVED_FROM | IN_MOVED_TO |
                                           IN_CLOSE_WRITE))

    while True:
        # Collect up to 15 s worth of events, then queue the relevant ones.
        events = i.event_gen(yield_nones=False, timeout_s=15)
        files_changed = needs_update(events)
        if files_changed:
            event_queue.put(files_changed)
def main():
    """CLI entry point: run the click group; log a traceback and exit 1 on crash."""
    exit_code = 1
    try:
        exit_code = cli()
    except Exception:
        # Unexpected failure: record the full traceback, fall through to exit(1).
        logger.error(traceback.format_exc())
    sys.exit(exit_code)
# Script entry point: delegate to main() so the module stays importable.
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "3a96ede91069df0c71905415e598dbbd9d3056fd",
"index": 9730,
"step-1": "<mask token>\n\n\ndef configure_log(log_file, verbose=False):\n filename = log_file\n if log_file == 'STDOUT':\n handler = logging.StreamHandler(sys.stdout)\n elif log_file == 'STDERR':\n handler = logging.StreamHandler(sys.stderr)\n else:\n handler = TimedRotatingFileHandler(filename, when='d', interval=7,\n backupCount=4)\n formatter = logging.Formatter(\n '[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n logger.setLevel('DEBUG' if verbose else 'INFO')\n logger.addHandler(handler)\n\n\n<mask token>\n\n\ndef config_inotify_setup(skipWatchingNonExistent: bool\n ) ->inotify.adapters.Inotify:\n i = inotify.adapters.Inotify()\n watchedDirs = {}\n for p in Config.paths:\n if os.path.exists(p):\n if os.path.isfile(p):\n logger.debug('Watching existing config file {}'.format(p))\n i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |\n inotify.constants.IN_DELETE)\n else:\n logger.debug('Watching existing config directory {}'.format(p))\n i.add_watch(p)\n elif not skipWatchingNonExistent:\n d, n = os.path.split(p)\n while not os.path.exists(d):\n d, n = os.path.split(d)\n if d not in watchedDirs:\n i.add_watch(d, inotify.constants.IN_DELETE | inotify.\n constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)\n logger.debug('Watching directory {} as base for {}'.format(\n d, p))\n watchedDirs[d] = True\n return i\n\n\n@threaded\ndef update_config(skipWatchingNonExistent=True):\n i = config_inotify_setup(skipWatchingNonExistent)\n while True:\n reload = False\n for event in i.event_gen(yield_nones=False):\n _, mask, dir, file = event\n fp = os.path.join(dir, file).rstrip(os.path.sep)\n for p in Config.paths:\n if p == fp or dir == p:\n reload = True\n break\n if reload:\n break\n if reload:\n logger.debug('Will reload configuration')\n Config.reload_data()\n i = config_inotify_setup()\n MirrorManager.update_mirror_list()\n else:\n logger.debug('No need to reload configuration')\n\n\n<mask 
token>\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True), callback=lambda ctx, param, val: Path(val))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.pass_context\ndef update(ctx, img_dir, streams_dir):\n logger.info('Updating server')\n img_dir = Path(img_dir).expanduser().resolve()\n streams_dir = Path(streams_dir).expanduser().resolve()\n images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=\n logger)\n fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),\n str(img_dir.name))]\n operations = Operations(fake_events, str(img_dir))\n images.update(operations.ops)\n images.save()\n logger.info('Server updated')\n\n\n@cli.command()\n@click.option('--root_dir', default='/var/www/simplestreams', show_default=True\n )\n@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,\n callback=lambda ctx, param, val: Path(val))\n@click.option('--ssl_skip', default=False, is_flag=True)\n@click.option('--nginx_skip', default=False, is_flag=True)\n@click.pass_context\ndef init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):\n root_dir = Path(root_dir).expanduser().resolve()\n if not Path(root_dir).exists():\n logger.error('Root directory does not exists')\n else:\n if nginx_skip:\n ssl_skip = True\n if not ssl_skip:\n if not ssl_dir.exists():\n os.makedirs(str(ssl_dir))\n if not (ssl_dir / 'nginx.key').exists():\n generate_cert(str(ssl_dir))\n img_dir = str(Path(root_dir, 'images'))\n streams_dir = str(Path(root_dir, 'streams/v1'))\n if not Path(img_dir).exists():\n os.makedirs(img_dir)\n if not Path(streams_dir).exists():\n os.makedirs(streams_dir)\n if not nginx_skip:\n conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')\n if not conf_path.exists():\n 
conf_path.symlink_to(\n '/etc/nginx/sites-available/simplestreams.conf')\n os.system('nginx -s reload')\n if not Path(root_dir, 'streams', 'v1', 'images.json').exists():\n ctx.invoke(update, img_dir=Path(root_dir, 'images'),\n streams_dir=Path(root_dir, 'streams', 'v1'))\n fix_permissions(img_dir)\n fix_permissions(streams_dir)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n type=click.Path(exists=True, file_okay=False, resolve_path=True),\n show_default=True)\n@click.option('--skip-watch-config-non-existent', default=False, type=bool,\n is_flag=True)\n@click.pass_context\ndef watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):\n path_img_dir = str(Path(img_dir).expanduser().resolve())\n path_streams_dir = str(Path(streams_dir).expanduser().resolve())\n logger.info('Starting watch process')\n Config.load_data()\n update_config(skip_watch_config_non_existent)\n update_metadata(path_img_dir, path_streams_dir)\n logger.debug('Watching image directory {}'.format(path_img_dir))\n i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |\n IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)\n while True:\n events = i.event_gen(yield_nones=False, timeout_s=15)\n files_changed = needs_update(events)\n if files_changed:\n event_queue.put(files_changed)\n\n\ndef main():\n try:\n sys.exit(cli())\n except Exception:\n logger.error(traceback.format_exc())\n sys.exit(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef threaded(fn):\n\n def wrapper(*args, **kwargs):\n threading.Thread(target=fn, args=args, kwargs=kwargs).start()\n return wrapper\n\n\ndef configure_log(log_file, verbose=False):\n filename = log_file\n if log_file == 'STDOUT':\n handler = logging.StreamHandler(sys.stdout)\n elif log_file == 'STDERR':\n handler = logging.StreamHandler(sys.stderr)\n else:\n handler = TimedRotatingFileHandler(filename, when='d', interval=7,\n backupCount=4)\n formatter = logging.Formatter(\n '[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n logger.setLevel('DEBUG' if verbose else 'INFO')\n logger.addHandler(handler)\n\n\ndef needs_update(events):\n modified_files = []\n for event in list(events):\n if re.match('\\\\d{8}_\\\\d{2}:\\\\d{2}', event[3]) or any(k in event[1] for\n k in ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE',\n 'IN_CLOSE_WRITE')):\n logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'.\n format(event[2], event[3], event[1]))\n modified_files.append(event)\n return modified_files\n\n\ndef config_inotify_setup(skipWatchingNonExistent: bool\n ) ->inotify.adapters.Inotify:\n i = inotify.adapters.Inotify()\n watchedDirs = {}\n for p in Config.paths:\n if os.path.exists(p):\n if os.path.isfile(p):\n logger.debug('Watching existing config file {}'.format(p))\n i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |\n inotify.constants.IN_DELETE)\n else:\n logger.debug('Watching existing config directory {}'.format(p))\n i.add_watch(p)\n elif not skipWatchingNonExistent:\n d, n = os.path.split(p)\n while not os.path.exists(d):\n d, n = os.path.split(d)\n if d not in watchedDirs:\n i.add_watch(d, inotify.constants.IN_DELETE | inotify.\n constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)\n logger.debug('Watching directory {} as base for {}'.format(\n d, p))\n watchedDirs[d] = True\n return i\n\n\n@threaded\ndef update_config(skipWatchingNonExistent=True):\n i = 
config_inotify_setup(skipWatchingNonExistent)\n while True:\n reload = False\n for event in i.event_gen(yield_nones=False):\n _, mask, dir, file = event\n fp = os.path.join(dir, file).rstrip(os.path.sep)\n for p in Config.paths:\n if p == fp or dir == p:\n reload = True\n break\n if reload:\n break\n if reload:\n logger.debug('Will reload configuration')\n Config.reload_data()\n i = config_inotify_setup()\n MirrorManager.update_mirror_list()\n else:\n logger.debug('No need to reload configuration')\n\n\n<mask token>\n\n\ndef fix_permissions(path):\n Path(path).chmod(509)\n for root, dirs, files in os.walk(path):\n for elem in files:\n Path(root, elem).chmod(509)\n for elem in dirs:\n Path(root, elem).chmod(509)\n\n\n@click.group()\n@click.option('--log-file', default='./lxd-image-server.log', show_default=True\n )\n@click.option('--verbose', help='Sets log level to debug', is_flag=True,\n default=False)\ndef cli(log_file, verbose):\n configure_log(log_file, verbose)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True), callback=lambda ctx, param, val: Path(val))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.pass_context\ndef update(ctx, img_dir, streams_dir):\n logger.info('Updating server')\n img_dir = Path(img_dir).expanduser().resolve()\n streams_dir = Path(streams_dir).expanduser().resolve()\n images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=\n logger)\n fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),\n str(img_dir.name))]\n operations = Operations(fake_events, str(img_dir))\n images.update(operations.ops)\n images.save()\n logger.info('Server updated')\n\n\n@cli.command()\n@click.option('--root_dir', default='/var/www/simplestreams', show_default=True\n 
)\n@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,\n callback=lambda ctx, param, val: Path(val))\n@click.option('--ssl_skip', default=False, is_flag=True)\n@click.option('--nginx_skip', default=False, is_flag=True)\n@click.pass_context\ndef init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):\n root_dir = Path(root_dir).expanduser().resolve()\n if not Path(root_dir).exists():\n logger.error('Root directory does not exists')\n else:\n if nginx_skip:\n ssl_skip = True\n if not ssl_skip:\n if not ssl_dir.exists():\n os.makedirs(str(ssl_dir))\n if not (ssl_dir / 'nginx.key').exists():\n generate_cert(str(ssl_dir))\n img_dir = str(Path(root_dir, 'images'))\n streams_dir = str(Path(root_dir, 'streams/v1'))\n if not Path(img_dir).exists():\n os.makedirs(img_dir)\n if not Path(streams_dir).exists():\n os.makedirs(streams_dir)\n if not nginx_skip:\n conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')\n if not conf_path.exists():\n conf_path.symlink_to(\n '/etc/nginx/sites-available/simplestreams.conf')\n os.system('nginx -s reload')\n if not Path(root_dir, 'streams', 'v1', 'images.json').exists():\n ctx.invoke(update, img_dir=Path(root_dir, 'images'),\n streams_dir=Path(root_dir, 'streams', 'v1'))\n fix_permissions(img_dir)\n fix_permissions(streams_dir)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n type=click.Path(exists=True, file_okay=False, resolve_path=True),\n show_default=True)\n@click.option('--skip-watch-config-non-existent', default=False, type=bool,\n is_flag=True)\n@click.pass_context\ndef watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):\n path_img_dir = str(Path(img_dir).expanduser().resolve())\n path_streams_dir = str(Path(streams_dir).expanduser().resolve())\n logger.info('Starting watch 
process')\n Config.load_data()\n update_config(skip_watch_config_non_existent)\n update_metadata(path_img_dir, path_streams_dir)\n logger.debug('Watching image directory {}'.format(path_img_dir))\n i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |\n IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)\n while True:\n events = i.event_gen(yield_nones=False, timeout_s=15)\n files_changed = needs_update(events)\n if files_changed:\n event_queue.put(files_changed)\n\n\ndef main():\n try:\n sys.exit(cli())\n except Exception:\n logger.error(traceback.format_exc())\n sys.exit(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef threaded(fn):\n\n def wrapper(*args, **kwargs):\n threading.Thread(target=fn, args=args, kwargs=kwargs).start()\n return wrapper\n\n\ndef configure_log(log_file, verbose=False):\n filename = log_file\n if log_file == 'STDOUT':\n handler = logging.StreamHandler(sys.stdout)\n elif log_file == 'STDERR':\n handler = logging.StreamHandler(sys.stderr)\n else:\n handler = TimedRotatingFileHandler(filename, when='d', interval=7,\n backupCount=4)\n formatter = logging.Formatter(\n '[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n logger.setLevel('DEBUG' if verbose else 'INFO')\n logger.addHandler(handler)\n\n\ndef needs_update(events):\n modified_files = []\n for event in list(events):\n if re.match('\\\\d{8}_\\\\d{2}:\\\\d{2}', event[3]) or any(k in event[1] for\n k in ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE',\n 'IN_CLOSE_WRITE')):\n logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'.\n format(event[2], event[3], event[1]))\n modified_files.append(event)\n return modified_files\n\n\ndef config_inotify_setup(skipWatchingNonExistent: bool\n ) ->inotify.adapters.Inotify:\n i = inotify.adapters.Inotify()\n watchedDirs = {}\n for p in Config.paths:\n if os.path.exists(p):\n if os.path.isfile(p):\n logger.debug('Watching existing config file {}'.format(p))\n i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |\n inotify.constants.IN_DELETE)\n else:\n logger.debug('Watching existing config directory {}'.format(p))\n i.add_watch(p)\n elif not skipWatchingNonExistent:\n d, n = os.path.split(p)\n while not os.path.exists(d):\n d, n = os.path.split(d)\n if d not in watchedDirs:\n i.add_watch(d, inotify.constants.IN_DELETE | inotify.\n constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)\n logger.debug('Watching directory {} as base for {}'.format(\n d, p))\n watchedDirs[d] = True\n return i\n\n\n@threaded\ndef update_config(skipWatchingNonExistent=True):\n i = 
config_inotify_setup(skipWatchingNonExistent)\n while True:\n reload = False\n for event in i.event_gen(yield_nones=False):\n _, mask, dir, file = event\n fp = os.path.join(dir, file).rstrip(os.path.sep)\n for p in Config.paths:\n if p == fp or dir == p:\n reload = True\n break\n if reload:\n break\n if reload:\n logger.debug('Will reload configuration')\n Config.reload_data()\n i = config_inotify_setup()\n MirrorManager.update_mirror_list()\n else:\n logger.debug('No need to reload configuration')\n\n\n@threaded\ndef update_metadata(img_dir, streams_dir):\n MirrorManager.img_dir = img_dir\n MirrorManager.update_mirror_list()\n while True:\n events = event_queue.get()\n ops = Operations(events, str(Path(img_dir).resolve()))\n if ops:\n logger.info('Updating server: %s', ','.join(str(x) for x in ops\n .ops))\n images = Images(str(Path(streams_dir).resolve()), logger=logger)\n images.update(ops.ops)\n images.save()\n MirrorManager.update()\n logger.info('Server updated')\n\n\ndef fix_permissions(path):\n Path(path).chmod(509)\n for root, dirs, files in os.walk(path):\n for elem in files:\n Path(root, elem).chmod(509)\n for elem in dirs:\n Path(root, elem).chmod(509)\n\n\n@click.group()\n@click.option('--log-file', default='./lxd-image-server.log', show_default=True\n )\n@click.option('--verbose', help='Sets log level to debug', is_flag=True,\n default=False)\ndef cli(log_file, verbose):\n configure_log(log_file, verbose)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True), callback=lambda ctx, param, val: Path(val))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.pass_context\ndef update(ctx, img_dir, streams_dir):\n logger.info('Updating server')\n img_dir = Path(img_dir).expanduser().resolve()\n streams_dir = 
Path(streams_dir).expanduser().resolve()\n images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=\n logger)\n fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),\n str(img_dir.name))]\n operations = Operations(fake_events, str(img_dir))\n images.update(operations.ops)\n images.save()\n logger.info('Server updated')\n\n\n@cli.command()\n@click.option('--root_dir', default='/var/www/simplestreams', show_default=True\n )\n@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,\n callback=lambda ctx, param, val: Path(val))\n@click.option('--ssl_skip', default=False, is_flag=True)\n@click.option('--nginx_skip', default=False, is_flag=True)\n@click.pass_context\ndef init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):\n root_dir = Path(root_dir).expanduser().resolve()\n if not Path(root_dir).exists():\n logger.error('Root directory does not exists')\n else:\n if nginx_skip:\n ssl_skip = True\n if not ssl_skip:\n if not ssl_dir.exists():\n os.makedirs(str(ssl_dir))\n if not (ssl_dir / 'nginx.key').exists():\n generate_cert(str(ssl_dir))\n img_dir = str(Path(root_dir, 'images'))\n streams_dir = str(Path(root_dir, 'streams/v1'))\n if not Path(img_dir).exists():\n os.makedirs(img_dir)\n if not Path(streams_dir).exists():\n os.makedirs(streams_dir)\n if not nginx_skip:\n conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')\n if not conf_path.exists():\n conf_path.symlink_to(\n '/etc/nginx/sites-available/simplestreams.conf')\n os.system('nginx -s reload')\n if not Path(root_dir, 'streams', 'v1', 'images.json').exists():\n ctx.invoke(update, img_dir=Path(root_dir, 'images'),\n streams_dir=Path(root_dir, 'streams', 'v1'))\n fix_permissions(img_dir)\n fix_permissions(streams_dir)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.option('--streams_dir', 
default='/var/www/simplestreams/streams/v1',\n type=click.Path(exists=True, file_okay=False, resolve_path=True),\n show_default=True)\n@click.option('--skip-watch-config-non-existent', default=False, type=bool,\n is_flag=True)\n@click.pass_context\ndef watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):\n path_img_dir = str(Path(img_dir).expanduser().resolve())\n path_streams_dir = str(Path(streams_dir).expanduser().resolve())\n logger.info('Starting watch process')\n Config.load_data()\n update_config(skip_watch_config_non_existent)\n update_metadata(path_img_dir, path_streams_dir)\n logger.debug('Watching image directory {}'.format(path_img_dir))\n i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |\n IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)\n while True:\n events = i.event_gen(yield_nones=False, timeout_s=15)\n files_changed = needs_update(events)\n if files_changed:\n event_queue.put(files_changed)\n\n\ndef main():\n try:\n sys.exit(cli())\n except Exception:\n logger.error(traceback.format_exc())\n sys.exit(1)\n\n\n<mask token>\n",
"step-4": "<mask token>\nlogger = logging.getLogger('lxd-image-server')\nevent_queue = queue.Queue()\n\n\ndef threaded(fn):\n\n def wrapper(*args, **kwargs):\n threading.Thread(target=fn, args=args, kwargs=kwargs).start()\n return wrapper\n\n\ndef configure_log(log_file, verbose=False):\n filename = log_file\n if log_file == 'STDOUT':\n handler = logging.StreamHandler(sys.stdout)\n elif log_file == 'STDERR':\n handler = logging.StreamHandler(sys.stderr)\n else:\n handler = TimedRotatingFileHandler(filename, when='d', interval=7,\n backupCount=4)\n formatter = logging.Formatter(\n '[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n logger.setLevel('DEBUG' if verbose else 'INFO')\n logger.addHandler(handler)\n\n\ndef needs_update(events):\n modified_files = []\n for event in list(events):\n if re.match('\\\\d{8}_\\\\d{2}:\\\\d{2}', event[3]) or any(k in event[1] for\n k in ('IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_DELETE',\n 'IN_CLOSE_WRITE')):\n logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'.\n format(event[2], event[3], event[1]))\n modified_files.append(event)\n return modified_files\n\n\ndef config_inotify_setup(skipWatchingNonExistent: bool\n ) ->inotify.adapters.Inotify:\n i = inotify.adapters.Inotify()\n watchedDirs = {}\n for p in Config.paths:\n if os.path.exists(p):\n if os.path.isfile(p):\n logger.debug('Watching existing config file {}'.format(p))\n i.add_watch(p, mask=inotify.constants.IN_CLOSE_WRITE |\n inotify.constants.IN_DELETE)\n else:\n logger.debug('Watching existing config directory {}'.format(p))\n i.add_watch(p)\n elif not skipWatchingNonExistent:\n d, n = os.path.split(p)\n while not os.path.exists(d):\n d, n = os.path.split(d)\n if d not in watchedDirs:\n i.add_watch(d, inotify.constants.IN_DELETE | inotify.\n constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)\n logger.debug('Watching directory {} as base for {}'.format(\n d, p))\n watchedDirs[d] = True\n return i\n\n\n@threaded\ndef 
update_config(skipWatchingNonExistent=True):\n i = config_inotify_setup(skipWatchingNonExistent)\n while True:\n reload = False\n for event in i.event_gen(yield_nones=False):\n _, mask, dir, file = event\n fp = os.path.join(dir, file).rstrip(os.path.sep)\n for p in Config.paths:\n if p == fp or dir == p:\n reload = True\n break\n if reload:\n break\n if reload:\n logger.debug('Will reload configuration')\n Config.reload_data()\n i = config_inotify_setup()\n MirrorManager.update_mirror_list()\n else:\n logger.debug('No need to reload configuration')\n\n\n@threaded\ndef update_metadata(img_dir, streams_dir):\n MirrorManager.img_dir = img_dir\n MirrorManager.update_mirror_list()\n while True:\n events = event_queue.get()\n ops = Operations(events, str(Path(img_dir).resolve()))\n if ops:\n logger.info('Updating server: %s', ','.join(str(x) for x in ops\n .ops))\n images = Images(str(Path(streams_dir).resolve()), logger=logger)\n images.update(ops.ops)\n images.save()\n MirrorManager.update()\n logger.info('Server updated')\n\n\ndef fix_permissions(path):\n Path(path).chmod(509)\n for root, dirs, files in os.walk(path):\n for elem in files:\n Path(root, elem).chmod(509)\n for elem in dirs:\n Path(root, elem).chmod(509)\n\n\n@click.group()\n@click.option('--log-file', default='./lxd-image-server.log', show_default=True\n )\n@click.option('--verbose', help='Sets log level to debug', is_flag=True,\n default=False)\ndef cli(log_file, verbose):\n configure_log(log_file, verbose)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True), callback=lambda ctx, param, val: Path(val))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.pass_context\ndef update(ctx, img_dir, streams_dir):\n logger.info('Updating server')\n img_dir = 
Path(img_dir).expanduser().resolve()\n streams_dir = Path(streams_dir).expanduser().resolve()\n images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=\n logger)\n fake_events = [(None, ['IN_ISDIR', 'IN_CREATE'], str(img_dir.parent),\n str(img_dir.name))]\n operations = Operations(fake_events, str(img_dir))\n images.update(operations.ops)\n images.save()\n logger.info('Server updated')\n\n\n@cli.command()\n@click.option('--root_dir', default='/var/www/simplestreams', show_default=True\n )\n@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,\n callback=lambda ctx, param, val: Path(val))\n@click.option('--ssl_skip', default=False, is_flag=True)\n@click.option('--nginx_skip', default=False, is_flag=True)\n@click.pass_context\ndef init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):\n root_dir = Path(root_dir).expanduser().resolve()\n if not Path(root_dir).exists():\n logger.error('Root directory does not exists')\n else:\n if nginx_skip:\n ssl_skip = True\n if not ssl_skip:\n if not ssl_dir.exists():\n os.makedirs(str(ssl_dir))\n if not (ssl_dir / 'nginx.key').exists():\n generate_cert(str(ssl_dir))\n img_dir = str(Path(root_dir, 'images'))\n streams_dir = str(Path(root_dir, 'streams/v1'))\n if not Path(img_dir).exists():\n os.makedirs(img_dir)\n if not Path(streams_dir).exists():\n os.makedirs(streams_dir)\n if not nginx_skip:\n conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')\n if not conf_path.exists():\n conf_path.symlink_to(\n '/etc/nginx/sites-available/simplestreams.conf')\n os.system('nginx -s reload')\n if not Path(root_dir, 'streams', 'v1', 'images.json').exists():\n ctx.invoke(update, img_dir=Path(root_dir, 'images'),\n streams_dir=Path(root_dir, 'streams', 'v1'))\n fix_permissions(img_dir)\n fix_permissions(streams_dir)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True, type=click.Path(exists=True, file_okay=False,\n 
resolve_path=True))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n type=click.Path(exists=True, file_okay=False, resolve_path=True),\n show_default=True)\n@click.option('--skip-watch-config-non-existent', default=False, type=bool,\n is_flag=True)\n@click.pass_context\ndef watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):\n path_img_dir = str(Path(img_dir).expanduser().resolve())\n path_streams_dir = str(Path(streams_dir).expanduser().resolve())\n logger.info('Starting watch process')\n Config.load_data()\n update_config(skip_watch_config_non_existent)\n update_metadata(path_img_dir, path_streams_dir)\n logger.debug('Watching image directory {}'.format(path_img_dir))\n i = inotify.adapters.InotifyTree(path_img_dir, mask=IN_ATTRIB |\n IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO | IN_CLOSE_WRITE)\n while True:\n events = i.event_gen(yield_nones=False, timeout_s=15)\n files_changed = needs_update(events)\n if files_changed:\n event_queue.put(files_changed)\n\n\ndef main():\n try:\n sys.exit(cli())\n except Exception:\n logger.error(traceback.format_exc())\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os\nimport sys\nimport re\nimport traceback\nimport logging\nimport queue\nimport threading\nfrom logging.handlers import TimedRotatingFileHandler\nfrom pathlib import Path\nimport click\nimport inotify.adapters\nfrom inotify.constants import (IN_ATTRIB, IN_DELETE, IN_MOVED_FROM,\n IN_MOVED_TO, IN_CLOSE_WRITE)\nfrom lxd_image_server.simplestreams.images import Images\nfrom lxd_image_server.tools.cert import generate_cert\nfrom lxd_image_server.tools.operation import Operations\nfrom lxd_image_server.tools.mirror import MirrorManager\nfrom lxd_image_server.tools.config import Config\n\n\nlogger = logging.getLogger('lxd-image-server')\nevent_queue = queue.Queue()\n\ndef threaded(fn):\n def wrapper(*args, **kwargs):\n threading.Thread(target=fn, args=args, kwargs=kwargs).start()\n return wrapper\n\n\ndef configure_log(log_file, verbose=False):\n filename = log_file\n\n if log_file == 'STDOUT':\n handler = logging.StreamHandler(sys.stdout)\n elif log_file == 'STDERR':\n handler = logging.StreamHandler(sys.stderr)\n else:\n handler = TimedRotatingFileHandler(\n filename,\n when=\"d\", interval=7, backupCount=4)\n formatter = logging.Formatter('[%(asctime)s] [LxdImgServer] [%(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n\n logger.setLevel('DEBUG' if verbose else 'INFO')\n logger.addHandler(handler)\n\n\ndef needs_update(events):\n modified_files = []\n for event in list(events):\n if re.match('\\d{8}_\\d{2}:\\d{2}', event[3]) or \\\n any(k in event[1]\n for k in ('IN_MOVED_FROM', 'IN_MOVED_TO',\n 'IN_DELETE', 'IN_CLOSE_WRITE')):\n logger.debug('Event: PATH=[{}] FILENAME=[{}] EVENT_TYPES={}'\n .format(event[2], event[3], event[1]))\n modified_files.append(event)\n\n return modified_files\n\n\n\ndef config_inotify_setup(skipWatchingNonExistent: bool) -> inotify.adapters.Inotify:\n i = inotify.adapters.Inotify()\n watchedDirs = {}\n\n for p in Config.paths:\n if os.path.exists(p):\n if os.path.isfile(p):\n logger.debug(\"Watching existing 
config file {}\".format(p))\n i.add_watch(p, mask= inotify.constants.IN_CLOSE_WRITE | inotify.constants.IN_DELETE)\n else:\n logger.debug(\"Watching existing config directory {}\".format(p))\n i.add_watch(p) # SEEME: all events?\n elif not skipWatchingNonExistent:\n (d, n) = os.path.split(p)\n while not os.path.exists(d):\n (d, n) = os.path.split(d)\n if d not in watchedDirs:\n i.add_watch(d, inotify.constants.IN_DELETE | inotify.constants.IN_CLOSE_WRITE | inotify.constants.IN_CREATE)\n logger.debug(\"Watching directory {} as base for {}\".format(d, p))\n watchedDirs[d] = True\n\n return i\n\n@threaded\ndef update_config(skipWatchingNonExistent = True):\n i = config_inotify_setup(skipWatchingNonExistent)\n while True:\n reload = False\n for event in i.event_gen(yield_nones=False):\n (_, mask, dir, file) = event\n fp = os.path.join(dir, file).rstrip(os.path.sep)\n for p in Config.paths:\n if p == fp or (dir == p):\n reload = True\n break\n if reload:\n break\n\n if reload:\n logger.debug(\"Will reload configuration\")\n Config.reload_data()\n i = config_inotify_setup()\n MirrorManager.update_mirror_list()\n else:\n logger.debug(\"No need to reload configuration\")\n\n\n@threaded\ndef update_metadata(img_dir, streams_dir):\n MirrorManager.img_dir = img_dir\n MirrorManager.update_mirror_list()\n while True:\n events = event_queue.get()\n ops = Operations(events, str(Path(img_dir).resolve()))\n if ops:\n logger.info('Updating server: %s', ','.join(\n str(x) for x in ops.ops))\n images = Images(str(Path(streams_dir).resolve()), logger=logger)\n images.update(ops.ops)\n images.save()\n MirrorManager.update()\n logger.info('Server updated')\n\n\ndef fix_permissions(path):\n Path(path).chmod(0o775)\n for root, dirs, files in os.walk(path):\n for elem in files:\n Path(root, elem).chmod(0o775)\n for elem in dirs:\n Path(root, elem).chmod(0o775)\n\n\n@click.group()\n@click.option('--log-file', default='./lxd-image-server.log',\n show_default=True)\n@click.option('--verbose', 
help='Sets log level to debug',\n is_flag=True, default=False)\ndef cli(log_file, verbose):\n configure_log(log_file, verbose)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True,\n type=click.Path(exists=True, file_okay=False,\n resolve_path=True),\n callback=lambda ctx, param, val: Path(val))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n show_default=True,\n type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.pass_context\ndef update(ctx, img_dir, streams_dir):\n logger.info('Updating server')\n\n img_dir = Path(img_dir).expanduser().resolve()\n streams_dir = Path(streams_dir).expanduser().resolve()\n\n images = Images(str(Path(streams_dir).resolve()), rebuild=True, logger=logger)\n\n # Generate a fake event to update all tree\n fake_events = [\n (None, ['IN_ISDIR', 'IN_CREATE'],\n str(img_dir.parent), str(img_dir.name))\n ]\n operations = Operations(fake_events, str(img_dir))\n images.update(operations.ops)\n images.save()\n\n logger.info('Server updated')\n\n\n@cli.command()\n@click.option('--root_dir', default='/var/www/simplestreams',\n show_default=True)\n@click.option('--ssl_dir', default='/etc/nginx/ssl', show_default=True,\n callback=lambda ctx, param, val: Path(val))\n@click.option('--ssl_skip', default=False, is_flag=True)\n@click.option('--nginx_skip', default=False, is_flag=True)\n@click.pass_context\ndef init(ctx, root_dir, ssl_dir, ssl_skip, nginx_skip):\n root_dir = Path(root_dir).expanduser().resolve()\n\n if not Path(root_dir).exists():\n logger.error('Root directory does not exists')\n else:\n if nginx_skip:\n ssl_skip = True\n\n if not ssl_skip:\n if not ssl_dir.exists():\n os.makedirs(str(ssl_dir))\n\n if not (ssl_dir / 'nginx.key').exists():\n generate_cert(str(ssl_dir))\n\n img_dir = str(Path(root_dir, 'images'))\n streams_dir = str(Path(root_dir, 'streams/v1'))\n if not Path(img_dir).exists():\n os.makedirs(img_dir)\n if not 
Path(streams_dir).exists():\n os.makedirs(streams_dir)\n\n if not nginx_skip:\n conf_path = Path('/etc/nginx/sites-enabled/simplestreams.conf')\n if not conf_path.exists():\n conf_path.symlink_to(\n '/etc/nginx/sites-available/simplestreams.conf')\n os.system('nginx -s reload')\n\n if not Path(root_dir, 'streams', 'v1', 'images.json').exists():\n ctx.invoke(update, img_dir=Path(root_dir, 'images'),\n streams_dir=Path(root_dir, 'streams', 'v1'))\n\n fix_permissions(img_dir)\n fix_permissions(streams_dir)\n\n\n@cli.command()\n@click.option('--img_dir', default='/var/www/simplestreams/images',\n show_default=True,\n type=click.Path(exists=True, file_okay=False,\n resolve_path=True))\n@click.option('--streams_dir', default='/var/www/simplestreams/streams/v1',\n type=click.Path(exists=True, file_okay=False,\n resolve_path=True), show_default=True)\n@click.option('--skip-watch-config-non-existent', default=False, type=bool, is_flag=True)\n@click.pass_context\ndef watch(ctx, img_dir, streams_dir, skip_watch_config_non_existent: bool):\n path_img_dir = str(Path(img_dir).expanduser().resolve())\n path_streams_dir = str(Path(streams_dir).expanduser().resolve())\n logger.info(\"Starting watch process\")\n\n Config.load_data()\n # Lauch threads\n # SEEME: in case an event will come from watching config files, there is a race condition between update_config\n # thread using indirectly MirrorManager.img_dir and thread update_metadata setting MirrorManager.img_dir\n # Also, race condition on calling MirrorManager.update_mirror_list() in both threads.\n update_config(skip_watch_config_non_existent)\n update_metadata(path_img_dir, path_streams_dir)\n logger.debug(\"Watching image directory {}\".format(path_img_dir))\n\n i = inotify.adapters.InotifyTree(path_img_dir,\n mask=(IN_ATTRIB | IN_DELETE |\n IN_MOVED_FROM | IN_MOVED_TO |\n IN_CLOSE_WRITE))\n\n while True:\n events = i.event_gen(yield_nones=False, timeout_s=15)\n files_changed = needs_update(events)\n if files_changed:\n 
event_queue.put(files_changed)\n\n\ndef main():\n try:\n sys.exit(cli())\n except Exception:\n logger.error(traceback.format_exc())\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
11,
12,
14,
16
]
}
|
[
7,
11,
12,
14,
16
] |
<|reserved_special_token_0|>
def init():
gpio.setmode(gpio.BCM)
gpio.setup(26, gpio.OUT)
gpio.setup(19, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(6, gpio.OUT)
def turn_left(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
<|reserved_special_token_0|>
def forward(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
<|reserved_special_token_0|>
def stop(tf):
gpio.output(26, False)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, False)
sleep(tf)
gpio.cleanup()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init():
gpio.setmode(gpio.BCM)
gpio.setup(26, gpio.OUT)
gpio.setup(19, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(6, gpio.OUT)
def turn_left(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def turn_right(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def forward(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
<|reserved_special_token_0|>
def stop(tf):
gpio.output(26, False)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, False)
sleep(tf)
gpio.cleanup()
def drive(direction, tym):
init()
if direction == 'forward':
forward(tym)
stop(tym)
elif direction == 'reverse':
reverse(tym)
stop(tym)
elif direction == 'left':
turn_left(tym)
stop(tym)
elif direction == 'right':
turn_right(tym)
stop(tym)
elif direction == 'stop':
stop(tym)
else:
stop(tym)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init():
gpio.setmode(gpio.BCM)
gpio.setup(26, gpio.OUT)
gpio.setup(19, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(6, gpio.OUT)
def turn_left(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def turn_right(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def forward(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def reverse(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def stop(tf):
gpio.output(26, False)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, False)
sleep(tf)
gpio.cleanup()
def drive(direction, tym):
init()
if direction == 'forward':
forward(tym)
stop(tym)
elif direction == 'reverse':
reverse(tym)
stop(tym)
elif direction == 'left':
turn_left(tym)
stop(tym)
elif direction == 'right':
turn_right(tym)
stop(tym)
elif direction == 'stop':
stop(tym)
else:
stop(tym)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
gpio.setwarnings(False)
def init():
gpio.setmode(gpio.BCM)
gpio.setup(26, gpio.OUT)
gpio.setup(19, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(6, gpio.OUT)
def turn_left(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def turn_right(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def forward(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def reverse(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def stop(tf):
gpio.output(26, False)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, False)
sleep(tf)
gpio.cleanup()
def drive(direction, tym):
init()
if direction == 'forward':
forward(tym)
stop(tym)
elif direction == 'reverse':
reverse(tym)
stop(tym)
elif direction == 'left':
turn_left(tym)
stop(tym)
elif direction == 'right':
turn_right(tym)
stop(tym)
elif direction == 'stop':
stop(tym)
else:
stop(tym)
if __name__ == '__main__':
import sys
drive(sys.argv[1], float(sys.argv[2]))
gpio.cleanup()
<|reserved_special_token_1|>
from time import sleep
import RPi.GPIO as gpio
#GPIO.setmode(GPIO.BCM)
gpio.setwarnings(False)
def init():
gpio.setmode(gpio.BCM)
gpio.setup(26, gpio.OUT)
gpio.setup(19, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(6, gpio.OUT)
def turn_left(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def turn_right(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def forward(tf):
gpio.output(26, True)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, True)
sleep(tf)
def reverse(tf):
gpio.output(26, False)
gpio.output(19, True)
gpio.output(13, True)
gpio.output(6, False)
sleep(tf)
def stop(tf):
gpio.output(26, False)
gpio.output(19, False)
gpio.output(13, False)
gpio.output(6, False)
sleep(tf)
gpio.cleanup()
def drive(direction, tym):
init()
if direction == "forward":
forward(tym)
stop(tym)
elif direction == "reverse":
reverse(tym)
stop(tym)
elif direction == "left":
turn_left(tym)
stop(tym)
elif direction == "right":
turn_right(tym)
stop(tym)
elif direction == "stop":
stop(tym)
else :
stop(tym)
if __name__ == '__main__':
import sys
drive((sys.argv[1]), float(sys.argv[2]))
gpio.cleanup()
##
##init()
##forward(0.6)
##sleep(1)
##reverse(0.6)
##sleep(1)
##turn_right(0.6)
##sleep(1)
##turn_left(0.6)
##stop(1)
|
flexible
|
{
"blob_id": "a7cbd595b86908fb399bf11e1522588e0b0475c3",
"index": 9226,
"step-1": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\n<mask token>\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\n<mask token>\n",
"step-4": "<mask token>\ngpio.setwarnings(False)\n\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n\n\ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n\n\ndef drive(direction, tym):\n init()\n if direction == 'forward':\n forward(tym)\n stop(tym)\n elif direction == 'reverse':\n reverse(tym)\n stop(tym)\n elif direction == 'left':\n turn_left(tym)\n stop(tym)\n elif direction == 'right':\n turn_right(tym)\n stop(tym)\n elif direction == 'stop':\n stop(tym)\n else:\n stop(tym)\n\n\nif __name__ == '__main__':\n import sys\n drive(sys.argv[1], float(sys.argv[2]))\n gpio.cleanup()\n",
"step-5": "from time import sleep\nimport RPi.GPIO as gpio\n#GPIO.setmode(GPIO.BCM)\ngpio.setwarnings(False)\n\ndef init():\n gpio.setmode(gpio.BCM)\n gpio.setup(26, gpio.OUT)\n gpio.setup(19, gpio.OUT)\n gpio.setup(13, gpio.OUT)\n gpio.setup(6, gpio.OUT)\n\ndef turn_left(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n \ndef turn_right(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n \ndef forward(tf):\n gpio.output(26, True)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, True)\n sleep(tf)\n \ndef reverse(tf):\n gpio.output(26, False)\n gpio.output(19, True)\n gpio.output(13, True)\n gpio.output(6, False)\n sleep(tf)\n\ndef stop(tf):\n gpio.output(26, False)\n gpio.output(19, False)\n gpio.output(13, False)\n gpio.output(6, False)\n sleep(tf)\n gpio.cleanup()\n \ndef drive(direction, tym):\n init()\n \n if direction == \"forward\":\n forward(tym)\n stop(tym)\n \n elif direction == \"reverse\":\n reverse(tym)\n stop(tym)\n\n elif direction == \"left\":\n turn_left(tym)\n stop(tym)\n\n elif direction == \"right\":\n turn_right(tym)\n stop(tym)\n\n elif direction == \"stop\":\n stop(tym)\n\n else :\n stop(tym)\n\n\n\nif __name__ == '__main__':\n\timport sys\n\tdrive((sys.argv[1]), float(sys.argv[2]))\n\tgpio.cleanup()\n\n##\n##init()\n##forward(0.6)\n##sleep(1)\n##reverse(0.6)\n##sleep(1)\n##turn_right(0.6)\n##sleep(1)\n##turn_left(0.6)\n##stop(1)\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
from phylo_utils.data import fixed_equal_nucleotide_frequencies
from phylo_utils.substitution_models.tn93 import TN93
class K80(TN93):
_name = 'K80'
_freqs = fixed_equal_nucleotide_frequencies.copy()
def __init__(self, kappa, scale_q=True):
super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q
)
|
normal
|
{
"blob_id": "0f0595793e98187c6aaf5b1f4b59affb06bb598e",
"index": 3159,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass K80(TN93):\n <mask token>\n <mask token>\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-3": "<mask token>\n\n\nclass K80(TN93):\n _name = 'K80'\n _freqs = fixed_equal_nucleotide_frequencies.copy()\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-4": "from phylo_utils.data import fixed_equal_nucleotide_frequencies\nfrom phylo_utils.substitution_models.tn93 import TN93\n\n\nclass K80(TN93):\n _name = 'K80'\n _freqs = fixed_equal_nucleotide_frequencies.copy()\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
indelCost = 1
swapCost = 13
subCost = 12
noOp = 0
def alignStrings(x,y):
nx = len(x)
ny = len(y)
S = matrix(nx+1, ny+1) #??
for i in range (nx+1)
for j in range (ny+1)
if i == 0: #if the string is empty
S[i][j] = j #this will put all the letters from j in i
elif j == 0: #if the second string is empy
S[i][j] = i #this will putj all the letter from i in j
elif
|
normal
|
{
"blob_id": "65aa85675393efa1a0d8e5bab4b1dbf388018c58",
"index": 261,
"step-1": "\nindelCost = 1\nswapCost = 13\nsubCost = 12\nnoOp = 0\n\t\ndef alignStrings(x,y):\n\t\n\tnx = len(x)\n\tny = len(y)\n\tS = matrix(nx+1, ny+1) #?? \n\t\n\tfor i in range (nx+1)\n\t\tfor j in range (ny+1)\n\t\t\tif i == 0:\t#if the string is empty\n\t\t\t\tS[i][j] = j\t\t\t#this will put all the letters from j in i\n\t\t\telif j == 0:\t\t#if the second string is empy\n\t\t\t\tS[i][j] = i\t\t#this will putj all the letter from i in j\n\t\t\telif \n\t\t\t\t\n\t\t\t\n\t\n\t\n\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_logs(ip_addr, pem_file, log_dir):
pem = paramiko.RSAKey.from_private_key_file(pem_file)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)
ftp = client.open_sftp()
logs = sorted(ftp.listdir('/home/ec2-user/logs/'))
for l in logs:
if l.endswith('.txt'):
print(l)
client.exec_command(
f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')
ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')
client.exec_command('rm /home/ec2-user/logs/tmp')
ftp.close()
client.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_logs(ip_addr, pem_file, log_dir):
pem = paramiko.RSAKey.from_private_key_file(pem_file)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)
ftp = client.open_sftp()
logs = sorted(ftp.listdir('/home/ec2-user/logs/'))
for l in logs:
if l.endswith('.txt'):
print(l)
client.exec_command(
f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')
ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')
client.exec_command('rm /home/ec2-user/logs/tmp')
ftp.close()
client.close()
if __name__ == '__main__':
args = docopt(__doc__)
for ip in open(args['<ip-file>']):
os.system(
f"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}"
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from docopt import docopt
import paramiko
import os
def get_logs(ip_addr, pem_file, log_dir):
pem = paramiko.RSAKey.from_private_key_file(pem_file)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)
ftp = client.open_sftp()
logs = sorted(ftp.listdir('/home/ec2-user/logs/'))
for l in logs:
if l.endswith('.txt'):
print(l)
client.exec_command(
f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')
ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')
client.exec_command('rm /home/ec2-user/logs/tmp')
ftp.close()
client.close()
if __name__ == '__main__':
args = docopt(__doc__)
for ip in open(args['<ip-file>']):
os.system(
f"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}"
)
<|reserved_special_token_1|>
""" GetState
Usage:
get_state.py <pem-file> <ip-file> [options]
Options:
-h, --help print help message and exit
--output DIR set the output directory [default: logs]
"""
from docopt import docopt
import paramiko
import os
def get_logs(ip_addr, pem_file, log_dir):
pem = paramiko.RSAKey.from_private_key_file(pem_file)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname=ip_addr, username="ec2-user", pkey=pem)
ftp = client.open_sftp()
logs = sorted(ftp.listdir('/home/ec2-user/logs/'))
for l in logs:
if l.endswith('.txt'):
print(l)
client.exec_command(f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')
ftp.get(f'/home/ec2-user/logs/tmp', f"{log_dir}/{l}")
client.exec_command('rm /home/ec2-user/logs/tmp')
ftp.close()
client.close()
if __name__ == '__main__':
args = docopt(__doc__)
for ip in open(args['<ip-file>']):
os.system(f"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}")
#get_logs(ip.strip(), args['<pem-file>'], args['--output'])
|
flexible
|
{
"blob_id": "a1df804325a074ed980ec864c72fe231e2968997",
"index": 4024,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(\n f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(\n f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n for ip in open(args['<ip-file>']):\n os.system(\n f\"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}\"\n )\n",
"step-4": "<mask token>\nfrom docopt import docopt\nimport paramiko\nimport os\n\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username='ec2-user', pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(\n f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f'{log_dir}/{l}')\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n for ip in open(args['<ip-file>']):\n os.system(\n f\"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}\"\n )\n",
"step-5": "\"\"\" GetState\nUsage:\n get_state.py <pem-file> <ip-file> [options]\n\nOptions:\n -h, --help print help message and exit\n --output DIR set the output directory [default: logs]\n\"\"\"\n\nfrom docopt import docopt\nimport paramiko\nimport os\n\ndef get_logs(ip_addr, pem_file, log_dir):\n pem = paramiko.RSAKey.from_private_key_file(pem_file)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=ip_addr, username=\"ec2-user\", pkey=pem)\n ftp = client.open_sftp()\n logs = sorted(ftp.listdir('/home/ec2-user/logs/'))\n for l in logs:\n if l.endswith('.txt'):\n print(l)\n client.exec_command(f'cat /home/ec2-user/logs/{l} > /home/ec2-user/logs/tmp')\n ftp.get(f'/home/ec2-user/logs/tmp', f\"{log_dir}/{l}\")\n client.exec_command('rm /home/ec2-user/logs/tmp')\n ftp.close()\n client.close()\n\nif __name__ == '__main__':\n args = docopt(__doc__)\n\n for ip in open(args['<ip-file>']):\n os.system(f\"scp -i {args['<pem-file>']} ec2-user@{ip.strip()}:~/logs/*.txt {args['--output']}\")\n #get_logs(ip.strip(), args['<pem-file>'], args['--output'])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Chris DeBoever
# cdeboeve@ucsd.edu
import sys, argparse, pdb, glob, os, re
import numpy as np
from bisect import bisect_left
from scipy.stats import binom
### helper functions ###
def find_lt(a,x):
"""
Find rightmost value less than x in list a
Input: list a and value x
Output: rightmost value less than x in a
"""
i = bisect_left(a,x)
if i:
return a[i-1]
raise ValueError
def find_ge(a,x):
"""
Find leftmost item greater than or equal to x in list a
Input: list a and value x
Output: leftmost value less than or equal to x in a
"""
i = bisect_left(a,x)
if i != len(a):
return a[i]
raise ValueError
def get_altL(fn):
"""
Make a list of alternate allele frequencies and number of reads
Input: tsv file with reference freq in first column and alterate freq in second column
Output: a list of tuples with number of reads and alternate allele frequency
"""
f = open(fn,'r')
linesL = [ x.strip().split('\t') for x in f.readlines() ]
f.close()
if linesL[0][0][0] == '#':
linesL = linesL[1:]
for i in range(len(linesL)):
if linesL[i][4] == '0': # if the number of reads supporting alternate is 0, we'll switch to 1 so avoid numeric issues
linesL[i][4] = '1'
return zip([ int(x[4])+int(x[5]) for x in linesL ], [ float(x[5])/(float(x[4])+float(x[5])) for x in linesL ]) # each tuple is [freq,num_reads]
# def generate_cancer_possible_freqL(pL,sL,er):
# I want to make a function which generates the likely frequencies seen in a cancer sample. This would exclude double-hit mutations (i.e. a single site gains somatic mutations on both chromosomes). This simplifications can only be made in the diploid case, however, because ploidy-variable populations might be weird...
def generate_possible_freqL(pL,sL,er):
"""
Generate list of possible allele frequencies
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: list of possible allele frequences
"""
h = sum(pL) # number of different haplotypes
L = [ bin(x)[2:] for x in range(1,2**h-1) ] # range from 1 to 2**h-1 because we don't want 0% or 100% allele freq
M = [ '0'*(len(L[-1])-len(x))+x for x in L ]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i]/pL[i]]*pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aL = []
for g in M:
aL.append(sum(np.array([ int(x) for x in list(g) ])*p_freqL))
return sorted(list(set(aL+[er,1-er])))
def freq_to_genotype(pL,sL,er):
"""
Creates dict of expected alternate allele frequencies and consistent genotypes
Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate
Output: dict of expected alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list
"""
h = sum(pL) # number of different haplotypes
L = [ bin(x)[2:] for x in range(1,2**h-1) ] # range from 1 to 2**h-1 because we don't want 0% or 100% allele freq
M = [ '0'*(len(L[-1])-len(x))+x for x in L ]
p_freqL = []
for i in range(len(pL)):
p_freqL += [sL[i]/pL[i]]*pL[i]
p_freqA = np.array(p_freqL)
sA = np.array(sL)
aD = {} # dict where each key is an expected alternate allele frequency and each value is a list of genotypes consistent with this alternate allele frequency
for g in M:
alt_freq = sum(np.array([ int(x) for x in list(g) ])*p_freqL)
if aD.has_key(alt_freq):
aD[alt_freq].append(g)
else:
aD[alt_freq] = [g]
aD[er] = ['0'*(len(L[-1])-1) + bin(0)[2:]] # add genotype for 0% alternate allele freq
aD[1-er] = [bin(2**h-1)[2:]] # add genotype for 100% alternate allele freq
return aD
def collapse_genotypes(pL,gL):
"""
Reduces a list of genotypes to distinct genotypes given ploidy
Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list
Output: genotype list with non-redundant genotypes
"""
if len(gL) < 2:
return gL
else:
uniqueL = [] # list of unique genotypes relative to ploidy
for g in gL:
s = ''
for i in xrange(len(pL)):
s += ''.join(sorted(g[0:pL[i]]))
g = g[pL[i]:]
if s not in uniqueL:
uniqueL.append(s)
return uniqueL
def grid_search_parameters(step):
"""
Make a list of parameters to try
Input: step size
Output: subpopulation frequencies to try
"""
f1 = list(np.arange(step,1,step))
f2 = list(np.arange(step,1,step))
f2.reverse()
return zip(f1,f2)
def estimate_genotype(alt_freq,exp_freqL):
"""
Maximum likelihood estimator of alt_freq given possibilities in exp_freqL
Input: observed alternate frequency and list of expected alternate frequencies
Output: ML estimator of true alternate allele frequency
"""
try:
i = find_lt(exp_freqL,alt_freq) # Find rightmost value less than x
except ValueError:
i = float("-inf")
try:
j = find_ge(exp_freqL,alt_freq) # Find leftmost item greater than or equal to x
except ValueError:
j = float("inf")
if alt_freq-i < j-alt_freq:
return i
else:
return j
def main():
### magic variables ###
# these variables can be set at the command line as well
ploidyL = [2,2] # the entries in this list are the expected ploidy of each subpopulation. Default is two diploid subpopulations
error_rate = 0.001 # sequencing error rate
cov_cutoff = 4 # coverage cutoff for variant sites
### gather command line arguments ###
parser = argparse.ArgumentParser(description='This script determines the relative frequencies of different populations and estimates the genotypes.')
parser.add_argument('infile', help='Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.')
parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),default=sys.stdout, help='Output file. Default: standard out')
parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help='A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'.format(' '.join([str(x) for x in ploidyL])))
parser.add_argument('-er', default=error_rate, type=float, help='Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'.format(error_rate))
parser.add_argument('-cc', default=cov_cutoff, type=int, help='Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. Default: {0}'.format(cov_cutoff))
parser.add_argument('-d', action='store_true', help='Enable python debugger.')
args = parser.parse_args()
inN = args.infile
outF = args.o
ploidyL = args.pL
error_rate = args.er
debug = args.d
inN = os.path.realpath(inN) # get the input file path
if len(ploidyL) > 2:
print >>sys.stderr, 'Sorry, only two subpopulations are currently supported.'
sys.exit(1)
altL = get_altL(inN) # a list of number of reads and alternate allele frequencies
tempL = []
for a in altL:
if a[0]*a[1] > cov_cutoff and a[0]*(1-a[1]) > cov_cutoff and a[0] > cov_cutoff:
tempL.append(a)
altL = tempL
### find population frequencies ###
parL = grid_search_parameters(0.01) # grid search
best_par = []
best_ll = float("-inf")
for par in parL:
exp_freqL = generate_possible_freqL(ploidyL,par,error_rate)
ll = 0 # log-likelihood
for alt in altL:
exp_freq = estimate_genotype(alt[1],exp_freqL)
ll += np.log(binom.pmf(round(alt[0]*alt[1]),alt[0],exp_freq))
# round(alt[0]*alt[1]) is the number of reads we saw supporting alternate allele (i.e. the number of successes under the binomial test)
# alt[0] is the total number of reads covering this site (i.e. the number of attempts in our binomial test)
# exp_freq is our probability of success (i.e. observing a read supporting alternate) from our ML estimation (see estimate_genotype)
if ll > best_ll:
best_ll = ll
best_par = par
### determine genotypes ###
altD = freq_to_genotype(ploidyL,best_par,error_rate) # dict whose keys are alternate allele frequencies and whose values are lists of consistent genotypes
for k in altD.keys():
altD[k] = collapse_genotypes(ploidyL,altD[k])
exp_freqL = sorted(altD.keys())
print >>outF, '#log-likelihood\t{0}\n#population frequencies\t{1}'.format(best_ll,'\t'.join([ str(x) for x in best_par ]))
inF = open(inN,'r')
linesL = inF.readlines()
inF.close()
if linesL[0][0] == '#':
linesL = linesL[1:]
for i in xrange(len(altL)):
alt = altL[i]
[chr,pos,refbase,altbase,refcov,altcov] = linesL[i].strip().split('\t')
genotypeL = altD[estimate_genotype(alt[1],exp_freqL)]
for g in genotypeL:
g = re.sub('0',refbase,g)
g = re.sub('1',altbase,g)
tempL = [] # each element of this list is the genotype of a population
for i in xrange(len(ploidyL)):
tempL.append(g[0:ploidyL[i]])
g = g[ploidyL[i]:]
print >>outF, '\t'.join([chr,pos] + tempL)
# use best population frequency parameters and walk through sites, assign genotypes, p-values or scores maybe?
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "da751e96c225ebc2d30f3cce01ba2f64d0a29257",
"index": 3763,
"step-1": "<mask token>\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected 
alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\n<mask token>\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j - alt_freq:\n return i\n else:\n return j\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected 
alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\ndef collapse_genotypes(pL, gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = []\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j - alt_freq:\n return i\n else:\n return j\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected 
alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\ndef collapse_genotypes(pL, gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = []\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j - alt_freq:\n return i\n else:\n return j\n\n\ndef main():\n ploidyL = [2, 2]\n error_rate = 0.001\n 
cov_cutoff = 4\n parser = argparse.ArgumentParser(description=\n 'This script determines the relative frequencies of different populations and estimates the genotypes.'\n )\n parser.add_argument('infile', help=\n 'Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.'\n )\n parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),\n default=sys.stdout, help='Output file. Default: standard out')\n parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help=\n 'A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'\n .format(' '.join([str(x) for x in ploidyL])))\n parser.add_argument('-er', default=error_rate, type=float, help=\n 'Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'\n .format(error_rate))\n parser.add_argument('-cc', default=cov_cutoff, type=int, help=\n 'Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. 
Default: {0}'\n .format(cov_cutoff))\n parser.add_argument('-d', action='store_true', help=\n 'Enable python debugger.')\n args = parser.parse_args()\n inN = args.infile\n outF = args.o\n ploidyL = args.pL\n error_rate = args.er\n debug = args.d\n inN = os.path.realpath(inN)\n if len(ploidyL) > 2:\n print >> sys.stderr, 'Sorry, only two subpopulations are currently supported.'\n sys.exit(1)\n altL = get_altL(inN)\n tempL = []\n for a in altL:\n if a[0] * a[1] > cov_cutoff and a[0] * (1 - a[1]) > cov_cutoff and a[0\n ] > cov_cutoff:\n tempL.append(a)\n altL = tempL\n parL = grid_search_parameters(0.01)\n best_par = []\n best_ll = float('-inf')\n for par in parL:\n exp_freqL = generate_possible_freqL(ploidyL, par, error_rate)\n ll = 0\n for alt in altL:\n exp_freq = estimate_genotype(alt[1], exp_freqL)\n ll += np.log(binom.pmf(round(alt[0] * alt[1]), alt[0], exp_freq))\n if ll > best_ll:\n best_ll = ll\n best_par = par\n altD = freq_to_genotype(ploidyL, best_par, error_rate)\n for k in altD.keys():\n altD[k] = collapse_genotypes(ploidyL, altD[k])\n exp_freqL = sorted(altD.keys())\n print >> outF, '#log-likelihood\\t{0}\\n#population frequencies\\t{1}'.format(\n best_ll, '\\t'.join([str(x) for x in best_par]))\n inF = open(inN, 'r')\n linesL = inF.readlines()\n inF.close()\n if linesL[0][0] == '#':\n linesL = linesL[1:]\n for i in xrange(len(altL)):\n alt = altL[i]\n [chr, pos, refbase, altbase, refcov, altcov] = linesL[i].strip().split(\n '\\t')\n genotypeL = altD[estimate_genotype(alt[1], exp_freqL)]\n for g in genotypeL:\n g = re.sub('0', refbase, g)\n g = re.sub('1', altbase, g)\n tempL = []\n for i in xrange(len(ploidyL)):\n tempL.append(g[0:ploidyL[i]])\n g = g[ploidyL[i]:]\n print >> outF, '\\t'.join([chr, pos] + tempL)\n\n\n<mask token>\n",
"step-4": "import sys, argparse, pdb, glob, os, re\nimport numpy as np\nfrom bisect import bisect_left\nfrom scipy.stats import binom\n\n\ndef find_lt(a, x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a, x)\n if i:\n return a[i - 1]\n raise ValueError\n\n\ndef find_ge(a, x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a, x)\n if i != len(a):\n return a[i]\n raise ValueError\n\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn, 'r')\n linesL = [x.strip().split('\\t') for x in f.readlines()]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0':\n linesL[i][4] = '1'\n return zip([(int(x[4]) + int(x[5])) for x in linesL], [(float(x[5]) / (\n float(x[4]) + float(x[5]))) for x in linesL])\n\n\ndef generate_possible_freqL(pL, sL, er):\n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([int(x) for x in list(g)]) * p_freqL))\n return sorted(list(set(aL + [er, 1 - er])))\n\n\ndef freq_to_genotype(pL, sL, er):\n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: 
ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected alternate allele frequencies and consistent genotypes. Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL)\n L = [bin(x)[2:] for x in range(1, 2 ** h - 1)]\n M = [('0' * (len(L[-1]) - len(x)) + x) for x in L]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i] / pL[i]] * pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {}\n for g in M:\n alt_freq = sum(np.array([int(x) for x in list(g)]) * p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0' * (len(L[-1]) - 1) + bin(0)[2:]]\n aD[1 - er] = [bin(2 ** h - 1)[2:]]\n return aD\n\n\ndef collapse_genotypes(pL, gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = []\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n\n\ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step, 1, step))\n f2 = list(np.arange(step, 1, step))\n f2.reverse()\n return zip(f1, f2)\n\n\ndef estimate_genotype(alt_freq, exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n \"\"\"\n try:\n i = find_lt(exp_freqL, alt_freq)\n except ValueError:\n i = float('-inf')\n try:\n j = find_ge(exp_freqL, alt_freq)\n except ValueError:\n j = float('inf')\n if alt_freq - i < j 
- alt_freq:\n return i\n else:\n return j\n\n\ndef main():\n ploidyL = [2, 2]\n error_rate = 0.001\n cov_cutoff = 4\n parser = argparse.ArgumentParser(description=\n 'This script determines the relative frequencies of different populations and estimates the genotypes.'\n )\n parser.add_argument('infile', help=\n 'Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.'\n )\n parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),\n default=sys.stdout, help='Output file. Default: standard out')\n parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help=\n 'A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'\n .format(' '.join([str(x) for x in ploidyL])))\n parser.add_argument('-er', default=error_rate, type=float, help=\n 'Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'\n .format(error_rate))\n parser.add_argument('-cc', default=cov_cutoff, type=int, help=\n 'Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. 
Default: {0}'\n .format(cov_cutoff))\n parser.add_argument('-d', action='store_true', help=\n 'Enable python debugger.')\n args = parser.parse_args()\n inN = args.infile\n outF = args.o\n ploidyL = args.pL\n error_rate = args.er\n debug = args.d\n inN = os.path.realpath(inN)\n if len(ploidyL) > 2:\n print >> sys.stderr, 'Sorry, only two subpopulations are currently supported.'\n sys.exit(1)\n altL = get_altL(inN)\n tempL = []\n for a in altL:\n if a[0] * a[1] > cov_cutoff and a[0] * (1 - a[1]) > cov_cutoff and a[0\n ] > cov_cutoff:\n tempL.append(a)\n altL = tempL\n parL = grid_search_parameters(0.01)\n best_par = []\n best_ll = float('-inf')\n for par in parL:\n exp_freqL = generate_possible_freqL(ploidyL, par, error_rate)\n ll = 0\n for alt in altL:\n exp_freq = estimate_genotype(alt[1], exp_freqL)\n ll += np.log(binom.pmf(round(alt[0] * alt[1]), alt[0], exp_freq))\n if ll > best_ll:\n best_ll = ll\n best_par = par\n altD = freq_to_genotype(ploidyL, best_par, error_rate)\n for k in altD.keys():\n altD[k] = collapse_genotypes(ploidyL, altD[k])\n exp_freqL = sorted(altD.keys())\n print >> outF, '#log-likelihood\\t{0}\\n#population frequencies\\t{1}'.format(\n best_ll, '\\t'.join([str(x) for x in best_par]))\n inF = open(inN, 'r')\n linesL = inF.readlines()\n inF.close()\n if linesL[0][0] == '#':\n linesL = linesL[1:]\n for i in xrange(len(altL)):\n alt = altL[i]\n [chr, pos, refbase, altbase, refcov, altcov] = linesL[i].strip().split(\n '\\t')\n genotypeL = altD[estimate_genotype(alt[1], exp_freqL)]\n for g in genotypeL:\n g = re.sub('0', refbase, g)\n g = re.sub('1', altbase, g)\n tempL = []\n for i in xrange(len(ploidyL)):\n tempL.append(g[0:ploidyL[i]])\n g = g[ploidyL[i]:]\n print >> outF, '\\t'.join([chr, pos] + tempL)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Chris DeBoever\n# cdeboeve@ucsd.edu\n\nimport sys, argparse, pdb, glob, os, re\nimport numpy as np\nfrom bisect import bisect_left \nfrom scipy.stats import binom\n\n### helper functions ###\n\ndef find_lt(a,x):\n \"\"\"\n Find rightmost value less than x in list a\n Input: list a and value x\n Output: rightmost value less than x in a\n \"\"\"\n i = bisect_left(a,x)\n if i:\n return a[i-1]\n raise ValueError\n\ndef find_ge(a,x):\n \"\"\"\n Find leftmost item greater than or equal to x in list a\n Input: list a and value x\n Output: leftmost value less than or equal to x in a\n \"\"\"\n i = bisect_left(a,x)\n if i != len(a):\n return a[i]\n raise ValueError\n\ndef get_altL(fn):\n \"\"\"\n Make a list of alternate allele frequencies and number of reads\n Input: tsv file with reference freq in first column and alterate freq in second column\n Output: a list of tuples with number of reads and alternate allele frequency\n \"\"\"\n f = open(fn,'r')\n linesL = [ x.strip().split('\\t') for x in f.readlines() ]\n f.close()\n if linesL[0][0][0] == '#':\n linesL = linesL[1:]\n for i in range(len(linesL)):\n if linesL[i][4] == '0': # if the number of reads supporting alternate is 0, we'll switch to 1 so avoid numeric issues\n linesL[i][4] = '1'\n return zip([ int(x[4])+int(x[5]) for x in linesL ], [ float(x[5])/(float(x[4])+float(x[5])) for x in linesL ]) # each tuple is [freq,num_reads]\n\n# def generate_cancer_possible_freqL(pL,sL,er): \n# I want to make a function which generates the likely frequencies seen in a cancer sample. This would exclude double-hit mutations (i.e. a single site gains somatic mutations on both chromosomes). 
This simplifications can only be made in the diploid case, however, because ploidy-variable populations might be weird...\n\ndef generate_possible_freqL(pL,sL,er): \n \"\"\"\n Generate list of possible allele frequencies\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: list of possible allele frequences\n \"\"\"\n h = sum(pL) # number of different haplotypes\n L = [ bin(x)[2:] for x in range(1,2**h-1) ] # range from 1 to 2**h-1 because we don't want 0% or 100% allele freq\n M = [ '0'*(len(L[-1])-len(x))+x for x in L ]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i]/pL[i]]*pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aL = []\n for g in M:\n aL.append(sum(np.array([ int(x) for x in list(g) ])*p_freqL))\n return sorted(list(set(aL+[er,1-er]))) \n\ndef freq_to_genotype(pL,sL,er): \n \"\"\"\n Creates dict of expected alternate allele frequencies and consistent genotypes\n Input: ploidy list, frequency (of each subpopulation) list, and sequencing error rate\n Output: dict of expected alternate allele frequencies and consistent genotypes. 
Genotypes represented as binary strings in the order of the ploidy list\n \"\"\"\n h = sum(pL) # number of different haplotypes\n L = [ bin(x)[2:] for x in range(1,2**h-1) ] # range from 1 to 2**h-1 because we don't want 0% or 100% allele freq\n M = [ '0'*(len(L[-1])-len(x))+x for x in L ]\n p_freqL = []\n for i in range(len(pL)):\n p_freqL += [sL[i]/pL[i]]*pL[i]\n p_freqA = np.array(p_freqL)\n sA = np.array(sL)\n aD = {} # dict where each key is an expected alternate allele frequency and each value is a list of genotypes consistent with this alternate allele frequency\n for g in M:\n alt_freq = sum(np.array([ int(x) for x in list(g) ])*p_freqL)\n if aD.has_key(alt_freq):\n aD[alt_freq].append(g)\n else:\n aD[alt_freq] = [g]\n aD[er] = ['0'*(len(L[-1])-1) + bin(0)[2:]] # add genotype for 0% alternate allele freq\n aD[1-er] = [bin(2**h-1)[2:]] # add genotype for 100% alternate allele freq\n return aD\n\ndef collapse_genotypes(pL,gL):\n \"\"\"\n Reduces a list of genotypes to distinct genotypes given ploidy\n Input: ploidy list pL and list of genotypes gL where each genotype is a binary string ordered according to ploidy list\n Output: genotype list with non-redundant genotypes\n \"\"\"\n if len(gL) < 2:\n return gL\n else:\n uniqueL = [] # list of unique genotypes relative to ploidy\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL\n \ndef grid_search_parameters(step):\n \"\"\"\n Make a list of parameters to try\n Input: step size\n Output: subpopulation frequencies to try\n \"\"\"\n f1 = list(np.arange(step,1,step))\n f2 = list(np.arange(step,1,step))\n f2.reverse()\n return zip(f1,f2)\n\ndef estimate_genotype(alt_freq,exp_freqL):\n \"\"\"\n Maximum likelihood estimator of alt_freq given possibilities in exp_freqL\n Input: observed alternate frequency and list of expected alternate frequencies\n Output: ML estimator of true alternate allele frequency\n 
\"\"\"\n try:\n i = find_lt(exp_freqL,alt_freq) # Find rightmost value less than x\n except ValueError:\n i = float(\"-inf\")\n try:\n j = find_ge(exp_freqL,alt_freq) # Find leftmost item greater than or equal to x\n except ValueError:\n j = float(\"inf\")\n if alt_freq-i < j-alt_freq:\n return i\n else:\n return j\n\ndef main():\n ### magic variables ###\n # these variables can be set at the command line as well\n ploidyL = [2,2] # the entries in this list are the expected ploidy of each subpopulation. Default is two diploid subpopulations\n error_rate = 0.001 # sequencing error rate\n cov_cutoff = 4 # coverage cutoff for variant sites\n\n ### gather command line arguments ###\n parser = argparse.ArgumentParser(description='This script determines the relative frequencies of different populations and estimates the genotypes.')\n parser.add_argument('infile', help='Input tsv file. Columns should be: chrom, position, ref base, alt base, number of reads supporting reference, number of reads supporting alternate.')\n parser.add_argument('-o', nargs='?', type=argparse.FileType('w'),default=sys.stdout, help='Output file. Default: standard out')\n parser.add_argument('-pL', default=ploidyL, type=int, nargs='+', help='A list of ploidies. Each entry in the list represents the anticipated ploidy of a subpopulation. For instance, if you expect two diploid subpopulations and one triploid subpopulation, enter 2 2 3. Default: {0}'.format(' '.join([str(x) for x in ploidyL])))\n parser.add_argument('-er', default=error_rate, type=float, help='Sequencing error rate. For instance, 0.01 means that 1/100 base calls will be incorrect. Default: {0}'.format(error_rate))\n parser.add_argument('-cc', default=cov_cutoff, type=int, help='Coverage cutoff. If the coverage of either the alternate or reference allele is less than or equal to this value, the site will not be considered as a variant site. 
Default: {0}'.format(cov_cutoff))\n parser.add_argument('-d', action='store_true', help='Enable python debugger.')\n \n args = parser.parse_args()\n \n inN = args.infile\n outF = args.o\n ploidyL = args.pL\n error_rate = args.er\n debug = args.d\n\n inN = os.path.realpath(inN) # get the input file path\n\n if len(ploidyL) > 2:\n print >>sys.stderr, 'Sorry, only two subpopulations are currently supported.'\n sys.exit(1)\n\n altL = get_altL(inN) # a list of number of reads and alternate allele frequencies\n tempL = []\n for a in altL:\n if a[0]*a[1] > cov_cutoff and a[0]*(1-a[1]) > cov_cutoff and a[0] > cov_cutoff:\n tempL.append(a)\n altL = tempL\n\n ### find population frequencies ###\n\n parL = grid_search_parameters(0.01) # grid search\n best_par = []\n best_ll = float(\"-inf\")\n\n for par in parL:\n exp_freqL = generate_possible_freqL(ploidyL,par,error_rate)\n ll = 0 # log-likelihood\n\n for alt in altL:\n exp_freq = estimate_genotype(alt[1],exp_freqL)\n ll += np.log(binom.pmf(round(alt[0]*alt[1]),alt[0],exp_freq)) \n # round(alt[0]*alt[1]) is the number of reads we saw supporting alternate allele (i.e. the number of successes under the binomial test)\n # alt[0] is the total number of reads covering this site (i.e. the number of attempts in our binomial test)\n # exp_freq is our probability of success (i.e. 
observing a read supporting alternate) from our ML estimation (see estimate_genotype)\n \n if ll > best_ll:\n best_ll = ll\n best_par = par\n\n ### determine genotypes ###\n altD = freq_to_genotype(ploidyL,best_par,error_rate) # dict whose keys are alternate allele frequencies and whose values are lists of consistent genotypes\n for k in altD.keys():\n altD[k] = collapse_genotypes(ploidyL,altD[k])\n exp_freqL = sorted(altD.keys()) \n\n print >>outF, '#log-likelihood\\t{0}\\n#population frequencies\\t{1}'.format(best_ll,'\\t'.join([ str(x) for x in best_par ]))\n\n inF = open(inN,'r')\n linesL = inF.readlines()\n inF.close()\n if linesL[0][0] == '#':\n linesL = linesL[1:]\n for i in xrange(len(altL)):\n alt = altL[i]\n [chr,pos,refbase,altbase,refcov,altcov] = linesL[i].strip().split('\\t')\n genotypeL = altD[estimate_genotype(alt[1],exp_freqL)] \n for g in genotypeL:\n g = re.sub('0',refbase,g)\n g = re.sub('1',altbase,g)\n tempL = [] # each element of this list is the genotype of a population\n for i in xrange(len(ploidyL)):\n tempL.append(g[0:ploidyL[i]])\n g = g[ploidyL[i]:]\n print >>outF, '\\t'.join([chr,pos] + tempL)\n\n # use best population frequency parameters and walk through sites, assign genotypes, p-values or scores maybe?\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
'''
fibonacci(6) => [1, 1, 2, 3, 5, 8]
fibonacci(7) => [1, 1, 2, 3, 5, 8, 13]
'''
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n-1)
suma = lista[len(lista)-1] + lista[len(lista)-2]
lista.append(suma)
return lista
def main():
resultado = fibonacci(6)
print(resultado)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "03062ea08bd6ad88376f7c2aa2c89d2194ed8b2e",
"index": 1074,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nfibonacci(6) => [1, 1, 2, 3, 5, 8]\nfibonacci(7) => [1, 1, 2, 3, 5, 8, 13]\n'''\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n-1)\n suma = lista[len(lista)-1] + lista[len(lista)-2]\n lista.append(suma)\n return lista\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def check_bit4(input):
mas=0b1000
desired=input & mas
if desired>0:
return "om"
else :
return "off"
|
normal
|
{
"blob_id": "29dc940292a6805aabfa5bed22bb75d31140c83f",
"index": 3257,
"step-1": "<mask token>\n",
"step-2": "def check_bit4(input):\n mas = 8\n desired = input & mas\n if desired > 0:\n return 'om'\n else:\n return 'off'\n",
"step-3": "def check_bit4(input):\n\tmas=0b1000\n\tdesired=input & mas\n\tif desired>0:\n\t\treturn \"om\"\n\telse :\n\t\treturn \"off\"\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#define the simple_divide function here
def simple_divide(item, denom):
# start a try-except block
try:
return item/denom
except ZeroDivisionError:
return 0
def fancy_divide(list_of_numbers, index):
denom = list_of_numbers[index]
return [simple_divide(item, denom) for item in list_of_numbers]
def main():
data = input()
l=data.split()
l1=[]
for j in l:
l1.append(float(j))
s=input()
index=int(s)
print(fancy_divide(l1,index))
if __name__== "__main__":
main()
|
normal
|
{
"blob_id": "1fbdb0b40f0d65fffec482b63aa2192968b01d4b",
"index": 9766,
"step-1": "def simple_divide(item, denom):\n try:\n return item / denom\n except ZeroDivisionError:\n return 0\n\n\n<mask token>\n",
"step-2": "def simple_divide(item, denom):\n try:\n return item / denom\n except ZeroDivisionError:\n return 0\n\n\n<mask token>\n\n\ndef main():\n data = input()\n l = data.split()\n l1 = []\n for j in l:\n l1.append(float(j))\n s = input()\n index = int(s)\n print(fancy_divide(l1, index))\n\n\n<mask token>\n",
"step-3": "def simple_divide(item, denom):\n try:\n return item / denom\n except ZeroDivisionError:\n return 0\n\n\ndef fancy_divide(list_of_numbers, index):\n denom = list_of_numbers[index]\n return [simple_divide(item, denom) for item in list_of_numbers]\n\n\ndef main():\n data = input()\n l = data.split()\n l1 = []\n for j in l:\n l1.append(float(j))\n s = input()\n index = int(s)\n print(fancy_divide(l1, index))\n\n\n<mask token>\n",
"step-4": "def simple_divide(item, denom):\n try:\n return item / denom\n except ZeroDivisionError:\n return 0\n\n\ndef fancy_divide(list_of_numbers, index):\n denom = list_of_numbers[index]\n return [simple_divide(item, denom) for item in list_of_numbers]\n\n\ndef main():\n data = input()\n l = data.split()\n l1 = []\n for j in l:\n l1.append(float(j))\n s = input()\n index = int(s)\n print(fancy_divide(l1, index))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#define the simple_divide function here\ndef simple_divide(item, denom):\n # start a try-except block\n try:\n return item/denom\n except ZeroDivisionError:\n return 0\n \ndef fancy_divide(list_of_numbers, index):\n denom = list_of_numbers[index]\n return [simple_divide(item, denom) for item in list_of_numbers]\n\ndef main():\n data = input()\n l=data.split()\n l1=[]\n for j in l:\n l1.append(float(j))\n s=input()\n index=int(s)\n print(fancy_divide(l1,index))\nif __name__== \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import openpyxl
class TestXLUtility:
    """Excel helper utilities built on openpyxl.

    Every worksheet operation re-loads the workbook from *file* on each
    call, so the helpers are stateless and usable directly on the class
    (``TestXLUtility.readData(path, sheet, 1, 1)``) without an instance.
    """

    def __init__(self, driver):
        # Kept for compatibility with callers that construct the utility
        # with a WebDriver instance; the driver is not used by the helpers.
        self.driver = driver

    # The helpers below were originally plain functions defined inside the
    # class body (no ``self``); marking them @staticmethod preserves the
    # existing ``TestXLUtility.method(...)`` call style and additionally
    # makes instance calls (``obj.method(...)``) work correctly.
    @staticmethod
    def getRowCount(file, sheetname):
        """Return the index of the last row containing data (1-based)."""
        workbook = openpyxl.load_workbook(file)
        sheet = workbook[sheetname]
        return sheet.max_row

    @staticmethod
    def getColumnCount(file, sheetname):
        """Return the index of the last column containing data (1-based)."""
        workbook = openpyxl.load_workbook(file)
        sheet = workbook[sheetname]
        return sheet.max_column

    @staticmethod
    def readData(file, sheetname, rownum, columno):
        """Return the value of the cell at (rownum, columno); 1-based indices."""
        workbook = openpyxl.load_workbook(file)
        sheet = workbook[sheetname]
        return sheet.cell(row=rownum, column=columno).value

    @staticmethod
    def writeData(file, sheetname, rownum, columno, data):
        """Write *data* into the cell at (rownum, columno) and save the workbook."""
        workbook = openpyxl.load_workbook(file)
        sheet = workbook[sheetname]
        sheet.cell(row=rownum, column=columno).value = data
        workbook.save(file)
|
normal
|
{
"blob_id": "adae4f9ebcbbb775fc40278ceec9a0cc30c0a503",
"index": 1541,
"step-1": "<mask token>\n\n\nclass TestXLUtility:\n <mask token>\n\n def getRowCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.max_row\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestXLUtility:\n <mask token>\n\n def getRowCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.max_row\n\n def getColumnCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.max_column\n <mask token>\n\n def writeData(file, sheetname, rownum, columno, data):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n sheet.cell(row=rownum, column=columno).value = data\n workbook.save(file)\n",
"step-3": "<mask token>\n\n\nclass TestXLUtility:\n\n def __init__(self, driver):\n self.driver = driver\n\n def getRowCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.max_row\n\n def getColumnCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.max_column\n\n def readData(file, sheetname, rownum, columno):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.cell(row=rownum, column=columno).value\n\n def writeData(file, sheetname, rownum, columno, data):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n sheet.cell(row=rownum, column=columno).value = data\n workbook.save(file)\n",
"step-4": "import openpyxl\n\n\nclass TestXLUtility:\n\n def __init__(self, driver):\n self.driver = driver\n\n def getRowCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.max_row\n\n def getColumnCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.max_column\n\n def readData(file, sheetname, rownum, columno):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n return sheet.cell(row=rownum, column=columno).value\n\n def writeData(file, sheetname, rownum, columno, data):\n workbook = openpyxl.load_workbook(file)\n sheet = workbook[sheetname]\n sheet.cell(row=rownum, column=columno).value = data\n workbook.save(file)\n",
"step-5": "import openpyxl\n\nclass TestXLUtility:\n\n def __init__(self, driver):\n self.driver = driver\n\n def getRowCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n #sheet = workbook.get_sheet_by_name(sheetname)\n sheet = workbook[sheetname]\n return(sheet.max_row)\n\n def getColumnCount(file, sheetname):\n workbook = openpyxl.load_workbook(file)\n #sheet = workbook.get_sheet_by_name(sheetname)\n sheet = workbook[sheetname]\n return (sheet.max_column)\n\n def readData(file,sheetname,rownum,columno):\n workbook = openpyxl.load_workbook(file)\n #sheet = workbook.get_sheet_by_name(sheetname)\n sheet = workbook[sheetname]\n return(sheet.cell(row=rownum, column=columno).value)\n\n def writeData(file,sheetname,rownum,columno,data):\n workbook = openpyxl.load_workbook(file)\n #sheet = workbook.get_sheet_by_name(sheetname)\n sheet = workbook[sheetname]\n sheet.cell(row=rownum, column=columno).value = data\n workbook.save(file)\n",
"step-ids": [
2,
4,
6,
7,
8
]
}
|
[
2,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Classifier(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Classifier(object):
<|reserved_special_token_0|>
def __init__(self, classifier, scaler, orient, color_space,
pix_per_cell, cell_per_block, spatial_size, hist_bins):
"""
Initializes an instance.
Parameters
----------
classifier : Trained SciPy classifier for detecting vehicles.
scaler : SciPy scaler to apply to X.
"""
self.classifier = classifier
self.scaler = scaler
self.color_space = color_space
self.orient = orient
self.pix_per_cell = pix_per_cell
self.cell_per_block = cell_per_block
self.spatial_size = spatial_size
self.hist_bins = hist_bins
<|reserved_special_token_1|>
class Classifier(object):
    """
    Container bundling a trained vehicle classifier with the
    feature-extraction settings that were used when it was trained,
    so detection code can reproduce the same features.
    """

    def __init__(self, classifier, scaler, orient, color_space,
                 pix_per_cell, cell_per_block, spatial_size, hist_bins):
        """
        Initializes an instance.

        Parameters
        ----------
        classifier : Trained SciPy classifier for detecting vehicles.
        scaler : SciPy scaler to apply to X.
        orient, pix_per_cell, cell_per_block : HOG feature parameters
            (orientation bins, pixels per cell, cells per block) —
            presumed from the names; confirm against the feature extractor.
        color_space : Color space identifier used for feature extraction.
        spatial_size : Size used for spatially binned features.
        hist_bins : Number of bins for color-histogram features.
        """
        # All arguments are stored verbatim; this class adds no behavior.
        self.classifier = classifier
        self.scaler = scaler
        self.color_space = color_space
        self.orient = orient
        self.pix_per_cell = pix_per_cell
        self.cell_per_block = cell_per_block
        self.spatial_size = spatial_size
        self.hist_bins = hist_bins
|
flexible
|
{
"blob_id": "9188d58a6d9e832b8908b823d57249fcdd80ff51",
"index": 171,
"step-1": "<mask token>\n",
"step-2": "class Classifier(object):\n <mask token>\n <mask token>\n",
"step-3": "class Classifier(object):\n <mask token>\n\n def __init__(self, classifier, scaler, orient, color_space,\n pix_per_cell, cell_per_block, spatial_size, hist_bins):\n \"\"\"\n Initializes an instance.\n Parameters\n ----------\n classifier : Trained SciPy classifier for detecting vehicles.\n scaler : SciPy scaler to apply to X.\n \"\"\"\n self.classifier = classifier\n self.scaler = scaler\n self.color_space = color_space\n self.orient = orient\n self.pix_per_cell = pix_per_cell\n self.cell_per_block = cell_per_block\n self.spatial_size = spatial_size\n self.hist_bins = hist_bins\n",
"step-4": "class Classifier(object):\n \"\"\"\n Trained classifier\n \"\"\"\n\n def __init__(self, classifier, scaler, orient, color_space,\n pix_per_cell, cell_per_block, spatial_size, hist_bins):\n \"\"\"\n Initializes an instance.\n Parameters\n ----------\n classifier : Trained SciPy classifier for detecting vehicles.\n scaler : SciPy scaler to apply to X.\n \"\"\"\n self.classifier = classifier\n self.scaler = scaler\n self.color_space = color_space\n self.orient = orient\n self.pix_per_cell = pix_per_cell\n self.cell_per_block = cell_per_block\n self.spatial_size = spatial_size\n self.hist_bins = hist_bins\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: UTF-8 -*-
'''
Entry script: train a DQN-family agent.

Available model names include:
DQN, DDQN, PDQN, PDDQN, DQN_PER, DDQN_PER, DQN_InAday, DQN_PER_Ipm, ...
'''
# ----------- Classic-control games ------------
# Environments: CartPole-v1, MountainCar-v0, Acrobot-v1, Pendulum-v0
# Example (commented out; uncomment to train on a control task instead):
# from run_ContolGame import run_Game
# run_Game('DQN', 'CartPole-v1', episodes=400) # model,env,episodes
# ----------- Atari games ------------
from run_AtariGame import run_Game
run_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001) # model,env,lifes,episodes
|
normal
|
{
"blob_id": "f49a133fa94aae791ef0f1eec54cf0629f45a0ed",
"index": 5153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrun_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001)\n",
"step-3": "<mask token>\nfrom run_AtariGame import run_Game\nrun_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001)\n",
"step-4": "# -*- coding: UTF-8 -*-\n'''\nmodel = DQN,DDQN,PDQN,PDDQN,DQN_PER,DDQN_PER,DQN_InAday,DQN_PER_Ipm...\n'''\n# -----------ContolGame------------\n# CartPole - v1, MountainCar - v0, Acrobot - v1, Pendulum - v0\n# from run_ContolGame import run_Game\n# run_Game('DQN', 'CartPole-v1', episodes=400) # model,env,episodes\n\n# -----------AtariGame - ------------\nfrom run_AtariGame import run_Game\nrun_Game('DQN_PER', 'Breakout', lifes=5, episodes=40001) # model,env,lifes,episodes\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
BATCH_START=0     # start index of the next batch along the time axis (advanced by get_batch)
TIME_STEPS=20     # sequence length fed to the RNN per sample
BATCH_SIZE=50     # samples per training batch
INPUT_SIZE=1      # feature width per time step (scalar sin value)
OUTPUT_SIZE=1     # target width per time step (scalar cos value)
CELL_SIZE=10      # number of hidden units in the LSTM cell
LR=0.006          # learning rate for the Adam optimizer
def get_batch():
    """Produce the next (seq, res, xs) batch of sine inputs / cosine targets.

    Advances the module-level BATCH_START so consecutive calls yield
    successive windows along the time axis. seq and res are returned with
    shape (BATCH_SIZE, TIME_STEPS, 1); xs keeps shape (BATCH_SIZE, TIME_STEPS).
    """
    global BATCH_START, TIME_STEPS
    # Time axis for this batch, shaped (batch, steps) and scaled by 10*pi.
    xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE)
    xs = xs.reshape((BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)
    seq, res = np.sin(xs), np.cos(xs)
    # Slide the window forward so the next call produces fresh data.
    BATCH_START += TIME_STEPS
    # Add a trailing feature axis: (batch_size, time_steps, input_size).
    return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]
#def RNN LSTM Structure
class LSTMRNN(object):
    """Single-layer LSTM regression network built with the TF1 graph API.

    The constructor wires together: input placeholders, a hidden input
    projection, the LSTM cell, a hidden output projection, the sequence
    loss, and an Adam training op (learning rate taken from module-level LR).
    Exposes ``pred``, ``cost``, ``cell_init_state`` and ``cell_final_state``
    for use by the training session.
    """
    def __init__(self,n_steps,input_size,output_size,cell_size,batch_size):
        """Build the full graph.

        n_steps: time steps per sample; input_size / output_size: feature
        width per step; cell_size: LSTM hidden units; batch_size: samples
        per batch (needed to create the zero initial state).
        """
        self.n_steps=n_steps
        self.input_size=input_size
        self.output_size=output_size
        self.cell_size=cell_size
        self.batch_size=batch_size
        with tf.name_scope('inputs'):
            # Placeholders shaped (batch, steps, features).
            self.xs=tf.placeholder(tf.float32,[None,n_steps,input_size],name='xs')
            self.ys=tf.placeholder(tf.float32,[None,n_steps,input_size],name='ys')
        with tf.variable_scope('in_hidden'):
            self.add_input_layer()
        with tf.variable_scope('LSTM_cell'):
            self.add_cell()
        with tf.variable_scope('out_hidden'):
            self.add_output_layer()
        with tf.name_scope('cost'):
            self.compute_cost()
        with tf.name_scope('train'):
            self.train_op=tf.train.AdamOptimizer(LR).minimize(self.cost)
    #add input layer: linear projection from input_size to cell_size
    def add_input_layer(self):
        """Project the inputs to the cell width; result kept in self.l_in_y."""
        # reshape (batch, step, input) => (batch*step, input) for the matmul
        l_in_x=tf.reshape(self.xs,[-1,self.input_size],name='2_2D')
        Ws_in=self._weight_variable([self.input_size,self.cell_size])
        bs_in=self._bias_variable([self.cell_size])
        with tf.name_scope('Wx_plus_b'):
            l_in_y=tf.matmul(l_in_x,Ws_in)+bs_in
        # back to (batch, step, cell_size) for dynamic_rnn
        self.l_in_y=tf.reshape(l_in_y,[-1,self.n_steps,self.cell_size],name='2_3D')
    #add the recurrent cell
    def add_cell(self):
        """Run a BasicLSTMCell over l_in_y; keeps outputs and final state."""
        lstm_cell=tf.contrib.rnn.BasicLSTMCell(self.cell_size,forget_bias=1.0,state_is_tuple=True)
        with tf.name_scope('initial_state'):
            self.cell_init_state=lstm_cell.zero_state(self.batch_size,dtype=tf.float32)
        # time_major=False: inputs keep the (batch, steps, features) layout.
        self.cell_outputs,self.cell_final_state=tf.nn.dynamic_rnn(lstm_cell,self.l_in_y,initial_state=self.cell_init_state,time_major=False)
    #add output layer: linear projection from cell_size to output_size
    def add_output_layer(self):
        """Project cell outputs to the target width; result kept in self.pred."""
        l_out_x=tf.reshape(self.cell_outputs,[-1,self.cell_size],name='2_2D')
        Ws_out=self._weight_variable([self.cell_size,self.output_size])
        bs_out=self._bias_variable([self.output_size,])
        with tf.name_scope('Wx_plus_b'):
            self.pred=tf.matmul(l_out_x,Ws_out)+bs_out
    def compute_cost(self):
        """Batch-averaged squared-error sequence loss, stored in self.cost."""
        # sequence_loss_by_example with ms_error as the loss function gives
        # per-position squared error; the weights are all ones.
        losses=tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [tf.reshape(self.pred,[-1],name='reshape_pred')],
            [tf.reshape(self.ys,[-1],name='reshape_target')],
            [tf.ones([self.batch_size*self.n_steps],dtype=tf.float32)],
            average_across_timesteps=True,
            softmax_loss_function=self.ms_error,
            name='losses'
        )
        with tf.name_scope('average_cost'):
            # Average the summed loss over the batch.
            self.cost=tf.div(
                tf.reduce_sum(losses,name='losses_sum'),
                self.batch_size,
                name='average_cost'
            )
            tf.summary.scalar('cost',self.cost)
    @staticmethod
    def ms_error(labels,logits):
        """Element-wise squared difference, used as the sequence loss."""
        return tf.square(tf.subtract(labels,logits))
    def _weight_variable(self,shape,name='weights'):
        # Weights drawn from N(0, 1).
        initializer=tf.random_normal_initializer(mean=0.,stddev=1.,)
        return tf.get_variable(shape=shape,initializer=initializer,name=name)
    def _bias_variable(self,shape,name='biases'):
        # Biases start at a small positive constant.
        initializer=tf.constant_initializer(0.1)
        return tf.get_variable(shape=shape,initializer=initializer,name=name)
#train: build the graph, then fit for 200 batches with live plotting
if __name__=='__main__':
    model=LSTMRNN(TIME_STEPS,INPUT_SIZE,OUTPUT_SIZE,CELL_SIZE,BATCH_SIZE)
    sess=tf.Session()
    #merge all summaries for tensorboard and open the log writer
    merged=tf.summary.merge_all()
    writer=tf.summary.FileWriter("lstmlogs",sess.graph)
    sess.run(tf.global_variables_initializer())
    #interactive mode so the figure refreshes during training
    plt.ion()
    plt.show()
    #train for 200 batches
    for i in range(200):
        seq,res,xs=get_batch()
        if i==0:
            # First batch: let the cell start from its zero initial state.
            feed_dict={model.xs:seq,model.ys:res,}
        else:
            # Later batches: carry the final LSTM state across batches.
            feed_dict={model.xs:seq,model.ys:res,model.cell_init_state:state}
        #one optimization step; also fetch cost, final state and predictions
        _,cost,state,pred=sess.run([model.train_op,model.cost,model.cell_final_state,model.pred],feed_dict=feed_dict)
        #plot target (solid red) vs. prediction (dashed blue) for sample 0
        plt.plot(xs[0,:],res[0].flatten(),'r',xs[0,:],pred.flatten()[:TIME_STEPS],'b--')
        plt.ylim((-1.2,1.2))
        plt.draw()
        plt.pause(0.3)
        if i%20==0:
            # Every 20 steps: report cost (4 decimals) and log summaries.
            print('cost',round(cost,4))
            result=sess.run(merged,feed_dict)
            writer.add_summary(result,i)
|
normal
|
{
"blob_id": "e54078f21176bbb7accb4164e7b56633b13cc693",
"index": 8803,
"step-1": "<mask token>\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n <mask token>\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n 
self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_batch():\n global BATCH_START, TIME_STEPS\n xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE).reshape(\n (BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)\n seq = np.sin(xs)\n res = np.cos(xs)\n BATCH_START += TIME_STEPS\n return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n\n def add_input_layer(self):\n l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')\n Ws_in = self._weight_variable([self.input_size, self.cell_size])\n bs_in = self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in\n self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size],\n name='2_3D')\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n 
Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\nif __name__ == '__main__':\n model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)\n sess = tf.Session()\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('lstmlogs', sess.graph)\n sess.run(tf.global_variables_initializer())\n plt.ion()\n plt.show()\n for i in range(200):\n seq, res, xs = get_batch()\n if i == 0:\n feed_dict = {model.xs: seq, model.ys: res}\n else:\n feed_dict = {model.xs: seq, model.ys: res, model.\n cell_init_state: state}\n _, cost, state, pred = sess.run([model.train_op, model.cost, model.\n cell_final_state, model.pred], feed_dict=feed_dict)\n plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[\n :TIME_STEPS], 'b--')\n plt.ylim((-1.2, 1.2))\n plt.draw()\n plt.pause(0.3)\n if i % 20 == 0:\n print('cost', 
round(cost, 4))\n result = sess.run(merged, feed_dict)\n writer.add_summary(result, i)\n",
"step-3": "<mask token>\nBATCH_START = 0\nTIME_STEPS = 20\nBATCH_SIZE = 50\nINPUT_SIZE = 1\nOUTPUT_SIZE = 1\nCELL_SIZE = 10\nLR = 0.006\n\n\ndef get_batch():\n global BATCH_START, TIME_STEPS\n xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE).reshape(\n (BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)\n seq = np.sin(xs)\n res = np.cos(xs)\n BATCH_START += TIME_STEPS\n return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n\n def add_input_layer(self):\n l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')\n Ws_in = self._weight_variable([self.input_size, self.cell_size])\n bs_in = self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in\n self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size],\n name='2_3D')\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n 
def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\nif __name__ == '__main__':\n model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)\n sess = tf.Session()\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('lstmlogs', sess.graph)\n sess.run(tf.global_variables_initializer())\n plt.ion()\n plt.show()\n for i in range(200):\n seq, res, xs = get_batch()\n if i == 0:\n feed_dict = {model.xs: seq, model.ys: res}\n else:\n feed_dict = {model.xs: seq, model.ys: res, model.\n cell_init_state: state}\n _, cost, state, pred = sess.run([model.train_op, model.cost, model.\n cell_final_state, model.pred], feed_dict=feed_dict)\n plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[\n 
:TIME_STEPS], 'b--')\n plt.ylim((-1.2, 1.2))\n plt.draw()\n plt.pause(0.3)\n if i % 20 == 0:\n print('cost', round(cost, 4))\n result = sess.run(merged, feed_dict)\n writer.add_summary(result, i)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nBATCH_START = 0\nTIME_STEPS = 20\nBATCH_SIZE = 50\nINPUT_SIZE = 1\nOUTPUT_SIZE = 1\nCELL_SIZE = 10\nLR = 0.006\n\n\ndef get_batch():\n global BATCH_START, TIME_STEPS\n xs = np.arange(BATCH_START, BATCH_START + TIME_STEPS * BATCH_SIZE).reshape(\n (BATCH_SIZE, TIME_STEPS)) / (10 * np.pi)\n seq = np.sin(xs)\n res = np.cos(xs)\n BATCH_START += TIME_STEPS\n return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]\n\n\nclass LSTMRNN(object):\n\n def __init__(self, n_steps, input_size, output_size, cell_size, batch_size\n ):\n self.n_steps = n_steps\n self.input_size = input_size\n self.output_size = output_size\n self.cell_size = cell_size\n self.batch_size = batch_size\n with tf.name_scope('inputs'):\n self.xs = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='xs')\n self.ys = tf.placeholder(tf.float32, [None, n_steps, input_size\n ], name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op = tf.train.AdamOptimizer(LR).minimize(self.cost)\n\n def add_input_layer(self):\n l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D')\n Ws_in = self._weight_variable([self.input_size, self.cell_size])\n bs_in = self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in\n self.l_in_y = tf.reshape(l_in_y, [-1, self.n_steps, self.cell_size],\n name='2_3D')\n\n def add_cell(self):\n lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size,\n forget_bias=1.0, state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state = lstm_cell.zero_state(self.batch_size,\n dtype=tf.float32)\n self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn(lstm_cell,\n 
self.l_in_y, initial_state=self.cell_init_state, time_major=False)\n\n def add_output_layer(self):\n l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name=\n '2_2D')\n Ws_out = self._weight_variable([self.cell_size, self.output_size])\n bs_out = self._bias_variable([self.output_size])\n with tf.name_scope('Wx_plus_b'):\n self.pred = tf.matmul(l_out_x, Ws_out) + bs_out\n\n def compute_cost(self):\n losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.\n reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(\n self.ys, [-1], name='reshape_target')], [tf.ones([self.\n batch_size * self.n_steps], dtype=tf.float32)],\n average_across_timesteps=True, softmax_loss_function=self.\n ms_error, name='losses')\n with tf.name_scope('average_cost'):\n self.cost = tf.div(tf.reduce_sum(losses, name='losses_sum'),\n self.batch_size, name='average_cost')\n tf.summary.scalar('cost', self.cost)\n\n @staticmethod\n def ms_error(labels, logits):\n return tf.square(tf.subtract(labels, logits))\n\n def _weight_variable(self, shape, name='weights'):\n initializer = tf.random_normal_initializer(mean=0.0, stddev=1.0)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n def _bias_variable(self, shape, name='biases'):\n initializer = tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape, initializer=initializer, name=name)\n\n\nif __name__ == '__main__':\n model = LSTMRNN(TIME_STEPS, INPUT_SIZE, OUTPUT_SIZE, CELL_SIZE, BATCH_SIZE)\n sess = tf.Session()\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('lstmlogs', sess.graph)\n sess.run(tf.global_variables_initializer())\n plt.ion()\n plt.show()\n for i in range(200):\n seq, res, xs = get_batch()\n if i == 0:\n feed_dict = {model.xs: seq, model.ys: res}\n else:\n feed_dict = {model.xs: seq, model.ys: res, model.\n cell_init_state: state}\n _, cost, state, pred = sess.run([model.train_op, model.cost, model.\n cell_final_state, model.pred], feed_dict=feed_dict)\n 
plt.plot(xs[0, :], res[0].flatten(), 'r', xs[0, :], pred.flatten()[\n :TIME_STEPS], 'b--')\n plt.ylim((-1.2, 1.2))\n plt.draw()\n plt.pause(0.3)\n if i % 20 == 0:\n print('cost', round(cost, 4))\n result = sess.run(merged, feed_dict)\n writer.add_summary(result, i)\n",
"step-5": "import tensorflow as tf\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nBATCH_START=0\nTIME_STEPS=20\nBATCH_SIZE=50\nINPUT_SIZE=1\nOUTPUT_SIZE=1\nCELL_SIZE=10\nLR=0.006\n\n#generate data\ndef get_batch():\n global BATCH_START,TIME_STEPS\n xs=np.arange(BATCH_START,BATCH_START+TIME_STEPS*BATCH_SIZE).reshape((BATCH_SIZE,TIME_STEPS))/(10*np.pi)\n seq=np.sin(xs)\n res=np.cos(xs)\n #data move one\n BATCH_START+=TIME_STEPS\n # all return shape is (batch_size,time_step,input_size)\n return [seq[:,:,np.newaxis],res[:,:,np.newaxis],xs]\n\n#def RNN LSTM Structure\nclass LSTMRNN(object):\n def __init__(self,n_steps,input_size,output_size,cell_size,batch_size):\n self.n_steps=n_steps\n self.input_size=input_size\n self.output_size=output_size\n self.cell_size=cell_size\n self.batch_size=batch_size\n with tf.name_scope('inputs'):\n self.xs=tf.placeholder(tf.float32,[None,n_steps,input_size],name='xs')\n self.ys=tf.placeholder(tf.float32,[None,n_steps,input_size],name='ys')\n with tf.variable_scope('in_hidden'):\n self.add_input_layer()\n with tf.variable_scope('LSTM_cell'):\n self.add_cell()\n with tf.variable_scope('out_hidden'):\n self.add_output_layer()\n with tf.name_scope('cost'):\n self.compute_cost()\n with tf.name_scope('train'):\n self.train_op=tf.train.AdamOptimizer(LR).minimize(self.cost)\n \n#add input layer\n def add_input_layer(self):\n #shape(batch,step,input)=>(batch*step,input)\n l_in_x=tf.reshape(self.xs,[-1,self.input_size],name='2_2D')\n Ws_in=self._weight_variable([self.input_size,self.cell_size])\n bs_in=self._bias_variable([self.cell_size])\n with tf.name_scope('Wx_plus_b'):\n l_in_y=tf.matmul(l_in_x,Ws_in)+bs_in\n self.l_in_y=tf.reshape(l_in_y,[-1,self.n_steps,self.cell_size],name='2_3D')\n#add cell\n def add_cell(self):\n lstm_cell=tf.contrib.rnn.BasicLSTMCell(self.cell_size,forget_bias=1.0,state_is_tuple=True)\n with tf.name_scope('initial_state'):\n self.cell_init_state=lstm_cell.zero_state(self.batch_size,dtype=tf.float32)\n 
self.cell_outputs,self.cell_final_state=tf.nn.dynamic_rnn(lstm_cell,self.l_in_y,initial_state=self.cell_init_state,time_major=False)\n#add output layer\n def add_output_layer(self):\n l_out_x=tf.reshape(self.cell_outputs,[-1,self.cell_size],name='2_2D')\n Ws_out=self._weight_variable([self.cell_size,self.output_size])\n bs_out=self._bias_variable([self.output_size,])\n with tf.name_scope('Wx_plus_b'):\n self.pred=tf.matmul(l_out_x,Ws_out)+bs_out\n \n def compute_cost(self):\n losses=tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [tf.reshape(self.pred,[-1],name='reshape_pred')],\n [tf.reshape(self.ys,[-1],name='reshape_target')],\n [tf.ones([self.batch_size*self.n_steps],dtype=tf.float32)],\n average_across_timesteps=True,\n softmax_loss_function=self.ms_error,\n name='losses'\n )\n with tf.name_scope('average_cost'):\n self.cost=tf.div(\n tf.reduce_sum(losses,name='losses_sum'),\n self.batch_size,\n name='average_cost'\n )\n tf.summary.scalar('cost',self.cost)\n\n @staticmethod\n def ms_error(labels,logits):\n return tf.square(tf.subtract(labels,logits))\n def _weight_variable(self,shape,name='weights'):\n initializer=tf.random_normal_initializer(mean=0.,stddev=1.,)\n return tf.get_variable(shape=shape,initializer=initializer,name=name)\n \n def _bias_variable(self,shape,name='biases'):\n initializer=tf.constant_initializer(0.1)\n return tf.get_variable(shape=shape,initializer=initializer,name=name)\n\n#train\nif __name__=='__main__':\n model=LSTMRNN(TIME_STEPS,INPUT_SIZE,OUTPUT_SIZE,CELL_SIZE,BATCH_SIZE)\n sess=tf.Session()\n #merge for tensorboard\n merged=tf.summary.merge_all()\n writer=tf.summary.FileWriter(\"lstmlogs\",sess.graph)\n sess.run(tf.global_variables_initializer())\n\n #visiable\n plt.ion()\n plt.show()\n\n #train for 200\n for i in range(200):\n seq,res,xs=get_batch()\n if i==0:\n feed_dict={model.xs:seq,model.ys:res,} \n else:\n feed_dict={model.xs:seq,model.ys:res,model.cell_init_state:state}\n #train\n 
_,cost,state,pred=sess.run([model.train_op,model.cost,model.cell_final_state,model.pred],feed_dict=feed_dict)\n\n\n #plotting\n plt.plot(xs[0,:],res[0].flatten(),'r',xs[0,:],pred.flatten()[:TIME_STEPS],'b--')\n plt.ylim((-1.2,1.2))\n plt.draw()\n plt.pause(0.3)\n\n if i%20==0:\n # 4 \n print('cost',round(cost,4))\n result=sess.run(merged,feed_dict)\n writer.add_summary(result,i)\n\n\n\n\n\n\n\n",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
text=open('mytext.txt','w')
x=text.write("I like coding\nit is a new part\nof my life!!!")
text=open('mytext.txt')
read=text.readlines()
i=0
counter=0
total=0
print("number of lines :"+str(len(read)))
while i<=len(read)-1:
counter=counter+read[i].count('\n') + read[i].count(' ')
total+=len(read[i])-read[i].count('\n') - read[i].count(' ')
i+=1
counter+=1
print('Number of words is :'+str(counter))
print('total number of letters are :' +str(total))
|
normal
|
{
"blob_id": "5ad8db85f4f705173cf5d0649af6039ebe1544b2",
"index": 7488,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('number of lines :' + str(len(read)))\nwhile i <= len(read) - 1:\n counter = counter + read[i].count('\\n') + read[i].count(' ')\n total += len(read[i]) - read[i].count('\\n') - read[i].count(' ')\n i += 1\ncounter += 1\nprint('Number of words is :' + str(counter))\nprint('total number of letters are :' + str(total))\n",
"step-3": "text = open('mytext.txt', 'w')\nx = text.write(\"\"\"I like coding\nit is a new part\nof my life!!!\"\"\")\ntext = open('mytext.txt')\nread = text.readlines()\ni = 0\ncounter = 0\ntotal = 0\nprint('number of lines :' + str(len(read)))\nwhile i <= len(read) - 1:\n counter = counter + read[i].count('\\n') + read[i].count(' ')\n total += len(read[i]) - read[i].count('\\n') - read[i].count(' ')\n i += 1\ncounter += 1\nprint('Number of words is :' + str(counter))\nprint('total number of letters are :' + str(total))\n",
"step-4": "text=open('mytext.txt','w')\nx=text.write(\"I like coding\\nit is a new part\\nof my life!!!\")\ntext=open('mytext.txt')\nread=text.readlines()\ni=0\ncounter=0\ntotal=0\nprint(\"number of lines :\"+str(len(read)))\n\nwhile i<=len(read)-1:\n counter=counter+read[i].count('\\n') + read[i].count(' ')\n total+=len(read[i])-read[i].count('\\n') - read[i].count(' ')\n i+=1\ncounter+=1\nprint('Number of words is :'+str(counter))\nprint('total number of letters are :' +str(total))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__version__ = '0.2.11'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
__version__ = '0.2.11'
from climlab.utils import constants
from climlab.utils import thermo, legendre
from climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel
from climlab.model.ebm import EBM, EBM_annual, EBM_seasonal
from climlab.domain import domain
from climlab.domain.field import Field, global_mean
from climlab.domain.axis import Axis
from climlab.process.process import Process, process_like, get_axes
from climlab.process.time_dependent_process import TimeDependentProcess
from climlab.process.implicit import ImplicitProcess
from climlab.process.diagnostic import DiagnosticProcess
from climlab.process.energy_budget import EnergyBudget
<|reserved_special_token_1|>
__version__ = '0.2.11'
# This list defines all the modules that will be loaded if a user invokes
# from climLab import *
# totally out of date!
#__all__ = ["constants", "thermo", "orbital_table",
# "long_orbital_table", "insolation", "ebm",
# "column", "convadj"]
#from climlab import radiation
# this should ensure that we can still import constants.py as climlab.constants
from climlab.utils import constants
from climlab.utils import thermo, legendre
# some more useful shorcuts
#from climlab.model import ebm, column
from climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel
from climlab.model.ebm import EBM, EBM_annual, EBM_seasonal
from climlab.domain import domain
from climlab.domain.field import Field, global_mean
from climlab.domain.axis import Axis
from climlab.process.process import Process, process_like, get_axes
from climlab.process.time_dependent_process import TimeDependentProcess
from climlab.process.implicit import ImplicitProcess
from climlab.process.diagnostic import DiagnosticProcess
from climlab.process.energy_budget import EnergyBudget
|
flexible
|
{
"blob_id": "8251a9c798b3cdc2f374d0a0406ccfaa11b7c5e3",
"index": 5699,
"step-1": "<mask token>\n",
"step-2": "__version__ = '0.2.11'\n<mask token>\n",
"step-3": "__version__ = '0.2.11'\nfrom climlab.utils import constants\nfrom climlab.utils import thermo, legendre\nfrom climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel\nfrom climlab.model.ebm import EBM, EBM_annual, EBM_seasonal\nfrom climlab.domain import domain\nfrom climlab.domain.field import Field, global_mean\nfrom climlab.domain.axis import Axis\nfrom climlab.process.process import Process, process_like, get_axes\nfrom climlab.process.time_dependent_process import TimeDependentProcess\nfrom climlab.process.implicit import ImplicitProcess\nfrom climlab.process.diagnostic import DiagnosticProcess\nfrom climlab.process.energy_budget import EnergyBudget\n",
"step-4": "__version__ = '0.2.11'\n\n# This list defines all the modules that will be loaded if a user invokes\n# from climLab import *\n\n# totally out of date!\n\n#__all__ = [\"constants\", \"thermo\", \"orbital_table\",\n# \"long_orbital_table\", \"insolation\", \"ebm\",\n# \"column\", \"convadj\"]\n\n#from climlab import radiation\n# this should ensure that we can still import constants.py as climlab.constants \nfrom climlab.utils import constants\nfrom climlab.utils import thermo, legendre\n# some more useful shorcuts\n#from climlab.model import ebm, column\nfrom climlab.model.column import GreyRadiationModel, RadiativeConvectiveModel, BandRCModel\nfrom climlab.model.ebm import EBM, EBM_annual, EBM_seasonal\nfrom climlab.domain import domain\nfrom climlab.domain.field import Field, global_mean\nfrom climlab.domain.axis import Axis\nfrom climlab.process.process import Process, process_like, get_axes\nfrom climlab.process.time_dependent_process import TimeDependentProcess\nfrom climlab.process.implicit import ImplicitProcess\nfrom climlab.process.diagnostic import DiagnosticProcess\nfrom climlab.process.energy_budget import EnergyBudget\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Content-type: text/html\n')
<|reserved_special_token_0|>
if 'echooUser' in str(os.environ):
userName = EchooFunctions.getUserName()
userName = userName[0]
userID = EchooFunctions.getUserID(cursor, userName)
<|reserved_special_token_0|>
if userName != '':
if EchooFunctions.checkUserType(cursor, userName) == 'administrator':
admin = True
<|reserved_special_token_0|>
if userID != '' and receiverID != '':
try:
SQL = (
'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '
)
SQL += 'm.sender and m.receiver = ' + str(userID
) + ' and m.sender = ' + str(receiverID)
SQL += (
' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '
)
SQL += 'm.sender and m.receiver = ' + str(receiverID
) + ' and m.sender = ' + str(userID)
SQL += ' Order By messageID ;'
cursor.execute(SQL)
results = cursor.fetchall()
except Exception as e:
print('<p>Something went wrong with the first SQL!</p>')
print(SQL, 'Error:', e)
else:
if results:
count = 5
for row in results:
word_count = 0
specialChar = row[3]
specialChar2 = ''
specialChar = EchooFunctions.returnSpecialChara(specialChar)
for x in specialChar:
if word_count <= 20:
specialChar2 += x
word_count += 1
else:
specialChar2 += x + '<p>'
word_count = 0
if count >= 5:
chatroom += '<li class="chatDate">' + str(row[4]) + '</li>'
count = 0
if str(row[0]) == str(userID):
count += 1
chatroom += ('<li class="mainUser">' +
'<a href="userProfile.cgi?user=' + str(row[0]) +
'">' + row[1] + '</a><img src="images/user/' + row[
2] + '" alt="club1">')
chatroom += ('<br><div class="messageLine">' +
specialChar2 + '</div></li>')
else:
count += 1
chatroom += (
'<li class="otherUser"><img src="images/user/' +
row[2] + '" alt="club1">')
chatroom += ('<a href="userProfile.cgi?userid=' + str(
row[0]) + '">' + row[1] +
'</a><br><div class="messageLine">' + specialChar2 +
'</div></li>')
if userID == '' or receiverID == '':
content = (
"<p>You don't have right access to this page</p>\n<a href='index.cgi'></a>"
)
print(content)
print(chatroom)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Content-type: text/html\n')
form = cgi.FieldStorage()
user = 'i494f18_team34'
db_pass = 'my+sql=i494f18_team34'
db_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,
passwd=db_pass, db=user)
cursor = db_con.cursor()
receiverID = form.getfirst('user', '')
userName = ''
userID = ''
if 'echooUser' in str(os.environ):
userName = EchooFunctions.getUserName()
userName = userName[0]
userID = EchooFunctions.getUserID(cursor, userName)
admin = False
if userName != '':
if EchooFunctions.checkUserType(cursor, userName) == 'administrator':
admin = True
friend = ''
friendList = ''
chatroom = ''
userList = []
if userID != '' and receiverID != '':
try:
SQL = (
'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '
)
SQL += 'm.sender and m.receiver = ' + str(userID
) + ' and m.sender = ' + str(receiverID)
SQL += (
' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '
)
SQL += 'm.sender and m.receiver = ' + str(receiverID
) + ' and m.sender = ' + str(userID)
SQL += ' Order By messageID ;'
cursor.execute(SQL)
results = cursor.fetchall()
except Exception as e:
print('<p>Something went wrong with the first SQL!</p>')
print(SQL, 'Error:', e)
else:
if results:
count = 5
for row in results:
word_count = 0
specialChar = row[3]
specialChar2 = ''
specialChar = EchooFunctions.returnSpecialChara(specialChar)
for x in specialChar:
if word_count <= 20:
specialChar2 += x
word_count += 1
else:
specialChar2 += x + '<p>'
word_count = 0
if count >= 5:
chatroom += '<li class="chatDate">' + str(row[4]) + '</li>'
count = 0
if str(row[0]) == str(userID):
count += 1
chatroom += ('<li class="mainUser">' +
'<a href="userProfile.cgi?user=' + str(row[0]) +
'">' + row[1] + '</a><img src="images/user/' + row[
2] + '" alt="club1">')
chatroom += ('<br><div class="messageLine">' +
specialChar2 + '</div></li>')
else:
count += 1
chatroom += (
'<li class="otherUser"><img src="images/user/' +
row[2] + '" alt="club1">')
chatroom += ('<a href="userProfile.cgi?userid=' + str(
row[0]) + '">' + row[1] +
'</a><br><div class="messageLine">' + specialChar2 +
'</div></li>')
if userID == '' or receiverID == '':
content = (
"<p>You don't have right access to this page</p>\n<a href='index.cgi'></a>"
)
print(content)
print(chatroom)
<|reserved_special_token_1|>
import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os
print('Content-type: text/html\n')
form = cgi.FieldStorage()
user = 'i494f18_team34'
db_pass = 'my+sql=i494f18_team34'
db_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,
passwd=db_pass, db=user)
cursor = db_con.cursor()
receiverID = form.getfirst('user', '')
userName = ''
userID = ''
if 'echooUser' in str(os.environ):
userName = EchooFunctions.getUserName()
userName = userName[0]
userID = EchooFunctions.getUserID(cursor, userName)
admin = False
if userName != '':
if EchooFunctions.checkUserType(cursor, userName) == 'administrator':
admin = True
friend = ''
friendList = ''
chatroom = ''
userList = []
if userID != '' and receiverID != '':
try:
SQL = (
'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '
)
SQL += 'm.sender and m.receiver = ' + str(userID
) + ' and m.sender = ' + str(receiverID)
SQL += (
' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '
)
SQL += 'm.sender and m.receiver = ' + str(receiverID
) + ' and m.sender = ' + str(userID)
SQL += ' Order By messageID ;'
cursor.execute(SQL)
results = cursor.fetchall()
except Exception as e:
print('<p>Something went wrong with the first SQL!</p>')
print(SQL, 'Error:', e)
else:
if results:
count = 5
for row in results:
word_count = 0
specialChar = row[3]
specialChar2 = ''
specialChar = EchooFunctions.returnSpecialChara(specialChar)
for x in specialChar:
if word_count <= 20:
specialChar2 += x
word_count += 1
else:
specialChar2 += x + '<p>'
word_count = 0
if count >= 5:
chatroom += '<li class="chatDate">' + str(row[4]) + '</li>'
count = 0
if str(row[0]) == str(userID):
count += 1
chatroom += ('<li class="mainUser">' +
'<a href="userProfile.cgi?user=' + str(row[0]) +
'">' + row[1] + '</a><img src="images/user/' + row[
2] + '" alt="club1">')
chatroom += ('<br><div class="messageLine">' +
specialChar2 + '</div></li>')
else:
count += 1
chatroom += (
'<li class="otherUser"><img src="images/user/' +
row[2] + '" alt="club1">')
chatroom += ('<a href="userProfile.cgi?userid=' + str(
row[0]) + '">' + row[1] +
'</a><br><div class="messageLine">' + specialChar2 +
'</div></li>')
if userID == '' or receiverID == '':
content = (
"<p>You don't have right access to this page</p>\n<a href='index.cgi'></a>"
)
print(content)
print(chatroom)
<|reserved_special_token_1|>
#! /usr/bin/env python3
import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os
print ('Content-type: text/html\n')
form = cgi.FieldStorage()
#database connection
user = "i494f18_team34"
db_pass = "my+sql=i494f18_team34"
db_con = MySQLdb.connect(host="db.soic.indiana.edu", port = 3306, user=user, passwd=db_pass, db=user)
cursor = db_con.cursor()
receiverID = form.getfirst('user','')
userName = ""
userID = ""
if "echooUser" in str(os.environ):
userName = EchooFunctions.getUserName()
userName = userName[0]
userID = EchooFunctions.getUserID(cursor, userName)
admin = False
#change the status of veriable
if userName != "":
if EchooFunctions.checkUserType(cursor, userName) == "administrator":
admin = True
#main contents to insert
friend = ""
friendList = ""
chatroom = ""
userList = []
if userID != "" and receiverID !="":
try:
SQL = "select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = "
SQL+= "m.sender and m.receiver = "+str(userID)+" and m.sender = "+str(receiverID)
SQL+= " Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = "
SQL+= "m.sender and m.receiver = "+str(receiverID)+" and m.sender = "+str(userID)
SQL+=" Order By messageID ;"
cursor.execute(SQL)
results = cursor.fetchall()
except Exception as e:
print('<p>Something went wrong with the first SQL!</p>')
print(SQL, "Error:", e)
else:
if results:
count = 5
for row in results:
word_count = 0
specialChar=row[3]
specialChar2 = ""
specialChar=EchooFunctions.returnSpecialChara(specialChar)
for x in specialChar:
if word_count<=20:
specialChar2 += x
word_count+=1
else:
specialChar2 += x +"<p>"
word_count = 0
if count >= 5:
chatroom+='<li class="chatDate">'+str(row[4])+'</li>'
count=0
if str(row[0]) ==str(userID):
count+=1
chatroom+='<li class="mainUser">'+'<a href="userProfile.cgi?user='+str(row[0])+'">'+row[1]+'</a><img src="images/user/'+row[2]+'" alt="club1">'
chatroom+='<br><div class="messageLine">'+specialChar2+'</div></li>'
else:
count+=1
chatroom+='<li class="otherUser"><img src="images/user/'+row[2]+'" alt="club1">'
chatroom+='<a href="userProfile.cgi?userid='+str(row[0])+'">'+row[1]+'</a><br><div class="messageLine">'+specialChar2+'</div></li>'
if userID == "" or receiverID =="":
content ="""<p>You don't have right access to this page</p>
<a href='index.cgi'></a>"""
print(content)
print(chatroom)
|
flexible
|
{
"blob_id": "dc88686d3cbb4223b4de6847bf4fc29b93054b00",
"index": 495,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Content-type: text/html\\n')\n<mask token>\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\n<mask token>\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\n<mask token>\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + 
'\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-3": "<mask token>\nprint('Content-type: text/html\\n')\nform = cgi.FieldStorage()\nuser = 'i494f18_team34'\ndb_pass = 'my+sql=i494f18_team34'\ndb_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,\n passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user', '')\nuserName = ''\nuserID = ''\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\nadmin = False\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\nfriend = ''\nfriendList = ''\nchatroom = ''\nuserList = []\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + str(row[0]) +\n '\">' + row[1] + '</a><img 
src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-4": "import EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os\nprint('Content-type: text/html\\n')\nform = cgi.FieldStorage()\nuser = 'i494f18_team34'\ndb_pass = 'my+sql=i494f18_team34'\ndb_con = MySQLdb.connect(host='db.soic.indiana.edu', port=3306, user=user,\n passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user', '')\nuserName = ''\nuserID = ''\nif 'echooUser' in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\nadmin = False\nif userName != '':\n if EchooFunctions.checkUserType(cursor, userName) == 'administrator':\n admin = True\nfriend = ''\nfriendList = ''\nchatroom = ''\nuserList = []\nif userID != '' and receiverID != '':\n try:\n SQL = (\n 'select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(userID\n ) + ' and m.sender = ' + str(receiverID)\n SQL += (\n ' Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = '\n )\n SQL += 'm.sender and m.receiver = ' + str(receiverID\n ) + ' and m.sender = ' + str(userID)\n SQL += ' Order By messageID ;'\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, 'Error:', e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar = row[3]\n specialChar2 = ''\n specialChar = EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count <= 20:\n specialChar2 += x\n word_count += 1\n else:\n specialChar2 += x + '<p>'\n word_count = 0\n if count >= 5:\n chatroom += '<li class=\"chatDate\">' + str(row[4]) + '</li>'\n count = 0\n if str(row[0]) == str(userID):\n count += 1\n chatroom += ('<li class=\"mainUser\">' +\n '<a href=\"userProfile.cgi?user=' + 
str(row[0]) +\n '\">' + row[1] + '</a><img src=\"images/user/' + row[\n 2] + '\" alt=\"club1\">')\n chatroom += ('<br><div class=\"messageLine\">' +\n specialChar2 + '</div></li>')\n else:\n count += 1\n chatroom += (\n '<li class=\"otherUser\"><img src=\"images/user/' +\n row[2] + '\" alt=\"club1\">')\n chatroom += ('<a href=\"userProfile.cgi?userid=' + str(\n row[0]) + '\">' + row[1] +\n '</a><br><div class=\"messageLine\">' + specialChar2 +\n '</div></li>')\nif userID == '' or receiverID == '':\n content = (\n \"<p>You don't have right access to this page</p>\\n<a href='index.cgi'></a>\"\n )\n print(content)\nprint(chatroom)\n",
"step-5": "#! /usr/bin/env python3\n\nimport EchooFunctions, cgi, MySQLdb, hashlib, time, requests, os\nprint ('Content-type: text/html\\n')\n\nform = cgi.FieldStorage()\n\n#database connection\nuser = \"i494f18_team34\"\ndb_pass = \"my+sql=i494f18_team34\"\ndb_con = MySQLdb.connect(host=\"db.soic.indiana.edu\", port = 3306, user=user, passwd=db_pass, db=user)\ncursor = db_con.cursor()\nreceiverID = form.getfirst('user','')\nuserName = \"\"\nuserID = \"\"\nif \"echooUser\" in str(os.environ):\n userName = EchooFunctions.getUserName()\n userName = userName[0]\n userID = EchooFunctions.getUserID(cursor, userName)\n\nadmin = False\n#change the status of veriable\nif userName != \"\":\n if EchooFunctions.checkUserType(cursor, userName) == \"administrator\":\n admin = True\n#main contents to insert\nfriend = \"\"\nfriendList = \"\"\nchatroom = \"\"\nuserList = []\nif userID != \"\" and receiverID !=\"\":\n try:\n SQL = \"select u.userID, u.username, u.icon, m.detail, m.time_in,m.messageID from user as u, private_message as m where u.userID = \"\n SQL+= \"m.sender and m.receiver = \"+str(userID)+\" and m.sender = \"+str(receiverID)\n SQL+= \" Union select u.userID, u.username, u.icon, m.detail, m.time_in ,m.messageID from user as u, private_message as m where u.userID = \"\n SQL+= \"m.sender and m.receiver = \"+str(receiverID)+\" and m.sender = \"+str(userID)\n SQL+=\" Order By messageID ;\"\n cursor.execute(SQL)\n results = cursor.fetchall()\n except Exception as e:\n print('<p>Something went wrong with the first SQL!</p>')\n print(SQL, \"Error:\", e)\n else:\n if results:\n count = 5\n for row in results:\n word_count = 0\n specialChar=row[3]\n specialChar2 = \"\"\n specialChar=EchooFunctions.returnSpecialChara(specialChar)\n for x in specialChar:\n if word_count<=20:\n specialChar2 += x\n word_count+=1\n else:\n specialChar2 += x +\"<p>\"\n word_count = 0\n if count >= 5:\n chatroom+='<li class=\"chatDate\">'+str(row[4])+'</li>'\n count=0\n if str(row[0]) 
==str(userID):\n count+=1\n chatroom+='<li class=\"mainUser\">'+'<a href=\"userProfile.cgi?user='+str(row[0])+'\">'+row[1]+'</a><img src=\"images/user/'+row[2]+'\" alt=\"club1\">'\n chatroom+='<br><div class=\"messageLine\">'+specialChar2+'</div></li>'\n else:\n count+=1\n chatroom+='<li class=\"otherUser\"><img src=\"images/user/'+row[2]+'\" alt=\"club1\">'\n chatroom+='<a href=\"userProfile.cgi?userid='+str(row[0])+'\">'+row[1]+'</a><br><div class=\"messageLine\">'+specialChar2+'</div></li>'\n\nif userID == \"\" or receiverID ==\"\":\n content =\"\"\"<p>You don't have right access to this page</p>\n<a href='index.cgi'></a>\"\"\"\n print(content)\nprint(chatroom)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Stirng - Liste - Dosya
- Fonksiyon yazıyoruz.
- Bu fonksiyon iki parametre alacak. (dosya, string)
1. sorun : Dosyanın içinde string var ise True döndürecek yok ise False
2. sorun : Dosyanın içinde string bulunursa ilk bulunduğu konumu return edecek
3. sorun : Dosyanın içerisinde yazdığımız strinng kaç kere var onu liste halinde return eden fonksiyon
"""
def fonkString(text, string):
    # Report (via print, not return) whether `string` occurs in `text`,
    # its first index, its count, and every single-character match position.
    if string not in text:
        print("FALSE")
        return
    print("TRUE")
    print(text.index(string), ". sirada ilk", string, "bulundu")
    print(text.count(string), "tane", string, "var")
    # Positions are gathered character by character, so they are only
    # meaningful when `string` is a single character.
    positions = [index for index in range(len(text)) if text[index] == string]
    for position in positions:
        print(position)


fonkString("Programlama laboratuvari calisma sorulari dosya string liste kullanma ", "m")
|
normal
|
{
"blob_id": "0d3cc85cd18ee197b24c8b01b71afe82110bfad2",
"index": 3487,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fonkString(text, string):\n if string in text:\n print('TRUE')\n print(text.index(string), '. sirada ilk', string, 'bulundu')\n print(text.count(string), 'tane', string, 'var')\n liste = []\n for i in range(len(text)):\n if text[i] == string:\n liste.append(i)\n for x in liste:\n print(x)\n else:\n print('FALSE')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fonkString(text, string):\n if string in text:\n print('TRUE')\n print(text.index(string), '. sirada ilk', string, 'bulundu')\n print(text.count(string), 'tane', string, 'var')\n liste = []\n for i in range(len(text)):\n if text[i] == string:\n liste.append(i)\n for x in liste:\n print(x)\n else:\n print('FALSE')\n\n\nfonkString(\n 'Programlama laboratuvari calisma sorulari dosya string liste kullanma ',\n 'm')\n",
"step-4": "\"\"\"\nStirng - Liste - Dosya\n - Fonksiyon yazıyoruz.\n - Bu fonksiyon iki parametre alacak. (dosya, string)\n 1. sorun : Dosyanın içinde string var ise True döndürecek yok ise False \n 2. sorun : Dosyanın içinde string bulunursa ilk bulunduğu konumu return edecek\n 3. sorun : Dosyanın içerisinde yazdığımız strinng kaç kere var onu liste halinde return eden fonksiyon\n \n\"\"\"\n\ndef fonkString(text, string):\n if string in text:\n print(\"TRUE\")\n print(text.index(string), \". sirada ilk\", string, \"bulundu\")\n print(text.count(string),\"tane\",string, \"var\")\n\n liste = []\n\n for i in range(len(text)):\n if(text[i] == string):\n liste.append(i)\n for x in liste:\n print(x)\n else:\n print(\"FALSE\")\n\nfonkString(\"Programlama laboratuvari calisma sorulari dosya string liste kullanma \", \"m\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__version__ = "alph 1.0"  # package version string (NOTE(review): "alph" looks like a typo for "alpha")
|
normal
|
{
"blob_id": "2c4eb07a32c6903ae31006f42c13c55e6cc42eb5",
"index": 5245,
"step-1": "<mask token>\n",
"step-2": "__version__ = 'alph 1.0'\n",
"step-3": "__version__ = \"alph 1.0\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import tensorflow as tf
class LocNet:
    """TF1-style localization head.

    Pools and regularizes a backbone feature map, then regresses 4 float
    values per example through two fully-connected layers (presumably a
    bounding box -- confirm against the caller).  `loss()` compares the
    prediction against the `gt_loc` placeholder.
    """
    def __init__(self, scope, buttom_layer):
        # `buttom_layer` (sic) is the backbone tensor this head is built on.
        self.scope = scope
        with tf.variable_scope(scope) as scope:
            self.build_graph(buttom_layer)
        # Ground-truth targets: 4 floats per example, fed at train time.
        self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None,4),name='gt_loc')
    def build_graph(self, buttom_layer):
        # `kernel_weights` holds only the FC weight matrices (biases excluded)
        # so loss() can apply weight decay to kernels alone.
        self.variables = []
        self.kernel_weights = []
        # 2x2 max pool with stride 2: halves the spatial resolution.
        pool = tf.nn.max_pool(buttom_layer,
                              ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1],
                              padding='SAME',
                              name='pool')
        # NOTE(review): in TF1 the second positional argument is keep_prob,
        # so this keeps only 30% of activations (drops 70%) -- confirm intended.
        drop = tf.nn.dropout(pool, 0.3)
        with tf.name_scope('fc1') as scope:
            # Flatten everything except the batch dimension.
            shape = int(np.prod(drop.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 3000],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.float32),
                               trainable=True, name='biases')
            pool_flat = tf.reshape(drop, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)
            fc1 = tf.nn.relu(fc1l)
            self.kernel_weights += [fc1w]
            self.variables += [fc1w, fc1b]
        with tf.name_scope('fc2') as scope:
            # Final linear layer: 3000 -> 4 outputs, no activation (regression).
            fc2w = tf.Variable(tf.truncated_normal([3000, 4],
                                                   dtype=tf.float32,
                                                   stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32),
                               trainable=True, name='biases')
            self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)
            self.kernel_weights += [fc2w]
            self.variables += [fc2w, fc2b]
    def loss(self):
        # NOTE(review): despite its name, `loss_rms` is the MAX squared error
        # (tf.reduce_max over all elements), not a root-mean-square.
        with tf.name_scope(self.scope) as scope:
            beta = tf.constant(0.05, name='beta')
            loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc, self.logit))
            # L2 weight decay over the FC kernels, scaled by beta.
            loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.kernel_weights]
            loss_wd = beta * tf.add_n(loss_wd)
            total_loss = loss_rms + loss_wd
            return total_loss
|
normal
|
{
"blob_id": "dd4dc1c4a0dc47711d1d0512ef3f6b7908735766",
"index": 3149,
"step-1": "<mask token>\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n <mask token>\n\n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,\n self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.\n kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-3": "<mask token>\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n\n def build_graph(self, buttom_layer):\n self.variables = []\n self.kernel_weights = []\n pool = tf.nn.max_pool(buttom_layer, ksize=[1, 2, 2, 1], strides=[1,\n 2, 2, 1], padding='SAME', name='pool')\n drop = tf.nn.dropout(pool, 0.3)\n with tf.name_scope('fc1') as scope:\n shape = int(np.prod(drop.get_shape()[1:]))\n fc1w = tf.Variable(tf.truncated_normal([shape, 3000], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.\n float32), trainable=True, name='biases')\n pool_flat = tf.reshape(drop, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n self.kernel_weights += [fc1w]\n self.variables += [fc1w, fc1b]\n with tf.name_scope('fc2') as scope:\n fc2w = tf.Variable(tf.truncated_normal([3000, 4], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32\n ), trainable=True, name='biases')\n self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n self.kernel_weights += [fc2w]\n self.variables += [fc2w, fc2b]\n\n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,\n self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.\n kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-4": "import numpy as np\nimport tensorflow as tf\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n\n def build_graph(self, buttom_layer):\n self.variables = []\n self.kernel_weights = []\n pool = tf.nn.max_pool(buttom_layer, ksize=[1, 2, 2, 1], strides=[1,\n 2, 2, 1], padding='SAME', name='pool')\n drop = tf.nn.dropout(pool, 0.3)\n with tf.name_scope('fc1') as scope:\n shape = int(np.prod(drop.get_shape()[1:]))\n fc1w = tf.Variable(tf.truncated_normal([shape, 3000], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.\n float32), trainable=True, name='biases')\n pool_flat = tf.reshape(drop, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n self.kernel_weights += [fc1w]\n self.variables += [fc1w, fc1b]\n with tf.name_scope('fc2') as scope:\n fc2w = tf.Variable(tf.truncated_normal([3000, 4], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32\n ), trainable=True, name='biases')\n self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n self.kernel_weights += [fc2w]\n self.variables += [fc2w, fc2b]\n\n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,\n self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.\n kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-5": "\n\nimport numpy as np \nimport tensorflow as tf\n\n\nclass LocNet: \n def __init__(self, scope, buttom_layer):\n self.scope = scope \n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None,4),name='gt_loc')\n \n def build_graph(self, buttom_layer):\n self.variables = []\n self.kernel_weights = []\n pool = tf.nn.max_pool(buttom_layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool')\n \n drop = tf.nn.dropout(pool, 0.3)\n with tf.name_scope('fc1') as scope:\n shape = int(np.prod(drop.get_shape()[1:]))\n fc1w = tf.Variable(tf.truncated_normal([shape, 3000],\n dtype=tf.float32,\n stddev=1e-1), name='weights')\n fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.float32),\n trainable=True, name='biases')\n pool_flat = tf.reshape(drop, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n self.kernel_weights += [fc1w]\n self.variables += [fc1w, fc1b]\n \n\n with tf.name_scope('fc2') as scope:\n fc2w = tf.Variable(tf.truncated_normal([3000, 4],\n dtype=tf.float32,\n stddev=1e-1), name='weights')\n fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32),\n trainable=True, name='biases')\n self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n self.kernel_weights += [fc2w]\n self.variables += [fc2w, fc2b]\n \n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc, self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Read one integer and print YES when any number in the hard-coded list
# divides it, NO otherwise.
n = int(input())
k = [4, 7, 47, 74, 44, 77, 444, 447, 474, 477, 777, 774, 747, 7444]
if any(n % divisor == 0 for divisor in k):
    print("YES")
else:
    print("NO")
|
normal
|
{
"blob_id": "6161653fb789040d084e475e0ae25921e2e0676b",
"index": 2496,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in k:\n if n % i == 0:\n f = 1\n print('YES')\n break\nif f == 0:\n print('NO')\n",
"step-3": "n = int(input())\nk = [4, 7, 47, 74, 44, 77, 444, 447, 474, 477, 777, 774, 747, 7444]\nf = 0\nfor i in k:\n if n % i == 0:\n f = 1\n print('YES')\n break\nif f == 0:\n print('NO')\n",
"step-4": "n=int(input())\nk=[4,7,47,74,44,77,444,447,474,477,777,774,747,7444]\nf=0\nfor i in k:\n if(n%i==0):\n f=1\n print(\"YES\")\n break;\nif(f==0):\n print(\"NO\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class netdespatch_config(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class netdespatch_config(models.Model):
_name = 'netdespatch.config'
name = fields.Char(String='Name')
url = fields.Char(string='URL')
rm_enable = fields.Boolean('Enable Royal Mail')
domestic_name = fields.Char(string='Username', help='Netdespatch Username')
domestic_pwd = fields.Char(string='Password', size=256, copy=False,
help='Netdespatch Password')
domestic_accountid = fields.Char(string='Account ID', size=256, help=
'Netdespatch Account Number')
category = fields.Selection([('is_domestic', 'Is Domestic'), (
'is_international', 'Is International')], string='Category',
default='is_domestic')
in_name = fields.Char(string='Username', help='Netdespatch Username')
in_pwd = fields.Char(string='Password', size=256, copy=False, help=
'Netdespatch Password')
in_accountid = fields.Char(string='Account ID', size=256, help=
'Netdespatch Account Number')
apc_enable = fields.Boolean('Enable APC')
apc_name = fields.Char(string='Username', help='Netdespatch Username')
apc_pwd = fields.Char(string='Password', size=256, copy=False, help=
'Netdespatch Password')
apc_accountid = fields.Char(string='Account ID', size=256, help=
'Netdespatch Account Number')
ukmail_enable = fields.Boolean('Enable UKmail')
ukmail_name = fields.Char(string='Username', help='Netdespatch Username')
ukmail_pwd = fields.Char(string='Password', size=256, copy=False, help=
'Netdespatch Password')
ukmail_accountid = fields.Char(string='Account ID', size=256, help=
'Netdespatch Account Number')
yodel_enable = fields.Boolean('Enable Yodel')
yodel_name = fields.Char(string='Username', help='Netdespatch Username')
yodel_pwd = fields.Char(string='Password', size=256, copy=False, help=
'Netdespatch Password')
yodel_accountid = fields.Char(string='Account ID', size=256, help=
'Netdespatch Account Number')
<|reserved_special_token_1|>
from odoo import models, fields, api, _
import odoo.addons.decimal_precision as dp
class netdespatch_config(models.Model):
    """Netdespatch carrier configuration.

    Stores the Netdespatch API endpoint plus per-carrier credentials
    (username / password / account id) for Royal Mail (domestic and
    international), APC, UKMail and Yodel.
    """
    _name = 'netdespatch.config'

    # Fixed: was `String='Name'` (wrong-case keyword), so the field label
    # was silently ignored by the ORM.
    name = fields.Char(string='Name')
    url = fields.Char(string='URL')

    # --- Royal Mail (domestic) ---
    rm_enable = fields.Boolean('Enable Royal Mail')
    domestic_name = fields.Char(string='Username', help='Netdespatch Username')
    domestic_pwd = fields.Char(string='Password', size=256, copy=False,
        help='Netdespatch Password')
    domestic_accountid = fields.Char(string='Account ID', size=256,
        help='Netdespatch Account Number')
    category = fields.Selection([('is_domestic', 'Is Domestic'),
        ('is_international', 'Is International')],
        string='Category', default='is_domestic')

    # --- Royal Mail (international) ---
    in_name = fields.Char(string='Username', help='Netdespatch Username')
    in_pwd = fields.Char(string='Password', size=256, copy=False,
        help='Netdespatch Password')
    in_accountid = fields.Char(string='Account ID', size=256,
        help='Netdespatch Account Number')

    # --- APC ---
    apc_enable = fields.Boolean('Enable APC')
    apc_name = fields.Char(string='Username', help='Netdespatch Username')
    apc_pwd = fields.Char(string='Password', size=256, copy=False,
        help='Netdespatch Password')
    apc_accountid = fields.Char(string='Account ID', size=256,
        help='Netdespatch Account Number')

    # --- UKMail ---
    ukmail_enable = fields.Boolean('Enable UKmail')
    ukmail_name = fields.Char(string='Username', help='Netdespatch Username')
    ukmail_pwd = fields.Char(string='Password', size=256, copy=False,
        help='Netdespatch Password')
    ukmail_accountid = fields.Char(string='Account ID', size=256,
        help='Netdespatch Account Number')

    # --- Yodel ---
    yodel_enable = fields.Boolean('Enable Yodel')
    yodel_name = fields.Char(string='Username', help='Netdespatch Username')
    yodel_pwd = fields.Char(string='Password', size=256, copy=False,
        help='Netdespatch Password')
    yodel_accountid = fields.Char(string='Account ID', size=256,
        help='Netdespatch Account Number')
<|reserved_special_token_1|>
from odoo import models, fields, api, _
import odoo.addons.decimal_precision as dp
class netdespatch_config(models.Model):
    """Netdespatch carrier configuration: endpoint URL plus per-carrier
    credentials (username / password / account id) for Royal Mail, APC,
    UKMail and Yodel."""
    _name = 'netdespatch.config'
    # NOTE(review): `String=` is the wrong case -- Odoo expects `string=`,
    # so this label is ignored; confirm and fix in a behavior change.
    name = fields.Char(String='Name')
    url = fields.Char(string='URL')

    # Royal Mail (domestic credentials + domestic/international category flag)
    rm_enable = fields.Boolean('Enable Royal Mail')
    domestic_name = fields.Char(string='Username', help="Netdespatch Username")
    domestic_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
    domestic_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")
    category = fields.Selection([('is_domestic', 'Is Domestic'),
                                 ('is_international', 'Is International')
                                 ], string='Category', default='is_domestic')

    # Royal Mail international credentials
    in_name = fields.Char(string='Username', help="Netdespatch Username")
    in_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
    in_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")


    # APC
    apc_enable = fields.Boolean('Enable APC')
    apc_name = fields.Char(string='Username', help="Netdespatch Username")
    apc_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
    apc_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")

    # UKMail
    ukmail_enable = fields.Boolean('Enable UKmail')
    ukmail_name = fields.Char(string='Username', help="Netdespatch Username")
    ukmail_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
    ukmail_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")


    # Yodel
    yodel_enable = fields.Boolean('Enable Yodel')
    yodel_name = fields.Char(string='Username', help="Netdespatch Username")
    yodel_pwd = fields.Char(string='Password', size=256, copy=False, help="Netdespatch Password")
    yodel_accountid = fields.Char(string='Account ID', size=256, help="Netdespatch Account Number")
|
flexible
|
{
"blob_id": "407f549cf68660c8f8535ae0bed373e2f54af877",
"index": 5731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass netdespatch_config(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass netdespatch_config(models.Model):\n _name = 'netdespatch.config'\n name = fields.Char(String='Name')\n url = fields.Char(string='URL')\n rm_enable = fields.Boolean('Enable Royal Mail')\n domestic_name = fields.Char(string='Username', help='Netdespatch Username')\n domestic_pwd = fields.Char(string='Password', size=256, copy=False,\n help='Netdespatch Password')\n domestic_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n category = fields.Selection([('is_domestic', 'Is Domestic'), (\n 'is_international', 'Is International')], string='Category',\n default='is_domestic')\n in_name = fields.Char(string='Username', help='Netdespatch Username')\n in_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n in_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n apc_enable = fields.Boolean('Enable APC')\n apc_name = fields.Char(string='Username', help='Netdespatch Username')\n apc_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n apc_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n ukmail_enable = fields.Boolean('Enable UKmail')\n ukmail_name = fields.Char(string='Username', help='Netdespatch Username')\n ukmail_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n ukmail_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n yodel_enable = fields.Boolean('Enable Yodel')\n yodel_name = fields.Char(string='Username', help='Netdespatch Username')\n yodel_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n yodel_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n",
"step-4": "from odoo import models, fields, api, _\nimport odoo.addons.decimal_precision as dp\n\n\nclass netdespatch_config(models.Model):\n _name = 'netdespatch.config'\n name = fields.Char(String='Name')\n url = fields.Char(string='URL')\n rm_enable = fields.Boolean('Enable Royal Mail')\n domestic_name = fields.Char(string='Username', help='Netdespatch Username')\n domestic_pwd = fields.Char(string='Password', size=256, copy=False,\n help='Netdespatch Password')\n domestic_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n category = fields.Selection([('is_domestic', 'Is Domestic'), (\n 'is_international', 'Is International')], string='Category',\n default='is_domestic')\n in_name = fields.Char(string='Username', help='Netdespatch Username')\n in_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n in_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n apc_enable = fields.Boolean('Enable APC')\n apc_name = fields.Char(string='Username', help='Netdespatch Username')\n apc_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n apc_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n ukmail_enable = fields.Boolean('Enable UKmail')\n ukmail_name = fields.Char(string='Username', help='Netdespatch Username')\n ukmail_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n ukmail_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n yodel_enable = fields.Boolean('Enable Yodel')\n yodel_name = fields.Char(string='Username', help='Netdespatch Username')\n yodel_pwd = fields.Char(string='Password', size=256, copy=False, help=\n 'Netdespatch Password')\n yodel_accountid = fields.Char(string='Account ID', size=256, help=\n 'Netdespatch Account Number')\n",
"step-5": "from odoo import models, fields, api, _\nimport odoo.addons.decimal_precision as dp\n\nclass netdespatch_config(models.Model):\n _name = 'netdespatch.config'\n name = fields.Char(String='Name')\n url = fields.Char(string='URL')\n\n # Royal Mail\n rm_enable = fields.Boolean('Enable Royal Mail')\n domestic_name = fields.Char(string='Username', help=\"Netdespatch Username\")\n domestic_pwd = fields.Char(string='Password', size=256, copy=False, help=\"Netdespatch Password\")\n domestic_accountid = fields.Char(string='Account ID', size=256, help=\"Netdespatch Account Number\")\n category = fields.Selection([('is_domestic', 'Is Domestic'),\n ('is_international', 'Is International')\n ], string='Category', default='is_domestic')\n\n in_name = fields.Char(string='Username', help=\"Netdespatch Username\")\n in_pwd = fields.Char(string='Password', size=256, copy=False, help=\"Netdespatch Password\")\n in_accountid = fields.Char(string='Account ID', size=256, help=\"Netdespatch Account Number\")\n\n\n #Apc\n apc_enable = fields.Boolean('Enable APC')\n apc_name = fields.Char(string='Username', help=\"Netdespatch Username\")\n apc_pwd = fields.Char(string='Password', size=256, copy=False, help=\"Netdespatch Password\")\n apc_accountid = fields.Char(string='Account ID', size=256, help=\"Netdespatch Account Number\")\n\n #ukMail\n ukmail_enable = fields.Boolean('Enable UKmail')\n ukmail_name = fields.Char(string='Username', help=\"Netdespatch Username\")\n ukmail_pwd = fields.Char(string='Password', size=256, copy=False, help=\"Netdespatch Password\")\n ukmail_accountid = fields.Char(string='Account ID', size=256, help=\"Netdespatch Account Number\")\n\n\n #YODEL\n yodel_enable = fields.Boolean('Enable Yodel')\n yodel_name = fields.Char(string='Username', help=\"Netdespatch Username\")\n yodel_pwd = fields.Char(string='Password', size=256, copy=False, help=\"Netdespatch Password\")\n yodel_accountid = fields.Char(string='Account ID', size=256, help=\"Netdespatch 
Account Number\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#create a list
a = [2,3,4,5,6,7,8,9,10]
print(a)
#indexing
b = int(input('Enter indexing value:'))
print('The result is:',a[b])
print(a[8])
print(a[-1])
#slicing
print(a[0:3])
print(a[0:])
#conconteation
b=[20,30]
print(a+b)
#Repetition
print(b*3)
#updating
print(a[2])
a[2]=100
print(a)
#membership
print(5 in a)
#comparison
c=[2,3,4]
print(a==b)
print(a!=b)
#slice
a=[9,8,7,6,5,4]
print(a[0:3])
print(a[:4])
print(a[1:])
print(a[:])
print(a[2:2])
print(a[0:6:2])
print(a[0:6:3])
'''#a.apppend(element)
a=[1,2,3,4,5]
b=int(input('Enter number to append:'))
a.append(b)
print(a)
#insert(index,element)
a.insert(0,0)
print(a)
#a.extend(c)
c=[6,7,8,9]
a.extend(c)
print(a)
#one more'''
|
normal
|
{
"blob_id": "f7d29dd1d990b3e07a7c07a559cf5658b6390e41",
"index": 4601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(a)\n<mask token>\nprint('The result is:', a[b])\nprint(a[8])\nprint(a[-1])\nprint(a[0:3])\nprint(a[0:])\n<mask token>\nprint(a + b)\nprint(b * 3)\nprint(a[2])\n<mask token>\nprint(a)\nprint(5 in a)\n<mask token>\nprint(a == b)\nprint(a != b)\n<mask token>\nprint(a[0:3])\nprint(a[:4])\nprint(a[1:])\nprint(a[:])\nprint(a[2:2])\nprint(a[0:6:2])\nprint(a[0:6:3])\n<mask token>\n",
"step-3": "a = [2, 3, 4, 5, 6, 7, 8, 9, 10]\nprint(a)\nb = int(input('Enter indexing value:'))\nprint('The result is:', a[b])\nprint(a[8])\nprint(a[-1])\nprint(a[0:3])\nprint(a[0:])\nb = [20, 30]\nprint(a + b)\nprint(b * 3)\nprint(a[2])\na[2] = 100\nprint(a)\nprint(5 in a)\nc = [2, 3, 4]\nprint(a == b)\nprint(a != b)\na = [9, 8, 7, 6, 5, 4]\nprint(a[0:3])\nprint(a[:4])\nprint(a[1:])\nprint(a[:])\nprint(a[2:2])\nprint(a[0:6:2])\nprint(a[0:6:3])\n<mask token>\n",
"step-4": "\n#create a list\na = [2,3,4,5,6,7,8,9,10]\nprint(a)\n#indexing\nb = int(input('Enter indexing value:'))\nprint('The result is:',a[b])\nprint(a[8])\nprint(a[-1])\n\n#slicing\nprint(a[0:3])\nprint(a[0:])\n\n#conconteation\nb=[20,30]\nprint(a+b)\n\n#Repetition\nprint(b*3)\n\n#updating\nprint(a[2])\na[2]=100\nprint(a)\n\n#membership\nprint(5 in a)\n\n#comparison\nc=[2,3,4]\nprint(a==b)\nprint(a!=b)\n\n#slice\na=[9,8,7,6,5,4]\nprint(a[0:3])\n\nprint(a[:4])\nprint(a[1:])\nprint(a[:])\nprint(a[2:2])\nprint(a[0:6:2])\nprint(a[0:6:3])\n\n'''#a.apppend(element)\na=[1,2,3,4,5]\nb=int(input('Enter number to append:'))\na.append(b)\nprint(a)\n#insert(index,element)\na.insert(0,0)\nprint(a)\n#a.extend(c)\nc=[6,7,8,9]\na.extend(c)\nprint(a)\n\n#one more'''\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import hashlib

# Digest the same message with several algorithms and print each hex digest.
message = b'Najmul'
md5 = hashlib.md5(message)
sha1 = hashlib.sha1(message)
sha224 = hashlib.sha224(message)
sha256 = hashlib.sha256(message)
sha384 = hashlib.sha384(message)
sha512 = hashlib.sha512(message)
for digest in (md5, sha1, sha224, sha256, sha384, sha512):
    print(digest.hexdigest())
|
normal
|
{
"blob_id": "ab4c668c8a167f8c387199b7aa49aa742d563250",
"index": 1698,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(md5.hexdigest())\n<mask token>\nprint(sha1.hexdigest())\n<mask token>\nprint(sha224.hexdigest())\n<mask token>\nprint(sha256.hexdigest())\n<mask token>\nprint(sha384.hexdigest())\n<mask token>\nprint(sha512.hexdigest())\n",
"step-3": "<mask token>\nmd5 = hashlib.md5(b'Najmul')\nprint(md5.hexdigest())\nsha1 = hashlib.sha1(b'Najmul')\nprint(sha1.hexdigest())\nsha224 = hashlib.sha224(b'Najmul')\nprint(sha224.hexdigest())\nsha256 = hashlib.sha256(b'Najmul')\nprint(sha256.hexdigest())\nsha384 = hashlib.sha384(b'Najmul')\nprint(sha384.hexdigest())\nsha512 = hashlib.sha512(b'Najmul')\nprint(sha512.hexdigest())\n",
"step-4": "import hashlib\nmd5 = hashlib.md5(b'Najmul')\nprint(md5.hexdigest())\nsha1 = hashlib.sha1(b'Najmul')\nprint(sha1.hexdigest())\nsha224 = hashlib.sha224(b'Najmul')\nprint(sha224.hexdigest())\nsha256 = hashlib.sha256(b'Najmul')\nprint(sha256.hexdigest())\nsha384 = hashlib.sha384(b'Najmul')\nprint(sha384.hexdigest())\nsha512 = hashlib.sha512(b'Najmul')\nprint(sha512.hexdigest())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from CTO import CTO
#from UI import UIManager
from Cidades import Cidades
from Database import Database
from datetime import datetime
class Main:
def __init__(self, cidade_filename="", dados_filename=""):
#cidade_filename, dados_filename = UIManager().get_filenames()
print("cidade: " + cidade_filename)
self.cidades = Cidades(cidade_filename)
if dados_filename != "":
self.processaCSV(dados_filename)
self.recuperaDados()
self.insereDados()
def processaCSV(self, filename):
with open(filename, 'r', encoding='ISO-8859-1') as input_file:
self.concessao = {}
self.expansao = {}
for line in input_file.readlines():
attributes = line.split(';')
localidade = str(attributes[14])
estacao = str(attributes[15])
cto = str(attributes[1])
status = str(attributes[13])
if localidade in self.cidades.concessao:
if cto in self.concessao:
self.concessao[cto].addLeitura(status)
else:
self.concessao[cto] = CTO(localidade, estacao, cto)
self.concessao[cto].addLeitura(status)
elif localidade in self.cidades.expansao:
if cto in self.expansao:
self.expansao[cto].addLeitura(status)
else:
self.expansao[cto] = CTO(localidade, estacao, cto)
self.expansao[cto].addLeitura(status)
def insereDados(self):
hoje = datetime.utcnow()
#hoje = datetime(2019, 1, 25)
argsCn = []
for nome, cto in self.concessao.items():
nomeCto = cto.dict['CTO']
try:
antigoOcupado = self.antigoConcessao[nomeCto][8]
antigoData = self.antigoConcessao[nomeCto][1]
ocupadoAtual = int(cto.dict['OCUPADO'])
vagoAtual = int(cto.dict['VAGO'])
numDias = (hoje - self.antigoConcessao[nomeCto][1]).days
taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias
previsao = vagoAtual / taxa_crescimento
except Exception as e:
previsao = -1
argsCn.append(
(hoje,) + cto.as_a_tuple() + (previsao,)
)
argsEx = []
for nome, cto in self.expansao.items():
nomeCto = cto.dict['CTO']
try:
antigoOcupado = self.antigoExpansao[nomeCto][8]
antigoData = self.antigoExpansao[nomeCto][1]
ocupadoAtual = int(cto.dict['OCUPADO'])
vagoAtual = int(cto.dict['VAGO'])
numDias = (hoje - self.antigoExpansao[nomeCto][1]).days
taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias
previsao = vagoAtual / taxa_crescimento
except Exception as e:
previsao = -1
argsEx.append(
(hoje,) + cto.as_a_tuple() + (previsao,)
)
db = Database()
query = """INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
db.executaQuery(query, argsCn)
query = """INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
db.executaQuery(query, argsEx)
def recuperaDados(self):
db = Database()
self.antigoConcessao = {}
self.antigoExpansao = {}
for registro in db.executaQuery('SELECT * from concessao where dia = (select Max(dia) from concessao)'):
self.antigoConcessao[registro[4]] = registro
for registro in db.executaQuery('SELECT * from expansao where dia = (select Max(dia) from expansao)'):
self.antigoExpansao[registro[4]] = registro
if __name__ == '__main__':
Main()
|
normal
|
{
"blob_id": "c5f46be6d7214614892d227c76c75e77433a8fa9",
"index": 9517,
"step-1": "<mask token>\n\n\nclass Main:\n <mask token>\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, 
local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Main:\n\n def __init__(self, cidade_filename='', dados_filename=''):\n print('cidade: ' + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n if dados_filename != '':\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual 
- antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Main:\n\n def __init__(self, cidade_filename='', dados_filename=''):\n print('cidade: ' + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n if dados_filename != '':\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual 
- antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\nif __name__ == '__main__':\n Main()\n",
"step-4": "from CTO import CTO\nfrom Cidades import Cidades\nfrom Database import Database\nfrom datetime import datetime\n\n\nclass Main:\n\n def __init__(self, cidade_filename='', dados_filename=''):\n print('cidade: ' + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n if dados_filename != '':\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n def insereDados(self):\n hoje = datetime.utcnow()\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append((hoje,) + cto.as_a_tuple() + (previsao,))\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = 
int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append((hoje,) + cto.as_a_tuple() + (previsao,))\n db = Database()\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery(\n 'SELECT * from concessao where dia = (select Max(dia) from concessao)'\n ):\n self.antigoConcessao[registro[4]] = registro\n for registro in db.executaQuery(\n 'SELECT * from expansao where dia = (select Max(dia) from expansao)'\n ):\n self.antigoExpansao[registro[4]] = registro\n\n\nif __name__ == '__main__':\n Main()\n",
"step-5": "from CTO import CTO\n#from UI import UIManager\nfrom Cidades import Cidades\nfrom Database import Database\nfrom datetime import datetime\n\nclass Main:\n\n def __init__(self, cidade_filename=\"\", dados_filename=\"\"):\n #cidade_filename, dados_filename = UIManager().get_filenames()\n\n print(\"cidade: \" + cidade_filename)\n self.cidades = Cidades(cidade_filename)\n\n if dados_filename != \"\":\n self.processaCSV(dados_filename)\n self.recuperaDados()\n self.insereDados()\n\n\n def processaCSV(self, filename):\n with open(filename, 'r', encoding='ISO-8859-1') as input_file:\n\n self.concessao = {}\n self.expansao = {}\n for line in input_file.readlines():\n attributes = line.split(';')\n\n localidade = str(attributes[14])\n estacao = str(attributes[15])\n cto = str(attributes[1])\n status = str(attributes[13])\n\n if localidade in self.cidades.concessao:\n if cto in self.concessao:\n self.concessao[cto].addLeitura(status)\n else:\n self.concessao[cto] = CTO(localidade, estacao, cto)\n self.concessao[cto].addLeitura(status)\n\n elif localidade in self.cidades.expansao:\n if cto in self.expansao:\n self.expansao[cto].addLeitura(status)\n else:\n self.expansao[cto] = CTO(localidade, estacao, cto)\n self.expansao[cto].addLeitura(status)\n\n\n def insereDados(self):\n hoje = datetime.utcnow()\n #hoje = datetime(2019, 1, 25)\n\n argsCn = []\n for nome, cto in self.concessao.items():\n nomeCto = cto.dict['CTO']\n try:\n antigoOcupado = self.antigoConcessao[nomeCto][8]\n antigoData = self.antigoConcessao[nomeCto][1]\n\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoConcessao[nomeCto][1]).days\n\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsCn.append(\n (hoje,) + cto.as_a_tuple() + (previsao,)\n )\n\n argsEx = []\n for nome, cto in self.expansao.items():\n nomeCto = cto.dict['CTO']\n try:\n 
antigoOcupado = self.antigoExpansao[nomeCto][8]\n antigoData = self.antigoExpansao[nomeCto][1]\n\n ocupadoAtual = int(cto.dict['OCUPADO'])\n vagoAtual = int(cto.dict['VAGO'])\n numDias = (hoje - self.antigoExpansao[nomeCto][1]).days\n\n taxa_crescimento = (ocupadoAtual - antigoOcupado) / numDias\n previsao = vagoAtual / taxa_crescimento\n except Exception as e:\n previsao = -1\n argsEx.append(\n (hoje,) + cto.as_a_tuple() + (previsao,)\n )\n\n db = Database()\n\n query = \"\"\"INSERT INTO concessao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsCn)\n\n query = \"\"\"INSERT INTO expansao (dia, local, estacao, cto, defeito, designado, reservado, ocupado, vago, total, previsao_esgotamento)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n db.executaQuery(query, argsEx)\n\n\n def recuperaDados(self):\n db = Database()\n self.antigoConcessao = {}\n self.antigoExpansao = {}\n for registro in db.executaQuery('SELECT * from concessao where dia = (select Max(dia) from concessao)'):\n self.antigoConcessao[registro[4]] = registro\n\n for registro in db.executaQuery('SELECT * from expansao where dia = (select Max(dia) from expansao)'):\n self.antigoExpansao[registro[4]] = registro\n\n\n\nif __name__ == '__main__':\n Main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from typing import Dict, List
from .power_bi_querier import PowerBiQuerier
class DeathsByEthnicity(PowerBiQuerier):
def __init__(self) ->None:
self.source = 'd'
self.name = 'deaths by race'
self.property = 'race'
super().__init__()
def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:
results = super()._parse_data(response_json)
return {ethnicity.strip(): count for ethnicity, count in results}
|
normal
|
{
"blob_id": "d975b74370acc72101f808e70bef64cee39a5ab8",
"index": 6204,
"step-1": "<mask token>\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n <mask token>\n\n def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:\n results = super()._parse_data(response_json)\n return {ethnicity.strip(): count for ethnicity, count in results}\n",
"step-3": "<mask token>\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n\n def __init__(self) ->None:\n self.source = 'd'\n self.name = 'deaths by race'\n self.property = 'race'\n super().__init__()\n\n def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:\n results = super()._parse_data(response_json)\n return {ethnicity.strip(): count for ethnicity, count in results}\n",
"step-4": "from typing import Dict, List\nfrom .power_bi_querier import PowerBiQuerier\n\n\nclass DeathsByEthnicity(PowerBiQuerier):\n\n def __init__(self) ->None:\n self.source = 'd'\n self.name = 'deaths by race'\n self.property = 'race'\n super().__init__()\n\n def _parse_data(self, response_json: Dict[str, List]) ->Dict[str, int]:\n results = super()._parse_data(response_json)\n return {ethnicity.strip(): count for ethnicity, count in results}\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,
resolution: int=64, patch_size: int=8, block_size: int=4,
batch_size: int=16, output_type: str='per-pixel', checkpoint_every:
int=64, validation_limit: int=16, depth: int=7, score_type: str=
'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,
debug_image_top_k: int=None, debug_image_threshold: float=None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[], encoder=encoder, optimizer=optimizer, scheduler=
scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device
=device, checkpoint_dir=checkpoint_dir, num_models_to_keep=
num_models_to_keep, generate_after_n=generate_after_n,
score_type=score_type, patch_size=patch_size, block_size=
block_size, output_type=output_type, resolution=resolution,
depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=
zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
if debug_image_top_k < 0:
debug_image_top_k = None
if debug_image_threshold < 0:
debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i + self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f'Validating epoch {epoch} step {step}...')
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read(
'dev', validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.
read('dev', self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.
debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f'Test-time pixel acc {mean_acc * 100}')
return mean_acc
def train_and_validate_one_epoch(self, epoch):
print(f'Training epoch {epoch}...')
self.encoder.train()
skipped = 0
step = 0
for b, batch_instance in enumerate(self.dataset_reader.read('train')):
actual_batches = self.split_large_batch(batch_instance)
for sb, small_batch in enumerate(actual_batches):
is_best = False
self.optimizer.zero_grad()
outputs = self.encoder(small_batch)
if outputs is None:
skipped += 1
continue
loss = self.compute_patch_loss(small_batch, outputs, self.
next_to_prev_weight)
loss.backward()
self.optimizer.step()
it = (epoch + 1) * (step + 1)
self.scheduler.step_batch(it)
if (step + 1) % self.checkpoint_every == 0:
step_acc = self.validate_one_epoch(epoch, step, self.
validation_limit)
print(
f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
)
if step_acc > self.best_score:
is_best = True
self.best_score = step_acc
self.save_model(f'{epoch}_{step}', is_best)
step += 1
print(f'skipped {skipped} examples')
epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.
validation_limit)
print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
if self.score_type == 'acc':
return epoch_acc / 2, -1.0
else:
raise AssertionError(f'invalid score type {self.score_type}')
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,
1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs['next_position']
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,
next_patches)
total_loss = next_pixel_loss
print(f'loss {total_loss.item()}')
return total_loss
def generate_debugging_image(self, true_img, path_state, pred_path,
out_path, caption=None, top_k=None, threshold=None):
caption = self.wrap_caption(caption)
fig, ax = plt.subplots(2, 2, figsize=(16, 16))
text_ax = ax[0, 1]
text_ax.axis([0, 1, 0, 1])
text_ax.text(0.2, 0.02, caption, fontsize=12)
text_ax.axis('off')
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
verticalalignment='top', bbox=props)
img_ax = ax[1, 0]
true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
img_ax.imshow(true_img)
true_path = path_state.detach().numpy()
true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
float)
true_ax = ax[0, 0]
true_ax.imshow(true_path)
pred_path = torch.softmax(pred_path, dim=0)
pred_path = pred_path[1, :, :]
pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
if top_k is not None:
top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
pred_path[top_k_inds] = 1.1
pred_path[pred_path < 1.0] = 0
pred_path[top_k_inds] = 1.0
elif threshold is not None:
pred_path[pred_path < threshold] = 0
else:
pred_path = pred_path
pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
pred_ax = ax[1, 1]
pred_ax.imshow(pred_path)
file_path = f'{out_path}.png'
print(f'saving to {file_path}')
plt.savefig(file_path)
plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num,
top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size,
output_type='per-patch', upsample=True)
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
'path_state'].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs['next_position'].shape[0]):
output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'
).joinpath(f'instance_{i}')
output_path.mkdir(parents=True, exist_ok=True)
command = batch_instance['command'][i]
command = [x for x in command if x != '<PAD>']
command = ' '.join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance['path_state'][i]
pred_path = next_position[i]
self.generate_debugging_image(image, path_state, pred_path,
output_path, caption=command, top_k=top_k, threshold=
threshold)
return {'next_f1': next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-08
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NavigationTransformerTrainer(TransformerTrainer):
def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,
resolution: int=64, patch_size: int=8, block_size: int=4,
batch_size: int=16, output_type: str='per-pixel', checkpoint_every:
int=64, validation_limit: int=16, depth: int=7, score_type: str=
'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,
debug_image_top_k: int=None, debug_image_threshold: float=None):
super(NavigationTransformerTrainer, self).__init__(train_data=[],
val_data=[], encoder=encoder, optimizer=optimizer, scheduler=
scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device
=device, checkpoint_dir=checkpoint_dir, num_models_to_keep=
num_models_to_keep, generate_after_n=generate_after_n,
score_type=score_type, patch_size=patch_size, block_size=
block_size, output_type=output_type, resolution=resolution,
depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=
zero_weight)
self.f1_metric = F1Metric()
self.dataset_reader = dataset_reader
self.batch_size = batch_size
self.checkpoint_every = checkpoint_every
self.validation_limit = validation_limit
if debug_image_top_k < 0:
debug_image_top_k = None
if debug_image_threshold < 0:
debug_image_threshold = None
self.debug_image_top_k = debug_image_top_k
self.debug_image_threshold = debug_image_threshold
def split_large_batch(self, batch):
large_bsz = batch['path_state'].shape[0]
small_batches = []
for i in range(0, large_bsz, self.batch_size):
small_batch = {}
for k in batch.keys():
small_batch[k] = batch[k][i:i + self.batch_size]
small_batches.append(small_batch)
return small_batches
def validate_one_epoch(self, epoch, step, validation_limit):
print(f'Validating epoch {epoch} step {step}...')
total_prev_acc, total_next_acc = 0.0, 0.0
total = 0
self.encoder.eval()
for b, dev_batch_instance in enumerate(self.dataset_reader.read(
'dev', validation_limit)):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, epoch, b, 0)
total_next_acc += score_dict['next_f1']
total += 1
mean_next_acc = total_next_acc / total
return mean_next_acc
def evaluate(self):
total_acc = 0.0
total = 0
total_block_acc = 0.0
self.encoder.eval()
for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.
read('dev', self.validation_limit))):
actual_batches = self.split_large_batch(dev_batch_instance)
for small_batch in actual_batches:
score_dict = self.validate(small_batch, 10, b, 0, self.
debug_image_top_k, self.debug_image_threshold)
total_acc += score_dict['next_f1']
total += 1
mean_acc = total_acc / total
print(f'Test-time pixel acc {mean_acc * 100}')
return mean_acc
    def train_and_validate_one_epoch(self, epoch):
        """Train for one epoch, validating and checkpointing every
        ``self.checkpoint_every`` steps.

        Returns:
            (score, -1.0) where score is half the epoch's mean next-pixel F1.

        Raises:
            AssertionError: if ``self.score_type`` is not ``'acc'``.
        """
        print(f'Training epoch {epoch}...')
        self.encoder.train()
        skipped = 0
        step = 0
        for b, batch_instance in enumerate(self.dataset_reader.read('train')):
            # Oversized reader batches are split to fit the encoder.
            actual_batches = self.split_large_batch(batch_instance)
            for sb, small_batch in enumerate(actual_batches):
                is_best = False
                self.optimizer.zero_grad()
                outputs = self.encoder(small_batch)
                if outputs is None:
                    # Encoder signals an unusable batch by returning None.
                    skipped += 1
                    continue
                loss = self.compute_patch_loss(small_batch, outputs, self.
                    next_to_prev_weight)
                loss.backward()
                self.optimizer.step()
                # Scheduler is stepped on a per-batch counter, after the
                # optimizer step.
                it = (epoch + 1) * (step + 1)
                self.scheduler.step_batch(it)
                if (step + 1) % self.checkpoint_every == 0:
                    # NOTE(review): this passes four args to ``validate``,
                    # which as written requires six — confirm the intended
                    # top_k/threshold behavior for mid-training validation.
                    step_acc = self.validate_one_epoch(epoch, step, self.
                        validation_limit)
                    print(
                        f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
                        )
                    if step_acc > self.best_score:
                        is_best = True
                        self.best_score = step_acc
                    self.save_model(f'{epoch}_{step}', is_best)
                step += 1
        print(f'skipped {skipped} examples')
        # Final, larger validation pass (10x the per-checkpoint limit).
        epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.
            validation_limit)
        print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
        if self.score_type == 'acc':
            return epoch_acc / 2, -1.0
        else:
            raise AssertionError(f'invalid score type {self.score_type}')
def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,
1.0]):
"""
compute per-patch for each patch
"""
bsz, w, h, __ = inputs['input_image'].shape
pred_next_image = outputs['next_position']
path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
true_next_image = image_to_tiles(path_state, self.patch_size)
next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
next_patches = torch.zeros_like(next_sum_image)
next_patches[next_sum_image != 0] = 1
pred_next_image = pred_next_image.squeeze(-1)
next_patches = next_patches.squeeze(-1).to(self.device).long()
pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,
next_patches)
total_loss = next_pixel_loss
print(f'loss {total_loss.item()}')
return total_loss
    def generate_debugging_image(self, true_img, path_state, pred_path,
        out_path, caption=None, top_k=None, threshold=None):
        """Render a 2x2 debug figure and save it to ``{out_path}.png``.

        Panels: gold path (top-left), wrapped caption (top-right), input
        image (bottom-left), predicted path probabilities (bottom-right).

        NOTE(review): assumes a fixed 512x512 resolution — confirm against
        the dataset's actual image size.
        """
        caption = self.wrap_caption(caption)
        fig, ax = plt.subplots(2, 2, figsize=(16, 16))
        text_ax = ax[0, 1]
        text_ax.axis([0, 1, 0, 1])
        text_ax.text(0.2, 0.02, caption, fontsize=12)
        text_ax.axis('off')
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
            verticalalignment='top', bbox=props)
        img_ax = ax[1, 0]
        # Drop any alpha channel before display.
        true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
        img_ax.imshow(true_img)
        true_path = path_state.detach().numpy()
        true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
            float)
        true_ax = ax[0, 0]
        true_ax.imshow(true_path)
        # Probability of the "path" class (index 1) at every pixel.
        pred_path = torch.softmax(pred_path, dim=0)
        pred_path = pred_path[1, :, :]
        pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
        if top_k is not None:
            # Binarize: keep exactly the k highest-scoring pixels.
            top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
            top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
            pred_path[top_k_inds] = 1.1
            pred_path[pred_path < 1.0] = 0
            pred_path[top_k_inds] = 1.0
        elif threshold is not None:
            # Zero out everything below the confidence threshold.
            pred_path[pred_path < threshold] = 0
        else:
            # No filtering: show raw probabilities.
            pred_path = pred_path
        pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
        pred_ax = ax[1, 1]
        pred_ax.imshow(pred_path)
        file_path = f'{out_path}.png'
        print(f'saving to {file_path}')
        plt.savefig(file_path)
        plt.close()
def validate(self, batch_instance, epoch_num, batch_num, instance_num,
top_k, threshold):
self.encoder.eval()
outputs = self.encoder(batch_instance)
next_position = outputs['next_position']
next_position = tiles_to_image(next_position, self.patch_size,
output_type='per-patch', upsample=True)
next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
'path_state'].unsqueeze(-1), next_position)
if epoch_num > self.generate_after_n:
for i in range(outputs['next_position'].shape[0]):
output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'
).joinpath(f'instance_{i}')
output_path.mkdir(parents=True, exist_ok=True)
command = batch_instance['command'][i]
command = [x for x in command if x != '<PAD>']
command = ' '.join(command)
image = batch_instance['input_image'][i]
path_state = batch_instance['path_state'][i]
pred_path = next_position[i]
self.generate_debugging_image(image, path_state, pred_path,
output_path, caption=command, top_k=top_k, threshold=
threshold)
return {'next_f1': next_f1}
def compute_f1(self, true_pos, pred_pos):
eps = 1e-08
values, pred_pixels = torch.max(pred_pos, dim=1)
gold_pixels = true_pos
pred_pixels = pred_pixels.unsqueeze(1)
pred_pixels = pred_pixels.detach().cpu().float()
gold_pixels = gold_pixels.detach().cpu().float()
total_pixels = sum(pred_pixels.shape)
true_pos = torch.sum(pred_pixels * gold_pixels).item()
true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()
false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
precision = true_pos / (true_pos + false_pos + eps)
recall = true_pos / (true_pos + false_neg + eps)
f1 = 2 * (precision * recall) / (precision + recall + eps)
return precision, recall, f1
def main(args):
    """Entry point: build the reader, embedder, encoder, optimizer and
    trainer from parsed CLI ``args``, then train (default) or evaluate
    (``args.test``).

    Side effects: creates ``args.checkpoint_dir`` and writes vocab.json,
    config.yaml and model checkpoints into it; may load weights from an
    existing checkpoint when resuming or testing.
    """
    # Device selection: use a free GPU only when CUDA was requested.
    device = 'cpu'
    if args.cuda is not None:
        free_gpu_id = get_free_gpu()
        if free_gpu_id > -1:
            device = f'cuda:{free_gpu_id}'
    device = torch.device(device)
    print(f'On device {device}')
    nlp = English()
    tokenizer = Tokenizer(nlp.vocab)
    dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=
        args.out_path, path_width=args.path_width, read_limit=args.
        read_limit, batch_size=args.batch_size, max_len=args.max_len,
        tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,
        is_bert='bert' in args.embedder)
    checkpoint_dir = pathlib.Path(args.checkpoint_dir)
    if not checkpoint_dir.exists():
        checkpoint_dir.mkdir()
    # Vocabulary: training copies it next to the checkpoints; testing reads
    # the copy saved at training time so token ids line up.
    if not args.test:
        with open(dataset_reader.path_dict['train'].joinpath('vocab.json')
            ) as f1:
            train_vocab = json.load(f1)
        with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:
            json.dump(list(train_vocab), f1)
    else:
        print(f'Reading vocab from {checkpoint_dir}')
        with open(checkpoint_dir.joinpath('vocab.json')) as f1:
            train_vocab = json.load(f1)
    print(f'got data')
    print(f'constructing model...')
    # Language embedder: randomly initialized, GloVe-initialized, or BERT.
    if args.embedder == 'random':
        embedder = RandomEmbedder(tokenizer, train_vocab, args.
            embedding_dim, trainable=True)
    elif args.embedder == 'glove':
        embedder = GloveEmbedder(tokenizer, train_vocab, args.
            embedding_file, args.embedding_dim, trainable=True)
    elif args.embedder.startswith('bert'):
        embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.
            max_len)
    else:
        raise NotImplementedError(f'No embedder {args.embedder}')
    depth = 1
    encoder_cls = NavigationTransformerEncoder
    encoder_kwargs = dict(image_size=args.resolution, patch_size=args.
        patch_size, language_embedder=embedder, n_layers=args.n_layers,
        channels=args.channels, n_heads=args.n_heads, hidden_dim=args.
        hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout
        =args.embed_dropout, output_type=args.output_type,
        positional_encoding_type=args.pos_encoding_type, device=device,
        log_weights=args.test, locality_mask=args.locality_mask,
        locality_neighborhood=args.locality_neighborhood, init_scale=args.
        init_scale)
    encoder = encoder_cls(**encoder_kwargs)
    if args.cuda is not None:
        encoder = encoder.cuda(device)
    print(encoder)
    optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
    scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=
        args.warmup, factor=args.lr_factor)
    best_epoch = -1
    # Scale block size with resolution (4 at the base 64px resolution).
    block_size = int(args.resolution * 4 / 64)
    if not args.test:
        if not args.resume:
            # Fresh run: refuse to overwrite a directory with checkpoints.
            try:
                os.mkdir(args.checkpoint_dir)
            except FileExistsError:
                try:
                    assert len(glob.glob(os.path.join(args.checkpoint_dir,
                        '*.th'))) == 0
                except AssertionError:
                    raise AssertionError(
                        f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'
                        )
        else:
            # Resume: reload the best weights and training state on CPU
            # first, then move to the target device.
            # NOTE(review): ``.cuda(device)`` will fail when device is CPU —
            # the resume/test paths appear to assume CUDA; confirm.
            encoder = encoder.to('cpu')
            state_dict = torch.load(pathlib.Path(args.checkpoint_dir).
                joinpath('best.th'), map_location='cpu')
            encoder.load_state_dict(state_dict, strict=True)
            encoder = encoder.cuda(device)
            best_checkpoint_data = json.load(open(pathlib.Path(args.
                checkpoint_dir).joinpath('best_training_state.json')))
            print(f'best_checkpoint_data {best_checkpoint_data}')
            best_epoch = best_checkpoint_data['epoch']
        # Record the full run configuration for reproducibility; drop the
        # config-framework bookkeeping keys first.
        with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),
            'w') as f1:
            dump_args = copy.deepcopy(args)
            del dump_args.__dict__['cfg']
            del dump_args.__dict__['__cwd__']
            del dump_args.__dict__['__path__']
            to_dump = dump_args.__dict__
            yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
    else:
        print(f'loading model weights from {args.checkpoint_dir}')
        encoder = encoder.to('cpu')
        state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(
            'best.th'), map_location='cpu')
        encoder.load_state_dict(state_dict, strict=True)
        encoder = encoder.cuda(device)
    num_blocks = 1
    trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,
        encoder=encoder, optimizer=optimizer, scheduler=scheduler,
        num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,
        checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.
        checkpoint_every, validation_limit=args.validation_limit,
        num_models_to_keep=args.num_models_to_keep, generate_after_n=args.
        generate_after_n, score_type=args.score_type, depth=depth,
        resolution=args.resolution, output_type=args.output_type,
        patch_size=args.patch_size, block_size=block_size, best_epoch=
        best_epoch, seed=args.seed, zero_weight=args.zero_weight,
        debug_image_top_k=args.debug_image_top_k, debug_image_threshold=
        args.debug_image_threshold)
    if not args.test:
        trainer.train()
    else:
        print(f'evaluating')
        acc = trainer.evaluate()
        print(f'accuracy: {acc}')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NavigationTransformerTrainer(TransformerTrainer):
    """Trainer for a transformer that predicts navigation paths over images.

    Extends ``TransformerTrainer`` with dataset streaming, splitting of
    oversized batches, a per-patch cross-entropy loss, F1-based validation,
    and optional debug-image rendering.
    """

    def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
            TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
            Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
            checkpoint_dir: str, num_models_to_keep: int, generate_after_n:
            int, resolution: int=64, patch_size: int=8, block_size: int=4,
            batch_size: int=16, output_type: str='per-pixel',
            checkpoint_every: int=64, validation_limit: int=16, depth: int=7,
            score_type: str='acc', best_epoch: int=-1, seed: int=12,
            zero_weight: float=0.05, debug_image_top_k: int=None,
            debug_image_threshold: float=None):
        """See ``TransformerTrainer`` for the inherited arguments.

        Args:
            dataset_reader: streams train/dev batches.
            batch_size: maximum sub-batch size fed to the encoder.
            checkpoint_every: validate/checkpoint every N training steps.
            validation_limit: cap on dev batches per validation pass.
            debug_image_top_k: keep only the k highest-scoring pixels in
                debug images; None or a negative value disables it.
            debug_image_threshold: zero out pixels below this score in debug
                images; None or a negative value disables it.
        """
        super(NavigationTransformerTrainer, self).__init__(train_data=[],
            val_data=[], encoder=encoder, optimizer=optimizer,
            scheduler=scheduler, num_epochs=num_epochs,
            num_blocks=num_blocks, device=device,
            checkpoint_dir=checkpoint_dir,
            num_models_to_keep=num_models_to_keep,
            generate_after_n=generate_after_n, score_type=score_type,
            patch_size=patch_size, block_size=block_size,
            output_type=output_type, resolution=resolution, depth=depth,
            best_epoch=best_epoch, seed=seed, zero_weight=zero_weight)
        self.f1_metric = F1Metric()
        self.dataset_reader = dataset_reader
        self.batch_size = batch_size
        self.checkpoint_every = checkpoint_every
        self.validation_limit = validation_limit
        # BUG FIX: the original compared ``debug_image_top_k < 0`` directly,
        # which raises TypeError when the declared default of None is used.
        # Negative values still mean "disabled".
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
        self.debug_image_top_k = debug_image_top_k
        self.debug_image_threshold = debug_image_threshold

    def split_large_batch(self, batch):
        """Split an oversized batch dict into chunks of <= ``self.batch_size``,
        slicing every key along the first dimension so examples stay aligned."""
        large_bsz = batch['path_state'].shape[0]
        return [{k: v[i:i + self.batch_size] for k, v in batch.items()}
                for i in range(0, large_bsz, self.batch_size)]

    def validate_one_epoch(self, epoch, step, validation_limit):
        """Run a capped validation pass; return the mean next-position F1."""
        print(f'Validating epoch {epoch} step {step}...')
        total_next_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in enumerate(self.dataset_reader.read(
                'dev', validation_limit)):
            for small_batch in self.split_large_batch(dev_batch_instance):
                # No top-k / threshold filtering during mid-training checks;
                # relies on validate's (fixed) None defaults.
                score_dict = self.validate(small_batch, epoch, b, 0)
                total_next_acc += score_dict['next_f1']
                total += 1
        return total_next_acc / total

    def evaluate(self):
        """Evaluate on dev (capped by ``self.validation_limit``).

        Returns the mean next-position F1; renders debug images with the
        configured top-k / threshold settings.
        """
        total_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in tqdm(enumerate(
                self.dataset_reader.read('dev', self.validation_limit))):
            for small_batch in self.split_large_batch(dev_batch_instance):
                score_dict = self.validate(small_batch, 10, b, 0,
                    self.debug_image_top_k, self.debug_image_threshold)
                total_acc += score_dict['next_f1']
                total += 1
        mean_acc = total_acc / total
        print(f'Test-time pixel acc {mean_acc * 100}')
        return mean_acc

    def train_and_validate_one_epoch(self, epoch):
        """Train for one epoch, validating/checkpointing every
        ``self.checkpoint_every`` steps; return (score, -1.0).

        Raises:
            AssertionError: if ``self.score_type`` is not ``'acc'``.
        """
        print(f'Training epoch {epoch}...')
        self.encoder.train()
        skipped = 0
        step = 0
        for b, batch_instance in enumerate(self.dataset_reader.read('train')):
            for small_batch in self.split_large_batch(batch_instance):
                is_best = False
                self.optimizer.zero_grad()
                outputs = self.encoder(small_batch)
                if outputs is None:
                    # Encoder signals an unusable batch by returning None.
                    skipped += 1
                    continue
                loss = self.compute_patch_loss(small_batch, outputs,
                    self.next_to_prev_weight)
                loss.backward()
                self.optimizer.step()
                # Scheduler is stepped on a per-batch counter, after the
                # optimizer step.
                it = (epoch + 1) * (step + 1)
                self.scheduler.step_batch(it)
                if (step + 1) % self.checkpoint_every == 0:
                    step_acc = self.validate_one_epoch(epoch, step,
                        self.validation_limit)
                    print(
                        f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
                        )
                    if step_acc > self.best_score:
                        is_best = True
                        self.best_score = step_acc
                    self.save_model(f'{epoch}_{step}', is_best)
                step += 1
        print(f'skipped {skipped} examples')
        # Final, larger validation pass (10x the per-checkpoint limit).
        epoch_acc = self.validate_one_epoch(epoch, step,
            10 * self.validation_limit)
        print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
        if self.score_type == 'acc':
            return epoch_acc / 2, -1.0
        else:
            raise AssertionError(f'invalid score type {self.score_type}')

    def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=(1.0,
            1.0)):
        """Per-patch cross-entropy between predicted and gold path patches.

        ``next_to_prev_weight`` is kept for interface compatibility but is
        currently unused. (BUG FIX: default changed from a mutable list to a
        tuple so the shared default cannot be mutated across calls.)
        """
        bsz, w, h, __ = inputs['input_image'].shape
        pred_next_image = outputs['next_position']
        # A patch is positive when any gold-path pixel falls inside it.
        path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
        true_next_image = image_to_tiles(path_state, self.patch_size)
        next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
        next_patches = torch.zeros_like(next_sum_image)
        next_patches[next_sum_image != 0] = 1
        pred_next_image = pred_next_image.squeeze(-1)
        next_patches = next_patches.squeeze(-1).to(self.device).long()
        # Cross entropy expects (batch, classes, positions).
        pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
        total_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
        print(f'loss {total_loss.item()}')
        return total_loss

    def generate_debugging_image(self, true_img, path_state, pred_path,
            out_path, caption=None, top_k=None, threshold=None):
        """Render a 2x2 debug figure (gold path, caption, input image,
        predicted path) and save it to ``{out_path}.png``.

        NOTE(review): assumes a fixed 512x512 resolution — confirm against
        the dataset's actual image size.
        """
        caption = self.wrap_caption(caption)
        fig, ax = plt.subplots(2, 2, figsize=(16, 16))
        text_ax = ax[0, 1]
        text_ax.axis([0, 1, 0, 1])
        text_ax.text(0.2, 0.02, caption, fontsize=12)
        text_ax.axis('off')
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
            verticalalignment='top', bbox=props)
        img_ax = ax[1, 0]
        # Drop any alpha channel before display.
        true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
        img_ax.imshow(true_img)
        true_path = path_state.detach().numpy()
        true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
            float)
        true_ax = ax[0, 0]
        true_ax.imshow(true_path)
        # Probability of the "path" class (index 1) at every pixel.
        pred_path = torch.softmax(pred_path, dim=0)
        pred_path = pred_path[1, :, :]
        pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
        if top_k is not None:
            # Binarize: keep exactly the k highest-scoring pixels.
            top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
            top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
            pred_path[top_k_inds] = 1.1
            pred_path[pred_path < 1.0] = 0
            pred_path[top_k_inds] = 1.0
        elif threshold is not None:
            # Zero out everything below the confidence threshold; otherwise
            # show raw probabilities.
            pred_path[pred_path < threshold] = 0
        pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
        pred_ax = ax[1, 1]
        pred_ax.imshow(pred_path)
        file_path = f'{out_path}.png'
        print(f'saving to {file_path}')
        plt.savefig(file_path)
        plt.close()

    def validate(self, batch_instance, epoch_num, batch_num, instance_num,
            top_k=None, threshold=None):
        """Score one batch and optionally render per-instance debug images.

        BUG FIX: ``top_k`` and ``threshold`` now default to ``None`` so
        callers that only score (mid-training validation passes just four
        arguments) no longer raise TypeError.

        Args:
            instance_num: kept for interface compatibility (unused).

        Returns:
            dict with the batch next-position F1 under ``'next_f1'``.
        """
        self.encoder.eval()
        outputs = self.encoder(batch_instance)
        next_position = outputs['next_position']
        # Upsample per-patch logits back to a per-pixel image for scoring.
        next_position = tiles_to_image(next_position, self.patch_size,
            output_type='per-patch', upsample=True)
        next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
            'path_state'].unsqueeze(-1), next_position)
        if epoch_num > self.generate_after_n:
            for i in range(outputs['next_position'].shape[0]):
                output_path = self.checkpoint_dir.joinpath(
                    f'batch_{batch_num}').joinpath(f'instance_{i}')
                output_path.mkdir(parents=True, exist_ok=True)
                command = batch_instance['command'][i]
                command = ' '.join(x for x in command if x != '<PAD>')
                image = batch_instance['input_image'][i]
                path_state = batch_instance['path_state'][i]
                pred_path = next_position[i]
                self.generate_debugging_image(image, path_state, pred_path,
                    output_path, caption=command, top_k=top_k,
                    threshold=threshold)
        return {'next_f1': next_f1}

    def compute_f1(self, true_pos, pred_pos):
        """Pixel-level precision/recall/F1 between a gold mask and the argmax
        of ``pred_pos`` over dim 1; epsilon-smoothed against empty
        denominators."""
        eps = 1e-08
        # torch.max with dim returns (values, indices); only indices needed.
        pred_pixels = torch.max(pred_pos, dim=1).indices.unsqueeze(1)
        pred_pixels = pred_pixels.detach().cpu().float()
        gold_pixels = true_pos.detach().cpu().float()
        true_pos = torch.sum(pred_pixels * gold_pixels).item()
        false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
        false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
        precision = true_pos / (true_pos + false_pos + eps)
        recall = true_pos / (true_pos + false_neg + eps)
        f1 = 2 * (precision * recall) / (precision + recall + eps)
        return precision, recall, f1
def main(args):
    """Entry point: build the reader, embedder, encoder, optimizer and
    trainer from parsed CLI ``args``, then train (default) or evaluate
    (``args.test``).

    Side effects: creates ``args.checkpoint_dir`` and writes vocab.json,
    config.yaml and model checkpoints into it; may load weights from an
    existing checkpoint when resuming or testing.
    """
    # Device selection: use a free GPU only when CUDA was requested.
    device = 'cpu'
    if args.cuda is not None:
        free_gpu_id = get_free_gpu()
        if free_gpu_id > -1:
            device = f'cuda:{free_gpu_id}'
    device = torch.device(device)
    print(f'On device {device}')
    nlp = English()
    tokenizer = Tokenizer(nlp.vocab)
    dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=
        args.out_path, path_width=args.path_width, read_limit=args.
        read_limit, batch_size=args.batch_size, max_len=args.max_len,
        tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,
        is_bert='bert' in args.embedder)
    checkpoint_dir = pathlib.Path(args.checkpoint_dir)
    if not checkpoint_dir.exists():
        checkpoint_dir.mkdir()
    # Vocabulary: training copies it next to the checkpoints; testing reads
    # the copy saved at training time so token ids line up.
    if not args.test:
        with open(dataset_reader.path_dict['train'].joinpath('vocab.json')
            ) as f1:
            train_vocab = json.load(f1)
        with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:
            json.dump(list(train_vocab), f1)
    else:
        print(f'Reading vocab from {checkpoint_dir}')
        with open(checkpoint_dir.joinpath('vocab.json')) as f1:
            train_vocab = json.load(f1)
    print(f'got data')
    print(f'constructing model...')
    # Language embedder: randomly initialized, GloVe-initialized, or BERT.
    if args.embedder == 'random':
        embedder = RandomEmbedder(tokenizer, train_vocab, args.
            embedding_dim, trainable=True)
    elif args.embedder == 'glove':
        embedder = GloveEmbedder(tokenizer, train_vocab, args.
            embedding_file, args.embedding_dim, trainable=True)
    elif args.embedder.startswith('bert'):
        embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.
            max_len)
    else:
        raise NotImplementedError(f'No embedder {args.embedder}')
    depth = 1
    encoder_cls = NavigationTransformerEncoder
    encoder_kwargs = dict(image_size=args.resolution, patch_size=args.
        patch_size, language_embedder=embedder, n_layers=args.n_layers,
        channels=args.channels, n_heads=args.n_heads, hidden_dim=args.
        hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout
        =args.embed_dropout, output_type=args.output_type,
        positional_encoding_type=args.pos_encoding_type, device=device,
        log_weights=args.test, locality_mask=args.locality_mask,
        locality_neighborhood=args.locality_neighborhood, init_scale=args.
        init_scale)
    encoder = encoder_cls(**encoder_kwargs)
    if args.cuda is not None:
        encoder = encoder.cuda(device)
    print(encoder)
    optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
    scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=
        args.warmup, factor=args.lr_factor)
    best_epoch = -1
    # Scale block size with resolution (4 at the base 64px resolution).
    block_size = int(args.resolution * 4 / 64)
    if not args.test:
        if not args.resume:
            # Fresh run: refuse to overwrite a directory with checkpoints.
            try:
                os.mkdir(args.checkpoint_dir)
            except FileExistsError:
                try:
                    assert len(glob.glob(os.path.join(args.checkpoint_dir,
                        '*.th'))) == 0
                except AssertionError:
                    raise AssertionError(
                        f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'
                        )
        else:
            # Resume: reload the best weights and training state on CPU
            # first, then move to the target device.
            # NOTE(review): ``.cuda(device)`` will fail when device is CPU —
            # the resume/test paths appear to assume CUDA; confirm.
            encoder = encoder.to('cpu')
            state_dict = torch.load(pathlib.Path(args.checkpoint_dir).
                joinpath('best.th'), map_location='cpu')
            encoder.load_state_dict(state_dict, strict=True)
            encoder = encoder.cuda(device)
            best_checkpoint_data = json.load(open(pathlib.Path(args.
                checkpoint_dir).joinpath('best_training_state.json')))
            print(f'best_checkpoint_data {best_checkpoint_data}')
            best_epoch = best_checkpoint_data['epoch']
        # Record the full run configuration for reproducibility; drop the
        # config-framework bookkeeping keys first.
        with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),
            'w') as f1:
            dump_args = copy.deepcopy(args)
            del dump_args.__dict__['cfg']
            del dump_args.__dict__['__cwd__']
            del dump_args.__dict__['__path__']
            to_dump = dump_args.__dict__
            yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
    else:
        print(f'loading model weights from {args.checkpoint_dir}')
        encoder = encoder.to('cpu')
        state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(
            'best.th'), map_location='cpu')
        encoder.load_state_dict(state_dict, strict=True)
        encoder = encoder.cuda(device)
    num_blocks = 1
    trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,
        encoder=encoder, optimizer=optimizer, scheduler=scheduler,
        num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,
        checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.
        checkpoint_every, validation_limit=args.validation_limit,
        num_models_to_keep=args.num_models_to_keep, generate_after_n=args.
        generate_after_n, score_type=args.score_type, depth=depth,
        resolution=args.resolution, output_type=args.output_type,
        patch_size=args.patch_size, block_size=block_size, best_epoch=
        best_epoch, seed=args.seed, zero_weight=args.zero_weight,
        debug_image_top_k=args.debug_image_top_k, debug_image_threshold=
        args.debug_image_threshold)
    if not args.test:
        trainer.train()
    else:
        print(f'evaluating')
        acc = trainer.evaluate()
        print(f'accuracy: {acc}')
if __name__ == '__main__':
    # Fix both RNGs for reproducibility before any model construction.
    torch.manual_seed(12)
    np.random.seed(12)
    main(configure_parser().parse_args())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Module-level logger named after this module (standard logging pattern).
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
    """Trainer for a transformer that predicts navigation paths over images.

    Extends ``TransformerTrainer`` with dataset streaming, splitting of
    oversized batches, a per-patch cross-entropy loss, F1-based validation,
    and optional debug-image rendering.
    """

    def __init__(self, dataset_reader: NavigationDatasetReader, encoder:
            TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:
            Scheduler, num_epochs: int, num_blocks: int, device: torch.device,
            checkpoint_dir: str, num_models_to_keep: int, generate_after_n:
            int, resolution: int=64, patch_size: int=8, block_size: int=4,
            batch_size: int=16, output_type: str='per-pixel',
            checkpoint_every: int=64, validation_limit: int=16, depth: int=7,
            score_type: str='acc', best_epoch: int=-1, seed: int=12,
            zero_weight: float=0.05, debug_image_top_k: int=None,
            debug_image_threshold: float=None):
        """See ``TransformerTrainer`` for the inherited arguments.

        Args:
            dataset_reader: streams train/dev batches.
            batch_size: maximum sub-batch size fed to the encoder.
            checkpoint_every: validate/checkpoint every N training steps.
            validation_limit: cap on dev batches per validation pass.
            debug_image_top_k: keep only the k highest-scoring pixels in
                debug images; None or a negative value disables it.
            debug_image_threshold: zero out pixels below this score in debug
                images; None or a negative value disables it.
        """
        super(NavigationTransformerTrainer, self).__init__(train_data=[],
            val_data=[], encoder=encoder, optimizer=optimizer,
            scheduler=scheduler, num_epochs=num_epochs,
            num_blocks=num_blocks, device=device,
            checkpoint_dir=checkpoint_dir,
            num_models_to_keep=num_models_to_keep,
            generate_after_n=generate_after_n, score_type=score_type,
            patch_size=patch_size, block_size=block_size,
            output_type=output_type, resolution=resolution, depth=depth,
            best_epoch=best_epoch, seed=seed, zero_weight=zero_weight)
        self.f1_metric = F1Metric()
        self.dataset_reader = dataset_reader
        self.batch_size = batch_size
        self.checkpoint_every = checkpoint_every
        self.validation_limit = validation_limit
        # BUG FIX: the original compared ``debug_image_top_k < 0`` directly,
        # which raises TypeError when the declared default of None is used.
        # Negative values still mean "disabled".
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
        self.debug_image_top_k = debug_image_top_k
        self.debug_image_threshold = debug_image_threshold

    def split_large_batch(self, batch):
        """Split an oversized batch dict into chunks of <= ``self.batch_size``,
        slicing every key along the first dimension so examples stay aligned."""
        large_bsz = batch['path_state'].shape[0]
        return [{k: v[i:i + self.batch_size] for k, v in batch.items()}
                for i in range(0, large_bsz, self.batch_size)]

    def validate_one_epoch(self, epoch, step, validation_limit):
        """Run a capped validation pass; return the mean next-position F1."""
        print(f'Validating epoch {epoch} step {step}...')
        total_next_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in enumerate(self.dataset_reader.read(
                'dev', validation_limit)):
            for small_batch in self.split_large_batch(dev_batch_instance):
                # No top-k / threshold filtering during mid-training checks;
                # relies on validate's (fixed) None defaults.
                score_dict = self.validate(small_batch, epoch, b, 0)
                total_next_acc += score_dict['next_f1']
                total += 1
        return total_next_acc / total

    def evaluate(self):
        """Evaluate on dev (capped by ``self.validation_limit``).

        Returns the mean next-position F1; renders debug images with the
        configured top-k / threshold settings.
        """
        total_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in tqdm(enumerate(
                self.dataset_reader.read('dev', self.validation_limit))):
            for small_batch in self.split_large_batch(dev_batch_instance):
                score_dict = self.validate(small_batch, 10, b, 0,
                    self.debug_image_top_k, self.debug_image_threshold)
                total_acc += score_dict['next_f1']
                total += 1
        mean_acc = total_acc / total
        print(f'Test-time pixel acc {mean_acc * 100}')
        return mean_acc

    def train_and_validate_one_epoch(self, epoch):
        """Train for one epoch, validating/checkpointing every
        ``self.checkpoint_every`` steps; return (score, -1.0).

        Raises:
            AssertionError: if ``self.score_type`` is not ``'acc'``.
        """
        print(f'Training epoch {epoch}...')
        self.encoder.train()
        skipped = 0
        step = 0
        for b, batch_instance in enumerate(self.dataset_reader.read('train')):
            for small_batch in self.split_large_batch(batch_instance):
                is_best = False
                self.optimizer.zero_grad()
                outputs = self.encoder(small_batch)
                if outputs is None:
                    # Encoder signals an unusable batch by returning None.
                    skipped += 1
                    continue
                loss = self.compute_patch_loss(small_batch, outputs,
                    self.next_to_prev_weight)
                loss.backward()
                self.optimizer.step()
                # Scheduler is stepped on a per-batch counter, after the
                # optimizer step.
                it = (epoch + 1) * (step + 1)
                self.scheduler.step_batch(it)
                if (step + 1) % self.checkpoint_every == 0:
                    step_acc = self.validate_one_epoch(epoch, step,
                        self.validation_limit)
                    print(
                        f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'
                        )
                    if step_acc > self.best_score:
                        is_best = True
                        self.best_score = step_acc
                    self.save_model(f'{epoch}_{step}', is_best)
                step += 1
        print(f'skipped {skipped} examples')
        # Final, larger validation pass (10x the per-checkpoint limit).
        epoch_acc = self.validate_one_epoch(epoch, step,
            10 * self.validation_limit)
        print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
        if self.score_type == 'acc':
            return epoch_acc / 2, -1.0
        else:
            raise AssertionError(f'invalid score type {self.score_type}')

    def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=(1.0,
            1.0)):
        """Per-patch cross-entropy between predicted and gold path patches.

        ``next_to_prev_weight`` is kept for interface compatibility but is
        currently unused. (BUG FIX: default changed from a mutable list to a
        tuple so the shared default cannot be mutated across calls.)
        """
        bsz, w, h, __ = inputs['input_image'].shape
        pred_next_image = outputs['next_position']
        # A patch is positive when any gold-path pixel falls inside it.
        path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
        true_next_image = image_to_tiles(path_state, self.patch_size)
        next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
        next_patches = torch.zeros_like(next_sum_image)
        next_patches[next_sum_image != 0] = 1
        pred_next_image = pred_next_image.squeeze(-1)
        next_patches = next_patches.squeeze(-1).to(self.device).long()
        # Cross entropy expects (batch, classes, positions).
        pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')
        total_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
        print(f'loss {total_loss.item()}')
        return total_loss

    def generate_debugging_image(self, true_img, path_state, pred_path,
            out_path, caption=None, top_k=None, threshold=None):
        """Render a 2x2 debug figure (gold path, caption, input image,
        predicted path) and save it to ``{out_path}.png``.

        NOTE(review): assumes a fixed 512x512 resolution — confirm against
        the dataset's actual image size.
        """
        caption = self.wrap_caption(caption)
        fig, ax = plt.subplots(2, 2, figsize=(16, 16))
        text_ax = ax[0, 1]
        text_ax.axis([0, 1, 0, 1])
        text_ax.text(0.2, 0.02, caption, fontsize=12)
        text_ax.axis('off')
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
            verticalalignment='top', bbox=props)
        img_ax = ax[1, 0]
        # Drop any alpha channel before display.
        true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
        img_ax.imshow(true_img)
        true_path = path_state.detach().numpy()
        true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(
            float)
        true_ax = ax[0, 0]
        true_ax.imshow(true_path)
        # Probability of the "path" class (index 1) at every pixel.
        pred_path = torch.softmax(pred_path, dim=0)
        pred_path = pred_path[1, :, :]
        pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)
        if top_k is not None:
            # Binarize: keep exactly the k highest-scoring pixels.
            top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]
            top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))
            pred_path[top_k_inds] = 1.1
            pred_path[pred_path < 1.0] = 0
            pred_path[top_k_inds] = 1.0
        elif threshold is not None:
            # Zero out everything below the confidence threshold; otherwise
            # show raw probabilities.
            pred_path[pred_path < threshold] = 0
        pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
        pred_ax = ax[1, 1]
        pred_ax.imshow(pred_path)
        file_path = f'{out_path}.png'
        print(f'saving to {file_path}')
        plt.savefig(file_path)
        plt.close()

    def validate(self, batch_instance, epoch_num, batch_num, instance_num,
            top_k=None, threshold=None):
        """Score one batch and optionally render per-instance debug images.

        BUG FIX: ``top_k`` and ``threshold`` now default to ``None`` so
        callers that only score (mid-training validation passes just four
        arguments) no longer raise TypeError.

        Args:
            instance_num: kept for interface compatibility (unused).

        Returns:
            dict with the batch next-position F1 under ``'next_f1'``.
        """
        self.encoder.eval()
        outputs = self.encoder(batch_instance)
        next_position = outputs['next_position']
        # Upsample per-patch logits back to a per-pixel image for scoring.
        next_position = tiles_to_image(next_position, self.patch_size,
            output_type='per-patch', upsample=True)
        next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[
            'path_state'].unsqueeze(-1), next_position)
        if epoch_num > self.generate_after_n:
            for i in range(outputs['next_position'].shape[0]):
                output_path = self.checkpoint_dir.joinpath(
                    f'batch_{batch_num}').joinpath(f'instance_{i}')
                output_path.mkdir(parents=True, exist_ok=True)
                command = batch_instance['command'][i]
                command = ' '.join(x for x in command if x != '<PAD>')
                image = batch_instance['input_image'][i]
                path_state = batch_instance['path_state'][i]
                pred_path = next_position[i]
                self.generate_debugging_image(image, path_state, pred_path,
                    output_path, caption=command, top_k=top_k,
                    threshold=threshold)
        return {'next_f1': next_f1}

    def compute_f1(self, true_pos, pred_pos):
        """Pixel-level precision/recall/F1 between a gold mask and the argmax
        of ``pred_pos`` over dim 1; epsilon-smoothed against empty
        denominators."""
        eps = 1e-08
        # torch.max with dim returns (values, indices); only indices needed.
        pred_pixels = torch.max(pred_pos, dim=1).indices.unsqueeze(1)
        pred_pixels = pred_pixels.detach().cpu().float()
        gold_pixels = true_pos.detach().cpu().float()
        true_pos = torch.sum(pred_pixels * gold_pixels).item()
        false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()
        false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()
        precision = true_pos / (true_pos + false_pos + eps)
        recall = true_pos / (true_pos + false_neg + eps)
        f1 = 2 * (precision * recall) / (precision + recall + eps)
        return precision, recall, f1
def main(args):
    """Entry point: build dataset reader, model, optimizer and trainer from
    parsed CLI `args`, then train or (with --test) evaluate.

    NOTE(review): several branches call `encoder.cuda(device)` even though
    `device` may be the CPU (when no free GPU is found) -- presumably this
    crashes on CPU-only hosts; confirm before relying on --resume/--test
    without a GPU.
    """
    # Device selection: CPU unless --cuda was requested and a GPU is free.
    device = 'cpu'
    if args.cuda is not None:
        free_gpu_id = get_free_gpu()
        if free_gpu_id > -1:
            device = f'cuda:{free_gpu_id}'
    device = torch.device(device)
    print(f'On device {device}')
    # Whitespace tokenizer over spaCy's English vocab.
    nlp = English()
    tokenizer = Tokenizer(nlp.vocab)
    # Streaming reader over navigation trajectories on disk.
    dataset_reader = NavigationDatasetReader(dir=args.data_dir,
        out_path=args.out_path, path_width=args.path_width,
        read_limit=args.read_limit, batch_size=args.batch_size,
        max_len=args.max_len, tokenizer=tokenizer, shuffle=args.shuffle,
        overfit=args.overfit, is_bert='bert' in args.embedder)
    checkpoint_dir = pathlib.Path(args.checkpoint_dir)
    if not checkpoint_dir.exists():
        checkpoint_dir.mkdir()
    # Training: snapshot the train vocab into the checkpoint dir.
    # Testing: reload the vocab that was saved at training time.
    if not args.test:
        with open(dataset_reader.path_dict['train'].joinpath('vocab.json')
                  ) as f1:
            train_vocab = json.load(f1)
        with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:
            json.dump(list(train_vocab), f1)
    else:
        print(f'Reading vocab from {checkpoint_dir}')
        with open(checkpoint_dir.joinpath('vocab.json')) as f1:
            train_vocab = json.load(f1)
    print(f'got data')
    print(f'constructing model...')
    # Select the word-embedding module from --embedder.
    if args.embedder == 'random':
        embedder = RandomEmbedder(tokenizer, train_vocab,
            args.embedding_dim, trainable=True)
    elif args.embedder == 'glove':
        embedder = GloveEmbedder(tokenizer, train_vocab,
            args.embedding_file, args.embedding_dim, trainable=True)
    elif args.embedder.startswith('bert'):
        embedder = BERTEmbedder(model_name=args.embedder,
            max_seq_len=args.max_len)
    else:
        raise NotImplementedError(f'No embedder {args.embedder}')
    depth = 1
    encoder_cls = NavigationTransformerEncoder
    encoder_kwargs = dict(image_size=args.resolution,
        patch_size=args.patch_size, language_embedder=embedder,
        n_layers=args.n_layers, channels=args.channels,
        n_heads=args.n_heads, hidden_dim=args.hidden_dim,
        ff_dim=args.ff_dim, dropout=args.dropout,
        embed_dropout=args.embed_dropout, output_type=args.output_type,
        positional_encoding_type=args.pos_encoding_type, device=device,
        log_weights=args.test, locality_mask=args.locality_mask,
        locality_neighborhood=args.locality_neighborhood,
        init_scale=args.init_scale)
    encoder = encoder_cls(**encoder_kwargs)
    if args.cuda is not None:
        encoder = encoder.cuda(device)
    print(encoder)
    optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
    # Noam warmup schedule keyed to the model width.
    scheduler = NoamLR(optimizer, model_size=args.hidden_dim,
        warmup_steps=args.warmup, factor=args.lr_factor)
    best_epoch = -1
    block_size = int(args.resolution * 4 / 64)
    if not args.test:
        if not args.resume:
            # Fresh run: refuse to clobber a checkpoint dir holding models.
            try:
                os.mkdir(args.checkpoint_dir)
            except FileExistsError:
                try:
                    assert len(glob.glob(os.path.join(args.checkpoint_dir,
                        '*.th'))) == 0
                except AssertionError:
                    raise AssertionError(
                        f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'
                        )
        else:
            # Resume: load best weights on CPU first, then move to device.
            encoder = encoder.to('cpu')
            state_dict = torch.load(pathlib.Path(args.checkpoint_dir).
                joinpath('best.th'), map_location='cpu')
            encoder.load_state_dict(state_dict, strict=True)
            encoder = encoder.cuda(device)
            best_checkpoint_data = json.load(open(pathlib.Path(args.
                checkpoint_dir).joinpath('best_training_state.json')))
            print(f'best_checkpoint_data {best_checkpoint_data}')
            best_epoch = best_checkpoint_data['epoch']
        # Persist the (serializable) arg config next to the checkpoints.
        with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),
                  'w') as f1:
            dump_args = copy.deepcopy(args)
            # Drop fields jsonargparse adds that yaml cannot serialize.
            del dump_args.__dict__['cfg']
            del dump_args.__dict__['__cwd__']
            del dump_args.__dict__['__path__']
            to_dump = dump_args.__dict__
            yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)
    else:
        # Test mode: always load the best saved model.
        print(f'loading model weights from {args.checkpoint_dir}')
        encoder = encoder.to('cpu')
        state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(
            'best.th'), map_location='cpu')
        encoder.load_state_dict(state_dict, strict=True)
        encoder = encoder.cuda(device)
    num_blocks = 1
    trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,
        encoder=encoder, optimizer=optimizer, scheduler=scheduler,
        num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,
        checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.
        checkpoint_every, validation_limit=args.validation_limit,
        num_models_to_keep=args.num_models_to_keep, generate_after_n=args.
        generate_after_n, score_type=args.score_type, depth=depth,
        resolution=args.resolution, output_type=args.output_type,
        patch_size=args.patch_size, block_size=block_size, best_epoch=
        best_epoch, seed=args.seed, zero_weight=args.zero_weight,
        debug_image_top_k=args.debug_image_top_k, debug_image_threshold=
        args.debug_image_threshold)
    if not args.test:
        trainer.train()
    else:
        print(f'evaluating')
        acc = trainer.evaluate()
        print(f'accuracy: {acc}')
if __name__ == '__main__':
    # Seed both RNG backends (numpy first, then torch) for reproducibility.
    for seed_fn in (np.random.seed, torch.manual_seed):
        seed_fn(12)
    main(configure_parser().parse_args())
<|reserved_special_token_1|>
import json
from jsonargparse import ArgumentParser, ActionConfigFile
import yaml
from typing import List, Dict
import glob
import os
import pathlib
import pdb
import subprocess
import copy
from io import StringIO
from collections import defaultdict
import torch
from spacy.tokenizer import Tokenizer
from spacy.lang.en import English
from einops import rearrange
import logging
from tqdm import tqdm
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib import gridspec
import numpy as np
import torch.autograd.profiler as profiler
from torch.nn import functional as F
from torch.optim.lr_scheduler import StepLR
from allennlp.training.scheduler import Scheduler
from allennlp.training.learning_rate_schedulers import NoamLR
import pandas as pd
from transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image
from metrics import MSEMetric, AccuracyMetric, F1Metric
from language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder
from navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser
from train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer
from navigation_transformer import NavigationTransformerEncoder
from train_transformer import TransformerTrainer
# Module-level logger, named after the module per standard logging convention.
logger = logging.getLogger(__name__)
class NavigationTransformerTrainer(TransformerTrainer):
    """Trainer for a transformer that predicts a navigation path over an image.

    Extends `TransformerTrainer` with:
      * a streaming `NavigationDatasetReader` whose (possibly large) batches
        are re-split into `batch_size` chunks,
      * a per-patch cross-entropy loss over binarized path patches,
      * pixel-level F1 validation and optional debugging-image dumps.

    Fixes over the original:
      * `__init__` no longer compares None < 0 (TypeError with the default
        `debug_image_top_k`/`debug_image_threshold`),
      * `validate` gives `top_k`/`threshold` None defaults so the four-argument
        call in `validate_one_epoch` no longer raises TypeError,
      * `compute_f1` drops unused locals and stops shadowing its parameter,
      * `generate_debugging_image` derives the map size from the tensors
        instead of hard-coding 512x512 (identical behavior at 512).
    """

    def __init__(self, dataset_reader: NavigationDatasetReader,
                 encoder: TransformerEncoder,
                 optimizer: torch.optim.Optimizer,
                 scheduler: Scheduler,
                 num_epochs: int,
                 num_blocks: int,
                 device: torch.device,
                 checkpoint_dir: str,
                 num_models_to_keep: int,
                 generate_after_n: int,
                 resolution: int = 64,
                 patch_size: int = 8,
                 block_size: int = 4,
                 batch_size: int = 16,
                 output_type: str = 'per-pixel',
                 checkpoint_every: int = 64,
                 validation_limit: int = 16,
                 depth: int = 7,
                 score_type: str = 'acc',
                 best_epoch: int = -1,
                 seed: int = 12,
                 zero_weight: float = 0.05,
                 debug_image_top_k: int = None,
                 debug_image_threshold: float = None):
        super(NavigationTransformerTrainer, self).__init__(
            train_data=[], val_data=[], encoder=encoder, optimizer=optimizer,
            scheduler=scheduler, num_epochs=num_epochs, num_blocks=num_blocks,
            device=device, checkpoint_dir=checkpoint_dir,
            num_models_to_keep=num_models_to_keep,
            generate_after_n=generate_after_n, score_type=score_type,
            patch_size=patch_size, block_size=block_size,
            output_type=output_type, resolution=resolution, depth=depth,
            best_epoch=best_epoch, seed=seed, zero_weight=zero_weight)
        self.f1_metric = F1Metric()
        self.dataset_reader = dataset_reader
        self.batch_size = batch_size
        self.checkpoint_every = checkpoint_every
        self.validation_limit = validation_limit
        # Bug fix: guard against None before comparing -- the original raised
        # TypeError when the defaults were used. Negative values still mean
        # "disabled" and are normalized to None.
        if debug_image_top_k is not None and debug_image_top_k < 0:
            debug_image_top_k = None
        if debug_image_threshold is not None and debug_image_threshold < 0:
            debug_image_threshold = None
        self.debug_image_top_k = debug_image_top_k
        self.debug_image_threshold = debug_image_threshold

    def split_large_batch(self, batch):
        """Split a reader-sized batch dict into chunks of `self.batch_size`."""
        large_bsz = batch['path_state'].shape[0]
        small_batches = []
        for start in range(0, large_bsz, self.batch_size):
            small_batches.append({k: v[start:start + self.batch_size]
                                  for k, v in batch.items()})
        return small_batches

    def validate_one_epoch(self, epoch, step, validation_limit):
        """Mean next-position F1 over up to `validation_limit` dev batches."""
        print(f'Validating epoch {epoch} step {step}...')
        total_next_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in enumerate(
                self.dataset_reader.read('dev', validation_limit)):
            for small_batch in self.split_large_batch(dev_batch_instance):
                # top_k/threshold default to None here: any debugging images
                # generated during validation are rendered unfiltered.
                score_dict = self.validate(small_batch, epoch, b, 0)
                total_next_acc += score_dict['next_f1']
                total += 1
        return total_next_acc / total

    def evaluate(self):
        """Test-time evaluation: mean next-position F1 over the dev split."""
        total_acc = 0.0
        total = 0
        self.encoder.eval()
        for b, dev_batch_instance in tqdm(enumerate(
                self.dataset_reader.read('dev', self.validation_limit))):
            for small_batch in self.split_large_batch(dev_batch_instance):
                # epoch_num=10 forces debugging-image generation whenever
                # generate_after_n < 10.
                score_dict = self.validate(small_batch, 10, b, 0,
                                           self.debug_image_top_k,
                                           self.debug_image_threshold)
                total_acc += score_dict['next_f1']
                total += 1
        mean_acc = total_acc / total
        print(f'Test-time pixel acc {mean_acc * 100}')
        return mean_acc

    def train_and_validate_one_epoch(self, epoch):
        """One training pass over the train split with periodic validation.

        Checkpoints (and possibly promotes to "best") every
        `self.checkpoint_every` optimizer steps. Returns (score, -1.0);
        halving mirrors the parent class's two-score convention.
        """
        print(f'Training epoch {epoch}...')
        self.encoder.train()
        skipped = 0
        step = 0
        for b, batch_instance in enumerate(self.dataset_reader.read('train')):
            for sb, small_batch in enumerate(
                    self.split_large_batch(batch_instance)):
                is_best = False
                self.optimizer.zero_grad()
                outputs = self.encoder(small_batch)
                if outputs is None:
                    # The encoder signals malformed examples with None.
                    skipped += 1
                    continue
                loss = self.compute_patch_loss(small_batch, outputs,
                                               self.next_to_prev_weight)
                loss.backward()
                self.optimizer.step()
                it = (epoch + 1) * (step + 1)
                self.scheduler.step_batch(it)
                if (step + 1) % self.checkpoint_every == 0:
                    step_acc = self.validate_one_epoch(
                        epoch, step, self.validation_limit)
                    print(f'Epoch {epoch} step {step} has next pixel F1 '
                          f'{step_acc * 100:.2f}')
                    if step_acc > self.best_score:
                        is_best = True
                        self.best_score = step_acc
                    self.save_model(f'{epoch}_{step}', is_best)
                step += 1
        print(f'skipped {skipped} examples')
        epoch_acc = self.validate_one_epoch(epoch, step,
                                            10 * self.validation_limit)
        print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')
        if self.score_type == 'acc':
            return epoch_acc / 2, -1.0
        raise AssertionError(f'invalid score type {self.score_type}')

    def compute_patch_loss(self, inputs, outputs,
                           next_to_prev_weight=(1.0, 1.0)):
        """Weighted cross-entropy between predicted and gold path patches.

        A patch is labeled positive if any gold path pixel falls inside it.
        `next_to_prev_weight` is accepted for interface compatibility but is
        unused (class weighting lives in `weighted_xent_loss_fxn`); its
        default is now a tuple to avoid a mutable default argument.
        """
        bsz, w, h, __ = inputs['input_image'].shape
        pred_next_image = outputs['next_position']
        path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()
        true_next_image = image_to_tiles(path_state, self.patch_size)
        # Binarize: any patch containing a path pixel becomes a positive.
        next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)
        next_patches = torch.zeros_like(next_sum_image)
        next_patches[next_sum_image != 0] = 1
        # Cross-entropy expects (batch, classes, positions).
        pred_next_image = rearrange(pred_next_image.squeeze(-1),
                                    'b n c -> b c n')
        next_patches = next_patches.squeeze(-1).to(self.device).long()
        total_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches)
        print(f'loss {total_loss.item()}')
        return total_loss

    def generate_debugging_image(self, true_img, path_state, pred_path,
                                 out_path, caption=None, top_k=None,
                                 threshold=None):
        """Save a 2x2 debugging figure: caption, input image, gold path and
        predicted path.

        At most one of `top_k`/`threshold` should be given to sparsify the
        predicted-path panel; with neither, raw probabilities are shown.
        Generalized from a hard-coded 512x512 to the actual map size.
        """
        caption = self.wrap_caption(caption)
        fig, ax = plt.subplots(2, 2, figsize=(16, 16))
        text_ax = ax[0, 1]
        text_ax.axis([0, 1, 0, 1])
        text_ax.text(0.2, 0.02, caption, fontsize=12)
        text_ax.axis('off')
        props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
        text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,
                     verticalalignment='top', bbox=props)
        img_ax = ax[1, 0]
        true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]
        img_ax.imshow(true_img)
        true_path = path_state.detach().numpy()
        h, w = true_path.shape[0], true_path.shape[1]
        # Tile the single channel to RGB so imshow renders grayscale.
        true_path = np.tile(true_path.reshape(h, w, 1),
                            (1, 1, 3)).astype(float)
        ax[0, 0].imshow(true_path)
        # Channel 1 of the softmaxed logits = P(path).
        pred_path = torch.softmax(pred_path, dim=0)[1, :, :]
        pred_path = pred_path.cpu().detach().numpy().reshape(h, w, 1)
        if top_k is not None:
            # Keep only the k most confident pixels, binarized to 1.
            top_k_inds = np.argpartition(pred_path, -top_k,
                                         axis=None)[-top_k:]
            top_k_inds = np.unravel_index(top_k_inds, shape=(h, w))
            pred_path[top_k_inds] = 1.1
            pred_path[pred_path < 1.0] = 0
            pred_path[top_k_inds] = 1.0
        elif threshold is not None:
            pred_path[pred_path < threshold] = 0
        pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)
        ax[1, 1].imshow(pred_path)
        file_path = f'{out_path}.png'
        print(f'saving to {file_path}')
        plt.savefig(file_path)
        plt.close()

    def validate(self, batch_instance, epoch_num, batch_num, instance_num,
                 top_k=None, threshold=None):
        """Score one batch; optionally dump debugging images.

        Bug fix: `top_k`/`threshold` default to None, so the four-argument
        call in `validate_one_epoch` no longer raises TypeError.

        Returns:
            dict with 'next_f1', the pixel-level F1 of the prediction.
        """
        self.encoder.eval()
        outputs = self.encoder(batch_instance)
        # Upsample per-patch predictions back to a full-resolution image.
        next_position = tiles_to_image(outputs['next_position'],
                                       self.patch_size,
                                       output_type='per-patch',
                                       upsample=True)
        next_p, next_r, next_f1 = self.f1_metric.compute_f1(
            batch_instance['path_state'].unsqueeze(-1), next_position)
        if epoch_num > self.generate_after_n:
            for i in range(next_position.shape[0]):
                output_path = self.checkpoint_dir.joinpath(
                    f'batch_{batch_num}').joinpath(f'instance_{i}')
                output_path.mkdir(parents=True, exist_ok=True)
                # Strip padding tokens before rendering the caption.
                command = ' '.join(x for x in batch_instance['command'][i]
                                   if x != '<PAD>')
                self.generate_debugging_image(
                    batch_instance['input_image'][i],
                    batch_instance['path_state'][i],
                    next_position[i], output_path, caption=command,
                    top_k=top_k, threshold=threshold)
        return {'next_f1': next_f1}

    def compute_f1(self, true_pos, pred_pos):
        """Compute pixel-level precision, recall and F1.

        `pred_pos` carries per-class scores with the class dimension at
        dim 1; the argmax over dim 1 yields the hard 0/1 prediction, which
        is compared against the binary gold map `true_pos`.
        """
        eps = 1e-08
        _, pred_pixels = torch.max(pred_pos, dim=1)
        pred_pixels = pred_pixels.unsqueeze(1).detach().cpu().float()
        gold_pixels = true_pos.detach().cpu().float()
        tp = torch.sum(pred_pixels * gold_pixels).item()
        fp = torch.sum(pred_pixels * (1 - gold_pixels)).item()
        fn = torch.sum((1 - pred_pixels) * gold_pixels).item()
        precision = tp / (tp + fp + eps)
        recall = tp / (tp + fn + eps)
        f1 = 2 * (precision * recall) / (precision + recall + eps)
        return precision, recall, f1
def main(args):
    """Entry point: build the dataset reader, model, optimizer and trainer
    from parsed CLI `args`, then train or (with --test) evaluate.

    Fixes over the original:
      * `encoder.cuda(device)` crashed whenever `device` was the CPU (no
        free GPU found, or --resume/--test on a CPU-only host); replaced
        with `encoder.to(device)`, which is equivalent on GPU and a no-op
        on CPU.
      * `best_training_state.json` is now opened with a context manager
        instead of a leaked `open()` handle.
    """
    # Device selection: CPU unless --cuda was requested and a GPU is free.
    device = 'cpu'
    if args.cuda is not None:
        free_gpu_id = get_free_gpu()
        if free_gpu_id > -1:
            device = f'cuda:{free_gpu_id}'
    device = torch.device(device)
    print(f'On device {device}')
    # Whitespace tokenizer over spaCy's English vocab.
    nlp = English()
    tokenizer = Tokenizer(nlp.vocab)
    # Streaming reader over navigation trajectories on disk.
    dataset_reader = NavigationDatasetReader(dir=args.data_dir,
                                             out_path=args.out_path,
                                             path_width=args.path_width,
                                             read_limit=args.read_limit,
                                             batch_size=args.batch_size,
                                             max_len=args.max_len,
                                             tokenizer=tokenizer,
                                             shuffle=args.shuffle,
                                             overfit=args.overfit,
                                             is_bert='bert' in args.embedder)
    checkpoint_dir = pathlib.Path(args.checkpoint_dir)
    if not checkpoint_dir.exists():
        checkpoint_dir.mkdir()
    # Training: snapshot the train vocab into the checkpoint dir.
    # Testing: reload the vocab that was saved at training time.
    if not args.test:
        with open(dataset_reader.path_dict['train'].joinpath('vocab.json')
                  ) as f1:
            train_vocab = json.load(f1)
        with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:
            json.dump(list(train_vocab), f1)
    else:
        print(f'Reading vocab from {checkpoint_dir}')
        with open(checkpoint_dir.joinpath('vocab.json')) as f1:
            train_vocab = json.load(f1)
    print(f'got data')
    print(f'constructing model...')
    # Select the word-embedding module from --embedder.
    if args.embedder == 'random':
        embedder = RandomEmbedder(tokenizer, train_vocab,
                                  args.embedding_dim, trainable=True)
    elif args.embedder == 'glove':
        embedder = GloveEmbedder(tokenizer, train_vocab,
                                 args.embedding_file, args.embedding_dim,
                                 trainable=True)
    elif args.embedder.startswith('bert'):
        embedder = BERTEmbedder(model_name=args.embedder,
                                max_seq_len=args.max_len)
    else:
        raise NotImplementedError(f'No embedder {args.embedder}')
    depth = 1
    encoder_cls = NavigationTransformerEncoder
    encoder_kwargs = dict(image_size=args.resolution,
                          patch_size=args.patch_size,
                          language_embedder=embedder,
                          n_layers=args.n_layers,
                          channels=args.channels,
                          n_heads=args.n_heads,
                          hidden_dim=args.hidden_dim,
                          ff_dim=args.ff_dim,
                          dropout=args.dropout,
                          embed_dropout=args.embed_dropout,
                          output_type=args.output_type,
                          positional_encoding_type=args.pos_encoding_type,
                          device=device,
                          log_weights=args.test,
                          locality_mask=args.locality_mask,
                          locality_neighborhood=args.locality_neighborhood,
                          init_scale=args.init_scale)
    encoder = encoder_cls(**encoder_kwargs)
    if args.cuda is not None:
        # .to() is safe even if no GPU was free and device fell back to CPU.
        encoder = encoder.to(device)
    print(encoder)
    optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)
    # Noam warmup schedule keyed to the model width.
    scheduler = NoamLR(optimizer, model_size=args.hidden_dim,
                       warmup_steps=args.warmup, factor=args.lr_factor)
    best_epoch = -1
    block_size = int(args.resolution * 4 / 64)
    if not args.test:
        if not args.resume:
            # Fresh run: refuse to clobber a checkpoint dir holding models.
            try:
                os.mkdir(args.checkpoint_dir)
            except FileExistsError:
                if len(glob.glob(os.path.join(args.checkpoint_dir,
                                              '*.th'))) != 0:
                    raise AssertionError(
                        f'Output directory {args.checkpoint_dir} '
                        f'non-empty, will not overwrite!')
        else:
            # Resume: load best weights on CPU first, then move to device.
            encoder = encoder.to('cpu')
            state_dict = torch.load(
                pathlib.Path(args.checkpoint_dir).joinpath('best.th'),
                map_location='cpu')
            encoder.load_state_dict(state_dict, strict=True)
            encoder = encoder.to(device)
            with open(pathlib.Path(args.checkpoint_dir).joinpath(
                    'best_training_state.json')) as f1:
                best_checkpoint_data = json.load(f1)
            print(f'best_checkpoint_data {best_checkpoint_data}')
            best_epoch = best_checkpoint_data['epoch']
        # Persist the (serializable) arg config next to the checkpoints.
        with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),
                  'w') as f1:
            dump_args = copy.deepcopy(args)
            # Drop fields jsonargparse adds that yaml cannot serialize.
            del dump_args.__dict__['cfg']
            del dump_args.__dict__['__cwd__']
            del dump_args.__dict__['__path__']
            yaml.safe_dump(dump_args.__dict__, f1, encoding='utf-8',
                           allow_unicode=True)
    else:
        # Test mode: always load the best saved model.
        print(f'loading model weights from {args.checkpoint_dir}')
        encoder = encoder.to('cpu')
        state_dict = torch.load(
            pathlib.Path(args.checkpoint_dir).joinpath('best.th'),
            map_location='cpu')
        encoder.load_state_dict(state_dict, strict=True)
        encoder = encoder.to(device)
    num_blocks = 1
    trainer = NavigationTransformerTrainer(
        dataset_reader=dataset_reader,
        encoder=encoder,
        optimizer=optimizer,
        scheduler=scheduler,
        num_epochs=args.num_epochs,
        num_blocks=num_blocks,
        device=device,
        checkpoint_dir=args.checkpoint_dir,
        checkpoint_every=args.checkpoint_every,
        validation_limit=args.validation_limit,
        num_models_to_keep=args.num_models_to_keep,
        generate_after_n=args.generate_after_n,
        score_type=args.score_type,
        depth=depth,
        resolution=args.resolution,
        output_type=args.output_type,
        patch_size=args.patch_size,
        block_size=block_size,
        best_epoch=best_epoch,
        seed=args.seed,
        zero_weight=args.zero_weight,
        debug_image_top_k=args.debug_image_top_k,
        debug_image_threshold=args.debug_image_threshold)
    if not args.test:
        trainer.train()
    else:
        print(f'evaluating')
        acc = trainer.evaluate()
        print(f'accuracy: {acc}')
if __name__ == '__main__':
    # Deterministic runs: seed both RNG backends before anything else.
    np.random.seed(12)
    torch.manual_seed(12)
    cli_args = configure_parser().parse_args()
    main(cli_args)
|
flexible
|
{
"blob_id": "04aacf9461ade2e229076ffdf85aca913037edad",
"index": 642,
"step-1": "<mask token>\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n 
print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = self.compute_patch_loss(small_batch, outputs, self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n 
self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n float)\n true_ax = ax[0, 0]\n 
true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\n 'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = pred_pixels.unsqueeze(1)\n pred_pixels = 
pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n 
print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = self.compute_patch_loss(small_batch, outputs, self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n 
self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n float)\n true_ax = ax[0, 0]\n 
true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\n 'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = pred_pixels.unsqueeze(1)\n pred_pixels = 
pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\ndef main(args):\n device = 'cpu'\n if args.cuda is not None:\n free_gpu_id = get_free_gpu()\n if free_gpu_id > -1:\n device = f'cuda:{free_gpu_id}'\n device = torch.device(device)\n print(f'On device {device}')\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=\n args.out_path, path_width=args.path_width, read_limit=args.\n read_limit, batch_size=args.batch_size, max_len=args.max_len,\n tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,\n is_bert='bert' in args.embedder)\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir()\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath('vocab.json')\n ) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:\n json.dump(list(train_vocab), f1)\n else:\n print(f'Reading vocab from {checkpoint_dir}')\n with open(checkpoint_dir.joinpath('vocab.json')) as f1:\n train_vocab = json.load(f1)\n print(f'got data')\n print(f'constructing model...')\n if args.embedder == 'random':\n embedder = RandomEmbedder(tokenizer, train_vocab, args.\n embedding_dim, trainable=True)\n elif args.embedder == 'glove':\n embedder = GloveEmbedder(tokenizer, train_vocab, args.\n embedding_file, args.embedding_dim, trainable=True)\n elif args.embedder.startswith('bert'):\n 
embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.\n max_len)\n else:\n raise NotImplementedError(f'No embedder {args.embedder}')\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n encoder_kwargs = dict(image_size=args.resolution, patch_size=args.\n patch_size, language_embedder=embedder, n_layers=args.n_layers,\n channels=args.channels, n_heads=args.n_heads, hidden_dim=args.\n hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout\n =args.embed_dropout, output_type=args.output_type,\n positional_encoding_type=args.pos_encoding_type, device=device,\n log_weights=args.test, locality_mask=args.locality_mask,\n locality_neighborhood=args.locality_neighborhood, init_scale=args.\n init_scale)\n encoder = encoder_cls(**encoder_kwargs)\n if args.cuda is not None:\n encoder = encoder.cuda(device)\n print(encoder)\n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)\n scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=\n args.warmup, factor=args.lr_factor)\n best_epoch = -1\n block_size = int(args.resolution * 4 / 64)\n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n try:\n assert len(glob.glob(os.path.join(args.checkpoint_dir,\n '*.th'))) == 0\n except AssertionError:\n raise AssertionError(\n f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'\n )\n else:\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).\n joinpath('best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n best_checkpoint_data = json.load(open(pathlib.Path(args.\n checkpoint_dir).joinpath('best_training_state.json')))\n print(f'best_checkpoint_data {best_checkpoint_data}')\n best_epoch = best_checkpoint_data['epoch']\n with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),\n 'w') as f1:\n dump_args = copy.deepcopy(args)\n del 
dump_args.__dict__['cfg']\n del dump_args.__dict__['__cwd__']\n del dump_args.__dict__['__path__']\n to_dump = dump_args.__dict__\n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)\n else:\n print(f'loading model weights from {args.checkpoint_dir}')\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\n 'best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n num_blocks = 1\n trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,\n encoder=encoder, optimizer=optimizer, scheduler=scheduler,\n num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,\n checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.\n checkpoint_every, validation_limit=args.validation_limit,\n num_models_to_keep=args.num_models_to_keep, generate_after_n=args.\n generate_after_n, score_type=args.score_type, depth=depth,\n resolution=args.resolution, output_type=args.output_type,\n patch_size=args.patch_size, block_size=block_size, best_epoch=\n best_epoch, seed=args.seed, zero_weight=args.zero_weight,\n debug_image_top_k=args.debug_image_top_k, debug_image_threshold=\n args.debug_image_threshold)\n if not args.test:\n trainer.train()\n else:\n print(f'evaluating')\n acc = trainer.evaluate()\n print(f'accuracy: {acc}')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n 
print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = self.compute_patch_loss(small_batch, outputs, self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n 
self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n float)\n true_ax = ax[0, 0]\n 
true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\n 'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = pred_pixels.unsqueeze(1)\n pred_pixels = 
pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\ndef main(args):\n device = 'cpu'\n if args.cuda is not None:\n free_gpu_id = get_free_gpu()\n if free_gpu_id > -1:\n device = f'cuda:{free_gpu_id}'\n device = torch.device(device)\n print(f'On device {device}')\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=\n args.out_path, path_width=args.path_width, read_limit=args.\n read_limit, batch_size=args.batch_size, max_len=args.max_len,\n tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,\n is_bert='bert' in args.embedder)\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir()\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath('vocab.json')\n ) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:\n json.dump(list(train_vocab), f1)\n else:\n print(f'Reading vocab from {checkpoint_dir}')\n with open(checkpoint_dir.joinpath('vocab.json')) as f1:\n train_vocab = json.load(f1)\n print(f'got data')\n print(f'constructing model...')\n if args.embedder == 'random':\n embedder = RandomEmbedder(tokenizer, train_vocab, args.\n embedding_dim, trainable=True)\n elif args.embedder == 'glove':\n embedder = GloveEmbedder(tokenizer, train_vocab, args.\n embedding_file, args.embedding_dim, trainable=True)\n elif args.embedder.startswith('bert'):\n 
embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.\n max_len)\n else:\n raise NotImplementedError(f'No embedder {args.embedder}')\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n encoder_kwargs = dict(image_size=args.resolution, patch_size=args.\n patch_size, language_embedder=embedder, n_layers=args.n_layers,\n channels=args.channels, n_heads=args.n_heads, hidden_dim=args.\n hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout\n =args.embed_dropout, output_type=args.output_type,\n positional_encoding_type=args.pos_encoding_type, device=device,\n log_weights=args.test, locality_mask=args.locality_mask,\n locality_neighborhood=args.locality_neighborhood, init_scale=args.\n init_scale)\n encoder = encoder_cls(**encoder_kwargs)\n if args.cuda is not None:\n encoder = encoder.cuda(device)\n print(encoder)\n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)\n scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=\n args.warmup, factor=args.lr_factor)\n best_epoch = -1\n block_size = int(args.resolution * 4 / 64)\n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n try:\n assert len(glob.glob(os.path.join(args.checkpoint_dir,\n '*.th'))) == 0\n except AssertionError:\n raise AssertionError(\n f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'\n )\n else:\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).\n joinpath('best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n best_checkpoint_data = json.load(open(pathlib.Path(args.\n checkpoint_dir).joinpath('best_training_state.json')))\n print(f'best_checkpoint_data {best_checkpoint_data}')\n best_epoch = best_checkpoint_data['epoch']\n with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),\n 'w') as f1:\n dump_args = copy.deepcopy(args)\n del 
dump_args.__dict__['cfg']\n del dump_args.__dict__['__cwd__']\n del dump_args.__dict__['__path__']\n to_dump = dump_args.__dict__\n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)\n else:\n print(f'loading model weights from {args.checkpoint_dir}')\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\n 'best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n num_blocks = 1\n trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,\n encoder=encoder, optimizer=optimizer, scheduler=scheduler,\n num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,\n checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.\n checkpoint_every, validation_limit=args.validation_limit,\n num_models_to_keep=args.num_models_to_keep, generate_after_n=args.\n generate_after_n, score_type=args.score_type, depth=depth,\n resolution=args.resolution, output_type=args.output_type,\n patch_size=args.patch_size, block_size=block_size, best_epoch=\n best_epoch, seed=args.seed, zero_weight=args.zero_weight,\n debug_image_top_k=args.debug_image_top_k, debug_image_threshold=\n args.debug_image_threshold)\n if not args.test:\n trainer.train()\n else:\n print(f'evaluating')\n acc = trainer.evaluate()\n print(f'accuracy: {acc}')\n\n\nif __name__ == '__main__':\n np.random.seed(12)\n torch.manual_seed(12)\n parser = configure_parser()\n args = parser.parse_args()\n main(args)\n",
"step-4": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\nclass NavigationTransformerTrainer(TransformerTrainer):\n\n def __init__(self, dataset_reader: NavigationDatasetReader, encoder:\n TransformerEncoder, optimizer: torch.optim.Optimizer, scheduler:\n Scheduler, num_epochs: int, num_blocks: int, device: torch.device,\n checkpoint_dir: str, num_models_to_keep: int, generate_after_n: int,\n resolution: int=64, patch_size: int=8, block_size: int=4,\n batch_size: int=16, output_type: str='per-pixel', checkpoint_every:\n int=64, validation_limit: int=16, depth: int=7, score_type: str=\n 'acc', best_epoch: int=-1, seed: int=12, zero_weight: float=0.05,\n debug_image_top_k: int=None, debug_image_threshold: float=None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[], encoder=encoder, optimizer=optimizer, scheduler=\n scheduler, num_epochs=num_epochs, num_blocks=num_blocks, device\n =device, checkpoint_dir=checkpoint_dir, num_models_to_keep=\n num_models_to_keep, generate_after_n=generate_after_n,\n score_type=score_type, patch_size=patch_size, block_size=\n block_size, output_type=output_type, resolution=resolution,\n depth=depth, best_epoch=best_epoch, seed=seed, zero_weight=\n zero_weight)\n self.f1_metric = F1Metric()\n self.dataset_reader = dataset_reader\n self.batch_size = batch_size\n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {}\n for k in batch.keys():\n small_batch[k] = batch[k][i:i + self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, 
step, validation_limit):\n print(f'Validating epoch {epoch} step {step}...')\n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0\n self.encoder.eval()\n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\n 'dev', validation_limit)):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0)\n total_next_acc += score_dict['next_f1']\n total += 1\n mean_next_acc = total_next_acc / total\n return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0\n total = 0\n total_block_acc = 0.0\n self.encoder.eval()\n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.\n read('dev', self.validation_limit))):\n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.\n debug_image_top_k, self.debug_image_threshold)\n total_acc += score_dict['next_f1']\n total += 1\n mean_acc = total_acc / total\n print(f'Test-time pixel acc {mean_acc * 100}')\n return mean_acc\n\n def train_and_validate_one_epoch(self, epoch):\n print(f'Training epoch {epoch}...')\n self.encoder.train()\n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read('train')):\n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad()\n outputs = self.encoder(small_batch)\n if outputs is None:\n skipped += 1\n continue\n loss = self.compute_patch_loss(small_batch, outputs, self.\n next_to_prev_weight)\n loss.backward()\n self.optimizer.step()\n it = (epoch + 1) * (step + 1)\n self.scheduler.step_batch(it)\n if (step + 1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.\n validation_limit)\n print(\n f'Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}'\n )\n if step_acc > self.best_score:\n is_best = True\n 
self.best_score = step_acc\n self.save_model(f'{epoch}_{step}', is_best)\n step += 1\n print(f'skipped {skipped} examples')\n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.\n validation_limit)\n print(f'Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}')\n if self.score_type == 'acc':\n return epoch_acc / 2, -1.0\n else:\n raise AssertionError(f'invalid score type {self.score_type}')\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight=[1.0,\n 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape\n pred_next_image = outputs['next_position']\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float()\n true_next_image = image_to_tiles(path_state, self.patch_size)\n next_sum_image = torch.sum(true_next_image, dim=2, keepdim=True)\n next_patches = torch.zeros_like(next_sum_image)\n next_patches[next_sum_image != 0] = 1\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long()\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image,\n next_patches)\n total_loss = next_pixel_loss\n print(f'loss {total_loss.item()}')\n return total_loss\n\n def generate_debugging_image(self, true_img, path_state, pred_path,\n out_path, caption=None, top_k=None, threshold=None):\n caption = self.wrap_caption(caption)\n fig, ax = plt.subplots(2, 2, figsize=(16, 16))\n text_ax = ax[0, 1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize=12)\n text_ax.axis('off')\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n img_ax = ax[1, 0]\n true_img = true_img.detach().cpu().numpy().astype(float)[:, :, 0:3]\n img_ax.imshow(true_img)\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1, 1, 3)).astype(\n 
float)\n true_ax = ax[0, 0]\n true_ax.imshow(true_path)\n pred_path = torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1, :, :]\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape=(512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path < 1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path\n pred_path = np.tile(pred_path, (1, 1, 3)).astype(float)\n pred_ax = ax[1, 1]\n pred_ax.imshow(pred_path)\n file_path = f'{out_path}.png'\n print(f'saving to {file_path}')\n plt.savefig(file_path)\n plt.close()\n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num,\n top_k, threshold):\n self.encoder.eval()\n outputs = self.encoder(batch_instance)\n next_position = outputs['next_position']\n next_position = tiles_to_image(next_position, self.patch_size,\n output_type='per-patch', upsample=True)\n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\n 'path_state'].unsqueeze(-1), next_position)\n if epoch_num > self.generate_after_n:\n for i in range(outputs['next_position'].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f'batch_{batch_num}'\n ).joinpath(f'instance_{i}')\n output_path.mkdir(parents=True, exist_ok=True)\n command = batch_instance['command'][i]\n command = [x for x in command if x != '<PAD>']\n command = ' '.join(command)\n image = batch_instance['input_image'][i]\n path_state = batch_instance['path_state'][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image, path_state, pred_path,\n output_path, caption=command, top_k=top_k, threshold=\n threshold)\n return {'next_f1': next_f1}\n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-08\n values, pred_pixels = torch.max(pred_pos, dim=1)\n gold_pixels = true_pos\n pred_pixels = 
pred_pixels.unsqueeze(1)\n pred_pixels = pred_pixels.detach().cpu().float()\n gold_pixels = gold_pixels.detach().cpu().float()\n total_pixels = sum(pred_pixels.shape)\n true_pos = torch.sum(pred_pixels * gold_pixels).item()\n true_neg = torch.sum((1 - pred_pixels) * (1 - gold_pixels)).item()\n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item()\n false_neg = torch.sum((1 - pred_pixels) * gold_pixels).item()\n precision = true_pos / (true_pos + false_pos + eps)\n recall = true_pos / (true_pos + false_neg + eps)\n f1 = 2 * (precision * recall) / (precision + recall + eps)\n return precision, recall, f1\n\n\ndef main(args):\n device = 'cpu'\n if args.cuda is not None:\n free_gpu_id = get_free_gpu()\n if free_gpu_id > -1:\n device = f'cuda:{free_gpu_id}'\n device = torch.device(device)\n print(f'On device {device}')\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n dataset_reader = NavigationDatasetReader(dir=args.data_dir, out_path=\n args.out_path, path_width=args.path_width, read_limit=args.\n read_limit, batch_size=args.batch_size, max_len=args.max_len,\n tokenizer=tokenizer, shuffle=args.shuffle, overfit=args.overfit,\n is_bert='bert' in args.embedder)\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir()\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath('vocab.json')\n ) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath('vocab.json'), 'w') as f1:\n json.dump(list(train_vocab), f1)\n else:\n print(f'Reading vocab from {checkpoint_dir}')\n with open(checkpoint_dir.joinpath('vocab.json')) as f1:\n train_vocab = json.load(f1)\n print(f'got data')\n print(f'constructing model...')\n if args.embedder == 'random':\n embedder = RandomEmbedder(tokenizer, train_vocab, args.\n embedding_dim, trainable=True)\n elif args.embedder == 'glove':\n embedder = GloveEmbedder(tokenizer, train_vocab, args.\n embedding_file, args.embedding_dim, trainable=True)\n elif 
args.embedder.startswith('bert'):\n embedder = BERTEmbedder(model_name=args.embedder, max_seq_len=args.\n max_len)\n else:\n raise NotImplementedError(f'No embedder {args.embedder}')\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n encoder_kwargs = dict(image_size=args.resolution, patch_size=args.\n patch_size, language_embedder=embedder, n_layers=args.n_layers,\n channels=args.channels, n_heads=args.n_heads, hidden_dim=args.\n hidden_dim, ff_dim=args.ff_dim, dropout=args.dropout, embed_dropout\n =args.embed_dropout, output_type=args.output_type,\n positional_encoding_type=args.pos_encoding_type, device=device,\n log_weights=args.test, locality_mask=args.locality_mask,\n locality_neighborhood=args.locality_neighborhood, init_scale=args.\n init_scale)\n encoder = encoder_cls(**encoder_kwargs)\n if args.cuda is not None:\n encoder = encoder.cuda(device)\n print(encoder)\n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate)\n scheduler = NoamLR(optimizer, model_size=args.hidden_dim, warmup_steps=\n args.warmup, factor=args.lr_factor)\n best_epoch = -1\n block_size = int(args.resolution * 4 / 64)\n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n try:\n assert len(glob.glob(os.path.join(args.checkpoint_dir,\n '*.th'))) == 0\n except AssertionError:\n raise AssertionError(\n f'Output directory {args.checkpoint_dir} non-empty, will not overwrite!'\n )\n else:\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).\n joinpath('best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n best_checkpoint_data = json.load(open(pathlib.Path(args.\n checkpoint_dir).joinpath('best_training_state.json')))\n print(f'best_checkpoint_data {best_checkpoint_data}')\n best_epoch = best_checkpoint_data['epoch']\n with open(pathlib.Path(args.checkpoint_dir).joinpath('config.yaml'),\n 'w') as f1:\n dump_args = 
copy.deepcopy(args)\n del dump_args.__dict__['cfg']\n del dump_args.__dict__['__cwd__']\n del dump_args.__dict__['__path__']\n to_dump = dump_args.__dict__\n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True)\n else:\n print(f'loading model weights from {args.checkpoint_dir}')\n encoder = encoder.to('cpu')\n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\n 'best.th'), map_location='cpu')\n encoder.load_state_dict(state_dict, strict=True)\n encoder = encoder.cuda(device)\n num_blocks = 1\n trainer = NavigationTransformerTrainer(dataset_reader=dataset_reader,\n encoder=encoder, optimizer=optimizer, scheduler=scheduler,\n num_epochs=args.num_epochs, num_blocks=num_blocks, device=device,\n checkpoint_dir=args.checkpoint_dir, checkpoint_every=args.\n checkpoint_every, validation_limit=args.validation_limit,\n num_models_to_keep=args.num_models_to_keep, generate_after_n=args.\n generate_after_n, score_type=args.score_type, depth=depth,\n resolution=args.resolution, output_type=args.output_type,\n patch_size=args.patch_size, block_size=block_size, best_epoch=\n best_epoch, seed=args.seed, zero_weight=args.zero_weight,\n debug_image_top_k=args.debug_image_top_k, debug_image_threshold=\n args.debug_image_threshold)\n if not args.test:\n trainer.train()\n else:\n print(f'evaluating')\n acc = trainer.evaluate()\n print(f'accuracy: {acc}')\n\n\nif __name__ == '__main__':\n np.random.seed(12)\n torch.manual_seed(12)\n parser = configure_parser()\n args = parser.parse_args()\n main(args)\n",
"step-5": "import json \nfrom jsonargparse import ArgumentParser, ActionConfigFile \nimport yaml \nfrom typing import List, Dict\nimport glob\nimport os \nimport pathlib\nimport pdb \nimport subprocess \nimport copy \nfrom io import StringIO\nfrom collections import defaultdict\n\nimport torch\nfrom spacy.tokenizer import Tokenizer\nfrom spacy.lang.en import English\nfrom einops import rearrange \nimport logging \nfrom tqdm import tqdm \nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib import gridspec\nimport numpy as np\nimport torch.autograd.profiler as profiler\nfrom torch.nn import functional as F\nfrom torch.optim.lr_scheduler import StepLR\nfrom allennlp.training.scheduler import Scheduler \nfrom allennlp.training.learning_rate_schedulers import NoamLR\nimport pandas as pd \n\nfrom transformer import TransformerEncoder, ResidualTransformerEncoder, image_to_tiles, tiles_to_image\nfrom metrics import MSEMetric, AccuracyMetric, F1Metric\nfrom language_embedders import RandomEmbedder, GloveEmbedder, BERTEmbedder\nfrom navigation_data import NavigationDatasetReader, NavigationImageTrajectory, configure_parser\nfrom train_language_encoder import get_free_gpu, load_data, get_vocab, LanguageTrainer, FlatLanguageTrainer\nfrom navigation_transformer import NavigationTransformerEncoder\nfrom train_transformer import TransformerTrainer\n\nlogger = logging.getLogger(__name__)\n\nclass NavigationTransformerTrainer(TransformerTrainer): \n def __init__(self,\n dataset_reader: NavigationDatasetReader,\n encoder: TransformerEncoder,\n optimizer: torch.optim.Optimizer,\n scheduler: Scheduler, \n num_epochs: int,\n num_blocks: int, \n device: torch.device,\n checkpoint_dir: str,\n num_models_to_keep: int,\n generate_after_n: int,\n resolution: int = 64, \n patch_size: int = 8,\n block_size: int = 4, \n batch_size: int = 16, \n output_type: str = \"per-pixel\", \n checkpoint_every: int = 64,\n validation_limit: int = 
16, \n depth: int = 7,\n score_type: str = \"acc\",\n best_epoch: int = -1,\n seed: int = 12, \n zero_weight: float = 0.05,\n debug_image_top_k: int = None,\n debug_image_threshold: float = None):\n super(NavigationTransformerTrainer, self).__init__(train_data=[],\n val_data=[],\n encoder=encoder,\n optimizer=optimizer,\n scheduler=scheduler,\n num_epochs=num_epochs,\n num_blocks=num_blocks,\n device=device,\n checkpoint_dir=checkpoint_dir,\n num_models_to_keep=num_models_to_keep,\n generate_after_n=generate_after_n,\n score_type=score_type,\n patch_size=patch_size,\n block_size=block_size,\n output_type=output_type,\n resolution=resolution, \n depth=depth, \n best_epoch=best_epoch,\n seed=seed,\n zero_weight=zero_weight) \n self.f1_metric = F1Metric() \n self.dataset_reader = dataset_reader\n self.batch_size = batch_size \n self.checkpoint_every = checkpoint_every\n self.validation_limit = validation_limit\n if debug_image_top_k < 0:\n debug_image_top_k = None\n if debug_image_threshold < 0:\n debug_image_threshold = None\n self.debug_image_top_k = debug_image_top_k\n self.debug_image_threshold = debug_image_threshold\n\n def split_large_batch(self, batch):\n large_bsz = batch['path_state'].shape[0]\n small_batches = []\n for i in range(0, large_bsz, self.batch_size):\n small_batch = {} \n for k in batch.keys():\n small_batch[k] = batch[k][i:i+self.batch_size]\n small_batches.append(small_batch)\n return small_batches\n\n def validate_one_epoch(self, epoch, step, validation_limit):\n print(f\"Validating epoch {epoch} step {step}...\") \n total_prev_acc, total_next_acc = 0.0, 0.0\n total = 0 \n self.encoder.eval() \n for b, dev_batch_instance in enumerate(self.dataset_reader.read(\"dev\", validation_limit)): \n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, epoch, b, 0) \n total_next_acc += score_dict['next_f1']\n total += 1\n\n mean_next_acc = total_next_acc / total \n 
return mean_next_acc\n\n def evaluate(self):\n total_acc = 0.0 \n total = 0 \n total_block_acc = 0.0 \n self.encoder.eval() \n for b, dev_batch_instance in tqdm(enumerate(self.dataset_reader.read(\"dev\", self.validation_limit))): \n actual_batches = self.split_large_batch(dev_batch_instance)\n for small_batch in actual_batches:\n score_dict = self.validate(small_batch, 10, b, 0, self.debug_image_top_k, self.debug_image_threshold) \n total_acc += score_dict['next_f1']\n total += 1\n\n mean_acc = total_acc / total \n print(f\"Test-time pixel acc {mean_acc * 100}\") \n return mean_acc \n\n def train_and_validate_one_epoch(self, epoch): \n print(f\"Training epoch {epoch}...\") \n self.encoder.train() \n skipped = 0\n step = 0\n for b, batch_instance in enumerate(self.dataset_reader.read(\"train\")): \n actual_batches = self.split_large_batch(batch_instance)\n for sb, small_batch in enumerate(actual_batches):\n is_best = False\n self.optimizer.zero_grad() \n outputs = self.encoder(small_batch) \n # skip bad examples \n if outputs is None:\n skipped += 1\n continue\n\n loss = self.compute_patch_loss(small_batch, outputs, self.next_to_prev_weight) \n loss.backward() \n self.optimizer.step() \n it = (epoch + 1) * (step+1) \n self.scheduler.step_batch(it) \n #print(f\"step: {step+1} checkpoint_every: {self.checkpoint_every} {(step +1) % self.checkpoint_every}\")\n if (step+1) % self.checkpoint_every == 0:\n step_acc = self.validate_one_epoch(epoch, step, self.validation_limit)\n print(f\"Epoch {epoch} step {step} has next pixel F1 {step_acc * 100:.2f}\")\n if step_acc > self.best_score:\n is_best = True\n self.best_score = step_acc\n\n self.save_model(f\"{epoch}_{step}\", is_best) \n\n step += 1\n print(f\"skipped {skipped} examples\") \n epoch_acc = self.validate_one_epoch(epoch, step, 10 * self.validation_limit) \n print(f\"Epoch {epoch} has next pixel F1 {epoch_acc * 100:.2f}\") \n if self.score_type == \"acc\":\n return (epoch_acc)/2, -1.0\n else:\n raise 
AssertionError(f\"invalid score type {self.score_type}\")\n\n def compute_patch_loss(self, inputs, outputs, next_to_prev_weight = [1.0, 1.0]):\n \"\"\"\n compute per-patch for each patch \n \"\"\"\n bsz, w, h, __ = inputs['input_image'].shape \n\n pred_next_image = outputs[\"next_position\"]\n\n path_state = inputs['path_state'].reshape(bsz, 1, w, h).float() \n true_next_image = image_to_tiles(path_state, self.patch_size) \n\n # binarize patches\n next_sum_image = torch.sum(true_next_image, dim = 2, keepdim=True) \n next_patches = torch.zeros_like(next_sum_image)\n # any patch that has a 1 pixel in it gets 1 \n next_patches[next_sum_image != 0] = 1\n\n pred_next_image = pred_next_image.squeeze(-1)\n next_patches = next_patches.squeeze(-1).to(self.device).long() \n\n pred_next_image = rearrange(pred_next_image, 'b n c -> b c n')\n\n next_pixel_loss = self.weighted_xent_loss_fxn(pred_next_image, next_patches) \n\n total_loss = next_pixel_loss \n print(f\"loss {total_loss.item()}\")\n\n return total_loss\n\n def generate_debugging_image(self, \n true_img, \n path_state, \n pred_path, \n out_path, \n caption = None,\n top_k = None,\n threshold = None): \n caption = self.wrap_caption(caption)\n\n fig, ax = plt.subplots(2,2, figsize=(16,16))\n\n # gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1])\n text_ax = ax[0,1]\n text_ax.axis([0, 1, 0, 1])\n text_ax.text(0.2, 0.02, caption, fontsize = 12)\n text_ax.axis(\"off\") \n\n props = dict(boxstyle='round', \n facecolor='wheat', alpha=0.5)\n text_ax.text(0.05, 0.95, caption, wrap=True, fontsize=14,\n verticalalignment='top', bbox=props)\n # img_ax = plt.subplot(gs[2])\n img_ax = ax[1,0]\n #w = int(40 * (self.resolution / 224))\n true_img = true_img.detach().cpu().numpy().astype(float)[:,:,0:3]\n img_ax.imshow(true_img)\n\n true_path = path_state.detach().numpy()\n true_path = np.tile(true_path.reshape(512, 512, 1), (1,1,3)).astype(float)\n\n true_ax = ax[0,0]\n true_ax.imshow(true_path)\n\n pred_path = 
torch.softmax(pred_path, dim=0)\n pred_path = pred_path[1,:,:]\n\n\n pred_path = pred_path.cpu().detach().numpy().reshape(512, 512, 1)\n\n if top_k is not None:\n top_k_inds = np.argpartition(pred_path, -top_k, axis=None)[-top_k:]\n top_k_inds = np.unravel_index(top_k_inds, shape = (512, 512))\n pred_path[top_k_inds] = 1.1\n pred_path[pred_path<1.0] = 0\n pred_path[top_k_inds] = 1.0\n elif threshold is not None:\n pred_path[pred_path < threshold] = 0\n else:\n pred_path = pred_path \n\n\n pred_path = np.tile(pred_path, (1,1,3)).astype(float)\n\n pred_ax = ax[1,1]\n pred_ax.imshow(pred_path)\n\n file_path = f\"{out_path}.png\"\n print(f\"saving to {file_path}\") \n plt.savefig(file_path) \n plt.close() \n\n def validate(self, batch_instance, epoch_num, batch_num, instance_num, top_k, threshold): \n self.encoder.eval() \n outputs = self.encoder(batch_instance) \n next_position = outputs['next_position']\n\n next_position = tiles_to_image(next_position, self.patch_size, output_type=\"per-patch\", upsample=True) \n # f1 metric \n next_p, next_r, next_f1 = self.f1_metric.compute_f1(batch_instance[\"path_state\"].unsqueeze(-1), next_position) \n\n if epoch_num > self.generate_after_n: \n for i in range(outputs[\"next_position\"].shape[0]):\n output_path = self.checkpoint_dir.joinpath(f\"batch_{batch_num}\").joinpath(f\"instance_{i}\")\n output_path.mkdir(parents = True, exist_ok=True)\n command = batch_instance[\"command\"][i]\n command = [x for x in command if x != \"<PAD>\"]\n command = \" \".join(command) \n image = batch_instance['input_image'][i]\n path_state = batch_instance[\"path_state\"][i]\n pred_path = next_position[i]\n self.generate_debugging_image(image,\n path_state,\n pred_path,\n output_path,\n caption = command,\n top_k = top_k,\n threshold = threshold)\n\n return {\"next_f1\": next_f1} \n\n def compute_f1(self, true_pos, pred_pos):\n eps = 1e-8\n values, pred_pixels = torch.max(pred_pos, dim=1) \n gold_pixels = true_pos \n pred_pixels = 
pred_pixels.unsqueeze(1) \n\n pred_pixels = pred_pixels.detach().cpu().float() \n gold_pixels = gold_pixels.detach().cpu().float() \n\n total_pixels = sum(pred_pixels.shape) \n\n true_pos = torch.sum(pred_pixels * gold_pixels).item() \n true_neg = torch.sum((1-pred_pixels) * (1 - gold_pixels)).item() \n false_pos = torch.sum(pred_pixels * (1 - gold_pixels)).item() \n false_neg = torch.sum((1-pred_pixels) * gold_pixels).item() \n precision = true_pos / (true_pos + false_pos + eps) \n recall = true_pos / (true_pos + false_neg + eps) \n f1 = 2 * (precision * recall) / (precision + recall + eps) \n return precision, recall, f1\n\ndef main(args):\n device = \"cpu\"\n if args.cuda is not None:\n free_gpu_id = get_free_gpu()\n if free_gpu_id > -1:\n device = f\"cuda:{free_gpu_id}\"\n #device = \"cuda:0\"\n\n device = torch.device(device) \n print(f\"On device {device}\") \n #test = torch.ones((1))\n #test = test.to(device) \n\n nlp = English()\n tokenizer = Tokenizer(nlp.vocab)\n\n dataset_reader = NavigationDatasetReader(dir = args.data_dir,\n out_path = args.out_path,\n path_width = args.path_width,\n read_limit = args.read_limit, \n batch_size = args.batch_size, \n max_len = args.max_len,\n tokenizer = tokenizer,\n shuffle = args.shuffle,\n overfit = args.overfit, \n is_bert = \"bert\" in args.embedder) \n\n checkpoint_dir = pathlib.Path(args.checkpoint_dir)\n if not checkpoint_dir.exists():\n checkpoint_dir.mkdir() \n\n if not args.test:\n with open(dataset_reader.path_dict['train'].joinpath(\"vocab.json\")) as f1:\n train_vocab = json.load(f1)\n with open(checkpoint_dir.joinpath(\"vocab.json\"), \"w\") as f1:\n json.dump(list(train_vocab), f1) \n else:\n print(f\"Reading vocab from {checkpoint_dir}\") \n with open(checkpoint_dir.joinpath(\"vocab.json\")) as f1:\n train_vocab = json.load(f1) \n\n print(f\"got data\") \n\n # construct the vocab and tokenizer \n print(f\"constructing model...\") \n # get the embedder from args \n if args.embedder == \"random\":\n 
embedder = RandomEmbedder(tokenizer, train_vocab, args.embedding_dim, trainable=True)\n elif args.embedder == \"glove\":\n embedder = GloveEmbedder(tokenizer, train_vocab, args.embedding_file, args.embedding_dim, trainable=True) \n elif args.embedder.startswith(\"bert\"): \n embedder = BERTEmbedder(model_name = args.embedder, max_seq_len = args.max_len) \n else:\n raise NotImplementedError(f\"No embedder {args.embedder}\") \n\n depth = 1\n encoder_cls = NavigationTransformerEncoder\n \n encoder_kwargs = dict(image_size = args.resolution,\n patch_size = args.patch_size, \n language_embedder = embedder, \n n_layers = args.n_layers,\n channels = args.channels,\n n_heads = args.n_heads,\n hidden_dim = args.hidden_dim,\n ff_dim = args.ff_dim,\n dropout = args.dropout,\n embed_dropout = args.embed_dropout,\n output_type = args.output_type, \n positional_encoding_type = args.pos_encoding_type,\n device = device,\n log_weights = args.test,\n locality_mask = args.locality_mask,\n locality_neighborhood = args.locality_neighborhood,\n init_scale = args.init_scale) \n\n # Initialize encoder \n encoder = encoder_cls(**encoder_kwargs)\n\n if args.cuda is not None:\n encoder = encoder.cuda(device) \n print(encoder) \n # construct optimizer \n optimizer = torch.optim.Adam(encoder.parameters(), lr=args.learn_rate) \n # scheduler\n scheduler = NoamLR(optimizer, model_size = args.hidden_dim, warmup_steps = args.warmup, factor = args.lr_factor) \n\n best_epoch = -1\n block_size = int((args.resolution * 4)/64) \n if not args.test:\n if not args.resume:\n try:\n os.mkdir(args.checkpoint_dir)\n except FileExistsError:\n # file exists\n try:\n assert(len(glob.glob(os.path.join(args.checkpoint_dir, \"*.th\"))) == 0)\n except AssertionError:\n raise AssertionError(f\"Output directory {args.checkpoint_dir} non-empty, will not overwrite!\") \n else:\n # resume from pre-trained \n encoder = encoder.to(\"cpu\") \n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\"best.th\"), 
map_location='cpu')\n \n encoder.load_state_dict(state_dict, strict=True) \n encoder = encoder.cuda(device) \n # get training info \n best_checkpoint_data = json.load(open(pathlib.Path(args.checkpoint_dir).joinpath(\"best_training_state.json\")))\n print(f\"best_checkpoint_data {best_checkpoint_data}\") \n best_epoch = best_checkpoint_data[\"epoch\"]\n\n # save arg config to checkpoint_dir\n with open(pathlib.Path(args.checkpoint_dir).joinpath(\"config.yaml\"), \"w\") as f1:\n dump_args = copy.deepcopy(args) \n # drop stuff we can't serialize \n del(dump_args.__dict__[\"cfg\"]) \n del(dump_args.__dict__[\"__cwd__\"]) \n del(dump_args.__dict__[\"__path__\"]) \n to_dump = dump_args.__dict__\n # dump \n yaml.safe_dump(to_dump, f1, encoding='utf-8', allow_unicode=True) \n\n else:\n # test-time, load best model \n print(f\"loading model weights from {args.checkpoint_dir}\") \n #state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\"best.th\"))\n #encoder.load_state_dict(state_dict, strict=True) \n encoder = encoder.to(\"cpu\") \n state_dict = torch.load(pathlib.Path(args.checkpoint_dir).joinpath(\"best.th\"), map_location='cpu')\n \n encoder.load_state_dict(state_dict, strict=True) \n encoder = encoder.cuda(device) \n\n num_blocks = 1\n # construct trainer \n trainer = NavigationTransformerTrainer(dataset_reader = dataset_reader,\n encoder = encoder,\n optimizer = optimizer, \n scheduler = scheduler, \n num_epochs = args.num_epochs,\n num_blocks = num_blocks,\n device = device,\n checkpoint_dir = args.checkpoint_dir,\n checkpoint_every = args.checkpoint_every, \n validation_limit = args.validation_limit, \n num_models_to_keep = args.num_models_to_keep,\n generate_after_n = args.generate_after_n, \n score_type=args.score_type,\n depth = depth, \n resolution = args.resolution, \n output_type = args.output_type, \n patch_size = args.patch_size,\n block_size = block_size, \n best_epoch = best_epoch,\n seed = args.seed,\n zero_weight = args.zero_weight,\n 
debug_image_top_k = args.debug_image_top_k,\n debug_image_threshold = args.debug_image_threshold) \n\n if not args.test:\n trainer.train() \n else:\n print(f\"evaluating\") \n acc = trainer.evaluate()\n print(f\"accuracy: {acc}\")\n\n\nif __name__ == \"__main__\":\n np.random.seed(12)\n torch.manual_seed(12)\n\n parser = configure_parser()\n args = parser.parse_args() \n\n main(args) \n\n",
"step-ids": [
10,
11,
12,
13,
15
]
}
|
[
10,
11,
12,
13,
15
] |
#Интегрирование точного решения кинетик затухания люминесценции символьным методом
#Из за сложности получаемых уравнений. Последующий подбор коэффициентов методом МНК
# и печать результата
#
import sympy as sym
def del_flu_sym(x ,t = 1 ,Ka = 1, Ktt = 0.5):
intens = x**2
return intens
x = sym.Symbol('x')
t = sym.Symbol('t')
dlfl_integral = sym.integrate(del_flu_sym(x, t), (x))
print(dlfl_integral(2))
sym.pprint(dlfl_integral)
|
normal
|
{
"blob_id": "903a431ac39734338b4d464629b4b04a87dc9e8e",
"index": 1776,
"step-1": "<mask token>\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\n<mask token>\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-3": "<mask token>\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\nx = sym.Symbol('x')\nt = sym.Symbol('t')\ndlfl_integral = sym.integrate(del_flu_sym(x, t), x)\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-4": "import sympy as sym\n\n\ndef del_flu_sym(x, t=1, Ka=1, Ktt=0.5):\n intens = x ** 2\n return intens\n\n\nx = sym.Symbol('x')\nt = sym.Symbol('t')\ndlfl_integral = sym.integrate(del_flu_sym(x, t), x)\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-5": "#Интегрирование точного решения кинетик затухания люминесценции символьным методом\n#Из за сложности получаемых уравнений. Последующий подбор коэффициентов методом МНК\n# и печать результата\n#\n\nimport sympy as sym\n\n\ndef del_flu_sym(x ,t = 1 ,Ka = 1, Ktt = 0.5):\n intens = x**2\n return intens\n\n\nx = sym.Symbol('x')\nt = sym.Symbol('t')\ndlfl_integral = sym.integrate(del_flu_sym(x, t), (x))\nprint(dlfl_integral(2))\nsym.pprint(dlfl_integral)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Notifier(object):
<|reserved_special_token_0|>
def __init__(self):
pass
<|reserved_special_token_0|>
@abc.abstractmethod
def send(self, msg):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Notifier(object):
<|reserved_special_token_0|>
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Notifier(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
<|reserved_special_token_1|>
import abc
class Notifier(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
<|reserved_special_token_1|>
# Copyright 2014 The crabapple Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import abc
class Notifier(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def config(self, kwargs):
pass
@abc.abstractmethod
def send(self, msg):
pass
|
flexible
|
{
"blob_id": "f25351a3cb7bf583152baa8e7ec47b0f2161cb9c",
"index": 761,
"step-1": "<mask token>\n\n\nclass Notifier(object):\n <mask token>\n\n def __init__(self):\n pass\n <mask token>\n\n @abc.abstractmethod\n def send(self, msg):\n pass\n",
"step-2": "<mask token>\n\n\nclass Notifier(object):\n <mask token>\n\n def __init__(self):\n pass\n\n @abc.abstractmethod\n def config(self, kwargs):\n pass\n\n @abc.abstractmethod\n def send(self, msg):\n pass\n",
"step-3": "<mask token>\n\n\nclass Notifier(object):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self):\n pass\n\n @abc.abstractmethod\n def config(self, kwargs):\n pass\n\n @abc.abstractmethod\n def send(self, msg):\n pass\n",
"step-4": "import abc\n\n\nclass Notifier(object):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self):\n pass\n\n @abc.abstractmethod\n def config(self, kwargs):\n pass\n\n @abc.abstractmethod\n def send(self, msg):\n pass\n",
"step-5": "# Copyright 2014 The crabapple Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nimport abc\n\n\nclass Notifier(object):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self):\n pass\n\n @abc.abstractmethod\n def config(self, kwargs):\n pass\n\n @abc.abstractmethod\n def send(self, msg):\n pass\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data.head()
<|reserved_special_token_0|>
sb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
sb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')
data.head()
subset = data[['Survived', 'Age', 'Sex']]
<|reserved_special_token_0|>
sb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
sb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib as mp
data = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')
data.head()
subset = data[['Survived', 'Age', 'Sex']]
import numpy as np
import matplotlib
sb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
sb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib as mp
data = pd.read_csv("/Users/stevenbaez/Desktop/train.csv")
# In[2]:
data.head()
# In[3]:
subset = data[['Survived','Age', 'Sex']]
# In[5]:
import numpy as np
import matplotlib
# In[20]:
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Embarked",
notch = False,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[17]:
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Pclass",
notch = True,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[ ]:
|
flexible
|
{
"blob_id": "41006ff35299aa72b69c6dc1c71a45b44dca7d6c",
"index": 1184,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndata.head()\n<mask token>\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-3": "<mask token>\ndata = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')\ndata.head()\nsubset = data[['Survived', 'Age', 'Sex']]\n<mask token>\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib as mp\ndata = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')\ndata.head()\nsubset = data[['Survived', 'Age', 'Sex']]\nimport numpy as np\nimport matplotlib\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\nsb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,\n palette='Set2', data=data, kind='box', height=4, aspect=0.7)\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sb\nimport matplotlib as mp\n\ndata = pd.read_csv(\"/Users/stevenbaez/Desktop/train.csv\")\n\n\n# In[2]:\n\n\ndata.head()\n\n\n# In[3]:\n\n\nsubset = data[['Survived','Age', 'Sex']]\n\n\n# In[5]:\n\n\nimport numpy as np\nimport matplotlib\n\n\n# In[20]:\n\n\nsb.catplot(x=\"Age\", y=\"Sex\",\n hue=\"Survived\", col=\"Embarked\",\n notch = False,\n palette = \"Set2\",\n data=data, kind=\"box\",\n height=4, aspect=.7);\n\n\n# In[17]:\n\n\nsb.catplot(x=\"Age\", y=\"Sex\",\n hue=\"Survived\", col=\"Pclass\",\n notch = True,\n palette = \"Set2\",\n data=data, kind=\"box\",\n height=4, aspect=.7);\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
<|reserved_special_token_0|>
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
phone = request.POST['phone']
message = request.POST['message']
cfm = ContactForm(name=name, email=email, subject=subject, phone=
phone, message=message)
cfm.save()
messages.success(request, 'Successfully Saved')
return render(request, 'pages/contact.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
def service(request):
return render(request, 'pages/services.html')
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
phone = request.POST['phone']
message = request.POST['message']
cfm = ContactForm(name=name, email=email, subject=subject, phone=
phone, message=message)
cfm.save()
messages.success(request, 'Successfully Saved')
return render(request, 'pages/contact.html')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
teams = Team.objects.all()
cars = Car.objects.order_by('-created_date').filter(is_featured=True)
all_cars = Car.objects.order_by('-created_date').all()
model_field = Car.objects.values_list('model', flat=True).distinct()
state_field = Car.objects.values_list('state', flat=True).distinct()
body_field = Car.objects.values_list('body_style', flat=True).distinct()
year_field = Car.objects.values_list('year', flat=True).distinct()
return render(request, 'pages/index.html', {'teams': teams,
'featured_cars': cars, 'all_cars': all_cars, 'model_field':
model_field, 'state_field': state_field, 'body_field': body_field,
'year_field': year_field})
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
def service(request):
return render(request, 'pages/services.html')
def contact(request):
if request.method == 'POST':
name = request.POST['name']
email = request.POST['email']
subject = request.POST['subject']
phone = request.POST['phone']
message = request.POST['message']
cfm = ContactForm(name=name, email=email, subject=subject, phone=
phone, message=message)
cfm.save()
messages.success(request, 'Successfully Saved')
return render(request, 'pages/contact.html')
<|reserved_special_token_1|>
from django.shortcuts import render
from .models import Team, ContactForm
from cars.models import Car
from django.contrib import messages
def index(request):
teams = Team.objects.all()
cars = Car.objects.order_by('-created_date').filter(is_featured=True)
all_cars = Car.objects.order_by('-created_date').all()
model_field = Car.objects.values_list('model', flat=True).distinct()
state_field = Car.objects.values_list('state', flat=True).distinct()
body_field = Car.objects.values_list('body_style', flat=True).distinct()
year_field = Car.objects.values_list('year', flat=True).distinct()
return render(request, 'pages/index.html', {'teams': teams,
'featured_cars': cars, 'all_cars': all_cars, 'model_field':
model_field, 'state_field': state_field, 'body_field': body_field,
'year_field': year_field})
def about(request):
teams = Team.objects.all()
return render(request, 'pages/about.html', {'teams': teams})
def service(request):
    """Render the static Services page; no template context is required."""
    return render(request, 'pages/services.html')
def contact(request):
    """Contact page; a POST saves the submitted message and flashes a success notice."""
    if request.method == 'POST':
        name = request.POST['name']
        email = request.POST['email']
        subject = request.POST['subject']
        phone = request.POST['phone']
        message = request.POST['message']
        # Persist the raw submission as a ContactForm row (no validation here).
        cfm = ContactForm(name=name, email=email, subject=subject, phone=
            phone, message=message)
        cfm.save()
        messages.success(request, 'Successfully Saved')
    return render(request, 'pages/contact.html')
<|reserved_special_token_1|>
from django.shortcuts import render
from .models import Team,ContactForm
from cars.models import Car
from django.contrib import messages
# Create your views here.
def index(request):
    """Home page: teams, featured and all cars, plus distinct values for the search filters."""
    teams=Team.objects.all()
    cars = Car.objects.order_by("-created_date").filter(is_featured=True)
    all_cars=Car.objects.order_by("-created_date").all()
    # Distinct column values feed the search drop-downs on the template.
    model_field=Car.objects.values_list('model',flat=True).distinct()
    state_field=Car.objects.values_list('state',flat=True).distinct()
    body_field=Car.objects.values_list('body_style',flat=True).distinct()
    year_field=Car.objects.values_list('year',flat=True).distinct()
    return render(request,'pages/index.html',{'teams':teams,'featured_cars':cars,"all_cars":all_cars,'model_field':model_field,'state_field':state_field,'body_field':body_field,'year_field':year_field})
def about(request):
    """Render the About page with the full team roster."""
    teams = Team.objects.all()
    return render(request,'pages/about.html',{'teams':teams})
def service(request):
    """Render the static Services page."""
    return render(request,'pages/services.html')
def contact(request):
    """Contact page; a POST saves the submission and flashes a success message."""
    if request.method == 'POST':
        name=request.POST['name']
        email=request.POST['email']
        subject=request.POST['subject']
        phone=request.POST['phone']
        message=request.POST['message']
        # Persist the raw submission; no validation is performed here.
        cfm=ContactForm(name=name,email=email,subject=subject,phone=phone,message=message)
        cfm.save()
        messages.success(request,'Successfully Saved')
    return render(request,'pages/contact.html')
|
flexible
|
{
"blob_id": "eca40c37e0e437a5f4e5643f5fb7cd3e38605471",
"index": 2417,
"step-1": "<mask token>\n\n\ndef about(request):\n teams = Team.objects.all()\n return render(request, 'pages/about.html', {'teams': teams})\n\n\n<mask token>\n\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST['name']\n email = request.POST['email']\n subject = request.POST['subject']\n phone = request.POST['phone']\n message = request.POST['message']\n cfm = ContactForm(name=name, email=email, subject=subject, phone=\n phone, message=message)\n cfm.save()\n messages.success(request, 'Successfully Saved')\n return render(request, 'pages/contact.html')\n",
"step-2": "<mask token>\n\n\ndef about(request):\n teams = Team.objects.all()\n return render(request, 'pages/about.html', {'teams': teams})\n\n\ndef service(request):\n return render(request, 'pages/services.html')\n\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST['name']\n email = request.POST['email']\n subject = request.POST['subject']\n phone = request.POST['phone']\n message = request.POST['message']\n cfm = ContactForm(name=name, email=email, subject=subject, phone=\n phone, message=message)\n cfm.save()\n messages.success(request, 'Successfully Saved')\n return render(request, 'pages/contact.html')\n",
"step-3": "<mask token>\n\n\ndef index(request):\n teams = Team.objects.all()\n cars = Car.objects.order_by('-created_date').filter(is_featured=True)\n all_cars = Car.objects.order_by('-created_date').all()\n model_field = Car.objects.values_list('model', flat=True).distinct()\n state_field = Car.objects.values_list('state', flat=True).distinct()\n body_field = Car.objects.values_list('body_style', flat=True).distinct()\n year_field = Car.objects.values_list('year', flat=True).distinct()\n return render(request, 'pages/index.html', {'teams': teams,\n 'featured_cars': cars, 'all_cars': all_cars, 'model_field':\n model_field, 'state_field': state_field, 'body_field': body_field,\n 'year_field': year_field})\n\n\ndef about(request):\n teams = Team.objects.all()\n return render(request, 'pages/about.html', {'teams': teams})\n\n\ndef service(request):\n return render(request, 'pages/services.html')\n\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST['name']\n email = request.POST['email']\n subject = request.POST['subject']\n phone = request.POST['phone']\n message = request.POST['message']\n cfm = ContactForm(name=name, email=email, subject=subject, phone=\n phone, message=message)\n cfm.save()\n messages.success(request, 'Successfully Saved')\n return render(request, 'pages/contact.html')\n",
"step-4": "from django.shortcuts import render\nfrom .models import Team, ContactForm\nfrom cars.models import Car\nfrom django.contrib import messages\n\n\ndef index(request):\n teams = Team.objects.all()\n cars = Car.objects.order_by('-created_date').filter(is_featured=True)\n all_cars = Car.objects.order_by('-created_date').all()\n model_field = Car.objects.values_list('model', flat=True).distinct()\n state_field = Car.objects.values_list('state', flat=True).distinct()\n body_field = Car.objects.values_list('body_style', flat=True).distinct()\n year_field = Car.objects.values_list('year', flat=True).distinct()\n return render(request, 'pages/index.html', {'teams': teams,\n 'featured_cars': cars, 'all_cars': all_cars, 'model_field':\n model_field, 'state_field': state_field, 'body_field': body_field,\n 'year_field': year_field})\n\n\ndef about(request):\n teams = Team.objects.all()\n return render(request, 'pages/about.html', {'teams': teams})\n\n\ndef service(request):\n return render(request, 'pages/services.html')\n\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST['name']\n email = request.POST['email']\n subject = request.POST['subject']\n phone = request.POST['phone']\n message = request.POST['message']\n cfm = ContactForm(name=name, email=email, subject=subject, phone=\n phone, message=message)\n cfm.save()\n messages.success(request, 'Successfully Saved')\n return render(request, 'pages/contact.html')\n",
"step-5": "from django.shortcuts import render\nfrom .models import Team,ContactForm\nfrom cars.models import Car\nfrom django.contrib import messages\n# Create your views here.\ndef index(request):\n teams=Team.objects.all()\n cars = Car.objects.order_by(\"-created_date\").filter(is_featured=True)\n all_cars=Car.objects.order_by(\"-created_date\").all()\n model_field=Car.objects.values_list('model',flat=True).distinct()\n state_field=Car.objects.values_list('state',flat=True).distinct()\n body_field=Car.objects.values_list('body_style',flat=True).distinct()\n year_field=Car.objects.values_list('year',flat=True).distinct()\n return render(request,'pages/index.html',{'teams':teams,'featured_cars':cars,\"all_cars\":all_cars,'model_field':model_field,'state_field':state_field,'body_field':body_field,'year_field':year_field})\n\n\ndef about(request):\n teams = Team.objects.all()\n return render(request,'pages/about.html',{'teams':teams})\n\ndef service(request):\n return render(request,'pages/services.html')\n\n\ndef contact(request):\n if request.method == 'POST':\n name=request.POST['name']\n email=request.POST['email']\n subject=request.POST['subject']\n phone=request.POST['phone']\n message=request.POST['message']\n cfm=ContactForm(name=name,email=email,subject=subject,phone=phone,message=message)\n cfm.save()\n messages.success(request,'Successfully Saved')\n\n return render(request,'pages/contact.html')",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for ln in fh:
if ln.startswith('From'):
if ln.startswith('From:'):
continue
else:
word = ln.split()
lst1.append(word[1])
for word in lst1:
data[word] = data.get(word, 0) + 1
<|reserved_special_token_0|>
for word, count in data.items():
if bigcount is None or bigcount < count:
bigcount = count
bigword = word
print(bigword, bigcount)
<|reserved_special_token_1|>
# Report the most frequent sender of "From " envelope lines in a
# mailbox-format file named interactively by the user.
fname = input('Enter the file name to open')
fh = open(fname)
lst1 = list()
data = dict()
for ln in fh:
    if ln.startswith('From'):
        # "From:" header lines are skipped; only "From " envelope lines count.
        if ln.startswith('From:'):
            continue
        else:
            word = ln.split()
            # NOTE(review): assumes a token follows "From" — a bare "From"
            # line would raise IndexError here.
            lst1.append(word[1])
for word in lst1:
    data[word] = data.get(word, 0) + 1
# Linear scan for the (first) sender with the highest count.
bigcount = None
bigword = None
for word, count in data.items():
    if bigcount is None or bigcount < count:
        bigcount = count
        bigword = word
print(bigword, bigcount)
<|reserved_special_token_1|>
"""Find the most prolific sender in a mailbox-format file.

Scans every "From " envelope line (skipping "From:" header lines),
counts how many messages each address sent, and prints the top sender
together with its count.
"""
fname = input('Enter the file name to open')

senders = dict()  # sender address -> number of "From " lines seen
with open(fname) as fh:  # fix: close the file even if parsing fails
    for ln in fh:
        # Only "From " envelope lines carry the address; skip "From:" headers.
        if ln.startswith('From') and not ln.startswith('From:'):
            parts = ln.split()
            if len(parts) >= 2:  # fix: a bare "From" line no longer crashes
                addr = parts[1]
                senders[addr] = senders.get(addr, 0) + 1

# max() with key returns the first maximum in insertion order, matching the
# original manual scan; an empty file still prints "None None".
bigword = bigcount = None
if senders:
    bigword = max(senders, key=senders.get)
    bigcount = senders[bigword]
print(bigword, bigcount)
|
flexible
|
{
"blob_id": "4fba13d051a3aceb393a4473cdbf6d4fc684c7ac",
"index": 9473,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor ln in fh:\n if ln.startswith('From'):\n if ln.startswith('From:'):\n continue\n else:\n word = ln.split()\n lst1.append(word[1])\nfor word in lst1:\n data[word] = data.get(word, 0) + 1\n<mask token>\nfor word, count in data.items():\n if bigcount is None or bigcount < count:\n bigcount = count\n bigword = word\nprint(bigword, bigcount)\n",
"step-3": "fname = input('Enter the file name to open')\nfh = open(fname)\nlst1 = list()\ndata = dict()\nfor ln in fh:\n if ln.startswith('From'):\n if ln.startswith('From:'):\n continue\n else:\n word = ln.split()\n lst1.append(word[1])\nfor word in lst1:\n data[word] = data.get(word, 0) + 1\nbigcount = None\nbigword = None\nfor word, count in data.items():\n if bigcount is None or bigcount < count:\n bigcount = count\n bigword = word\nprint(bigword, bigcount)\n",
"step-4": "fname = input('Enter the file name to open')\r\nfh = open(fname)\r\nlst1 = list()\r\ndata = dict()\r\nfor ln in fh :\r\n if ln.startswith(\"From\"):\r\n if ln.startswith('From:'):\r\n continue\r\n else :\r\n word = ln.split()\r\n lst1.append(word[1])\r\nfor word in lst1:\r\n data[word] = data.get(word,0)+1\r\nbigcount = None\r\nbigword = None\r\nfor word,count in data.items():\r\n if bigcount is None or bigcount<count:\r\n bigcount = count\r\n bigword = word\r\nprint(bigword,bigcount)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_mysql_uri(user, password, host, database):
return f'mysql+pymysql://{user}:{password}@{host}/{database}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_os_env_value(key):
return os.getenv(key)
def get_mysql_uri(user, password, host, database):
return f'mysql+pymysql://{user}:{password}@{host}/{database}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_os_env_value(key):
return os.getenv(key)
def get_mysql_uri(user, password, host, database):
return f'mysql+pymysql://{user}:{password}@{host}/{database}'
MASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')
MASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(
'MASTER_MYSQL_DATABASE_PASSWORD')
MASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')
MASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(
'MASTER_MYSQL_DATABASE_DB_CASAONE')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,
MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,
MASTER_MYSQL_DATABASE_DB_CASAONE)
SQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}
<|reserved_special_token_1|>
import os
def get_os_env_value(key):
    """Return the environment variable *key*, or None when it is not set."""
    return os.getenv(key)
def get_mysql_uri(user, password, host, database):
    """Build a SQLAlchemy connection URI for MySQL via the PyMySQL driver."""
    return f'mysql+pymysql://{user}:{password}@{host}/{database}'
# Master MySQL credentials, all sourced from the process environment.
MASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')
MASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(
    'MASTER_MYSQL_DATABASE_PASSWORD')
MASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')
MASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(
    'MASTER_MYSQL_DATABASE_DB_CASAONE')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_ECHO = True  # SQLAlchemy statement echo; noisy outside development
SQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,
    MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,
    MASTER_MYSQL_DATABASE_DB_CASAONE)
# Ping pooled connections before use so stale ones are replaced transparently.
SQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}
<|reserved_special_token_1|>
import os
def get_os_env_value(key):
    """Read *key* from the process environment; None when it is not set."""
    return os.environ.get(key)
def get_mysql_uri(user, password, host, database):
    """Build a SQLAlchemy connection URI for MySQL using the PyMySQL driver."""
    return 'mysql+pymysql://{0}:{1}@{2}/{3}'.format(user, password, host, database)
# Master MySQL credentials, all sourced from the process environment.
MASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')
MASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value('MASTER_MYSQL_DATABASE_PASSWORD')
MASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')
MASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value('MASTER_MYSQL_DATABASE_DB_CASAONE')

# Pool recycle/timeout tuning is currently disabled; kept for reference.
# SQLALCHEMY_POOL_RECYCLE = 60 * 10
# SQLALCHEMY_POOL_TIMEOUT = 60 * 20
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_ECHO = True  # SQLAlchemy statement echo; noisy outside development
SQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER, MASTER_MYSQL_DATABASE_PASSWORD,
                                        MASTER_MYSQL_DATABASE_HOST, MASTER_MYSQL_DATABASE_DB_CASAONE)
# Ping pooled connections before use so stale ones are replaced transparently.
SQLALCHEMY_ENGINE_OPTIONS = {
    "pool_pre_ping": True
}
|
flexible
|
{
"blob_id": "8247b045a5aed4d0f3db6bc2c0edd985f2c4ba30",
"index": 5305,
"step-1": "<mask token>\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_DB_CASAONE')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,\n MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,\n MASTER_MYSQL_DATABASE_DB_CASAONE)\nSQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}\n",
"step-4": "import os\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_DB_CASAONE')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,\n MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,\n MASTER_MYSQL_DATABASE_DB_CASAONE)\nSQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}\n",
"step-5": "import os\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value('MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value('MASTER_MYSQL_DATABASE_DB_CASAONE')\n\n# SQLALCHEMY_POOL_RECYCLE = 60 * 10\n# SQLALCHEMY_POOL_TIMEOUT = 60 * 20\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\n\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER, MASTER_MYSQL_DATABASE_PASSWORD,\n MASTER_MYSQL_DATABASE_HOST, MASTER_MYSQL_DATABASE_DB_CASAONE)\n\nSQLALCHEMY_ENGINE_OPTIONS = {\n \"pool_pre_ping\": True\n}\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
metadata.create_all(engine)
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
api.version(SQLALCHEMY_MIGRATE_REPO))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
engine = create_engine(SQLALCHEMY_DATABASE_URI)
metadata.create_all(engine)
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
api.version(SQLALCHEMY_MIGRATE_REPO))
<|reserved_special_token_1|>
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from models.base import metadata
from sqlalchemy import create_engine
import os.path
# Create every table known to the models' metadata on the configured database.
engine = create_engine(SQLALCHEMY_DATABASE_URI)
metadata.create_all(engine)
# Put the database under sqlalchemy-migrate version control, creating the
# migration repository on the first run.
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
    # Repository already exists: register the DB at the repo's current version.
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
        api.version(SQLALCHEMY_MIGRATE_REPO))
<|reserved_special_token_1|>
#!../virtual_env/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from models.base import metadata
from sqlalchemy import create_engine
import os.path
# Create all tables described by the models' metadata, then make sure the
# database is under sqlalchemy-migrate version control.
engine = create_engine(SQLALCHEMY_DATABASE_URI)
metadata.create_all(engine)

if os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    # Repository already exists: register the DB at the repo's current version.
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
                        api.version(SQLALCHEMY_MIGRATE_REPO))
else:
    # First run: create the repository, then put the DB under version control.
    api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
    api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
|
flexible
|
{
"blob_id": "9bbf0953d228c970764b8ba94675346820bc5d90",
"index": 3006,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmetadata.create_all(engine)\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,\n api.version(SQLALCHEMY_MIGRATE_REPO))\n",
"step-3": "<mask token>\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\nmetadata.create_all(engine)\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,\n api.version(SQLALCHEMY_MIGRATE_REPO))\n",
"step-4": "from migrate.versioning import api\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom config import SQLALCHEMY_MIGRATE_REPO\nfrom models.base import metadata\nfrom sqlalchemy import create_engine\nimport os.path\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\nmetadata.create_all(engine)\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,\n api.version(SQLALCHEMY_MIGRATE_REPO))\n",
"step-5": "#!../virtual_env/bin/python\nfrom migrate.versioning import api\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom config import SQLALCHEMY_MIGRATE_REPO\nfrom models.base import metadata\nfrom sqlalchemy import create_engine\n\nimport os.path\n\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\n\nmetadata.create_all(engine)\n\nif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):\n api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)\nelse:\n api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
chrome_options = webdriver.ChromeOptions()
chrome_profile = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
chrome_options = webdriver.ChromeOptions()
chrome_profile = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
def wait_driver(driver, id, wait_time, watch_step):
locator = By.ID, id
try:
WebDriverWait(driver, wait_time, watch_step).until(EC.
presence_of_element_located(locator))
print(u'成功访问搜索引擎!')
except Exception as e:
print(e)
print(u'搜索引擎未加载成功,浏览器将被退出!')
driver.quit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SELENIUM_TIMEOUT = 12
def get_browser_driver():
"""获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染"""
try:
driver = webdriver.PhantomJS(service_args=['--load-images=no'])
except WebDriverException:
chrome_options = webdriver.ChromeOptions()
chrome_profile = {'profile.managed_default_content_settings.images': 2}
chrome_options.add_experimental_option('prefs', chrome_profile)
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.set_page_load_timeout(SELENIUM_TIMEOUT)
driver.implicitly_wait(SELENIUM_TIMEOUT)
return driver
def wait_driver(driver, id, wait_time, watch_step):
locator = By.ID, id
try:
WebDriverWait(driver, wait_time, watch_step).until(EC.
presence_of_element_located(locator))
print(u'成功访问搜索引擎!')
except Exception as e:
print(e)
print(u'搜索引擎未加载成功,浏览器将被退出!')
driver.quit()
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
SELENIUM_TIMEOUT = 12
def get_browser_driver():
    """Return a WebDriver with image loading disabled.

    Prefers PhantomJS; falls back to Chrome when PhantomJS is missing.
    Remember to call driver.quit() after use, otherwise leftover browser
    state can pollute later sessions.
    """
    try:
        driver = webdriver.PhantomJS(service_args=['--load-images=no'])
    except WebDriverException:
        # No PhantomJS installed: use Chrome, also configured to skip images.
        chrome_options = webdriver.ChromeOptions()
        chrome_profile = {'profile.managed_default_content_settings.images': 2}
        chrome_options.add_experimental_option('prefs', chrome_profile)
        driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.set_page_load_timeout(SELENIUM_TIMEOUT)
    driver.implicitly_wait(SELENIUM_TIMEOUT)
    return driver
def wait_driver(driver, id, wait_time, watch_step):
    """Wait up to wait_time seconds (polling every watch_step) for element #id; quit the driver on failure."""
    locator = By.ID, id
    try:
        WebDriverWait(driver, wait_time, watch_step).until(EC.
            presence_of_element_located(locator))
        print(u'成功访问搜索引擎!')
    except Exception as e:
        # Element never appeared (or the wait itself failed): report and
        # tear the browser down.
        print(e)
        print(u'搜索引擎未加载成功,浏览器将被退出!')
        driver.quit()
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
SELENIUM_TIMEOUT = 12  # seconds: page-load and implicit-wait budget

def get_browser_driver():
    """Build a WebDriver with image loading disabled.

    Tries PhantomJS first and falls back to Chrome when PhantomJS is not
    installed.  Callers must call driver.quit() afterwards, otherwise
    leftover state can pollute later sessions.
    """
    try:
        driver = webdriver.PhantomJS(service_args=['--load-images=no'])
    except WebDriverException:
        # PhantomJS unavailable: configure Chrome not to load images either.
        options = webdriver.ChromeOptions()
        options.add_experimental_option(
            "prefs", {"profile.managed_default_content_settings.images": 2})
        driver = webdriver.Chrome(chrome_options=options)
    driver.set_page_load_timeout(SELENIUM_TIMEOUT)
    driver.implicitly_wait(SELENIUM_TIMEOUT)
    return driver
def wait_driver(driver, id, wait_time, watch_step):
    """Block until the element with DOM id *id* is present.

    Polls every *watch_step* seconds for at most *wait_time* seconds; on
    timeout (or any other failure) the driver is shut down.
    """
    try:
        condition = EC.presence_of_element_located((By.ID, id))
        WebDriverWait(driver, wait_time, watch_step).until(condition)
        print(u"成功访问搜索引擎!")
    except Exception as e:
        print(e)
        print(u"搜索引擎未加载成功,浏览器将被退出!")
        driver.quit()
|
flexible
|
{
"blob_id": "5ab877ef15cdcd52463b1567c28327dc2eeea2de",
"index": 1204,
"step-1": "<mask token>\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = By.ID, id\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.\n presence_of_element_located(locator))\n print(u'成功访问搜索引擎!')\n except Exception as e:\n print(e)\n print(u'搜索引擎未加载成功,浏览器将被退出!')\n driver.quit()\n",
"step-3": "<mask token>\nSELENIUM_TIMEOUT = 12\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = By.ID, id\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.\n presence_of_element_located(locator))\n print(u'成功访问搜索引擎!')\n except Exception as e:\n print(e)\n print(u'搜索引擎未加载成功,浏览器将被退出!')\n driver.quit()\n",
"step-4": "from selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nSELENIUM_TIMEOUT = 12\n\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {'profile.managed_default_content_settings.images': 2}\n chrome_options.add_experimental_option('prefs', chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = By.ID, id\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.\n presence_of_element_located(locator))\n print(u'成功访问搜索引擎!')\n except Exception as e:\n print(e)\n print(u'搜索引擎未加载成功,浏览器将被退出!')\n driver.quit()\n",
"step-5": "from selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nSELENIUM_TIMEOUT = 12\n\ndef get_browser_driver():\n \"\"\"获取浏览器服务 使用后记得 driver.quit() 否则容易引起状态污染\"\"\"\n try:\n # PhantomJS 设置不加载图片\n driver = webdriver.PhantomJS(service_args=['--load-images=no'])\n except WebDriverException:\n # chrome 设置不加载图片\n chrome_options = webdriver.ChromeOptions()\n chrome_profile = {\"profile.managed_default_content_settings.images\": 2}\n chrome_options.add_experimental_option(\"prefs\", chrome_profile)\n driver = webdriver.Chrome(chrome_options=chrome_options)\n driver.set_page_load_timeout(SELENIUM_TIMEOUT)\n driver.implicitly_wait(SELENIUM_TIMEOUT)\n return driver\n\ndef wait_driver(driver, id, wait_time, watch_step):\n locator = (By.ID, id)\n try:\n WebDriverWait(driver, wait_time, watch_step).until(EC.presence_of_element_located(locator))\n print(u\"成功访问搜索引擎!\")\n except Exception as e:\n print(e)\n print(u\"搜索引擎未加载成功,浏览器将被退出!\")\n driver.quit()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def ehcf(a, b):
    """Extended Euclidean algorithm.

    Returns (p, q, h) with p*a + q*b == h, where h == gcd(a, b) for
    positive a and b.
    """
    p1, q1, h1 = 1, 0, a
    p2, q2, h2 = 0, 1, b
    while h2 != 0:
        # fix: use integer floor division; math.floor(h1 / h2) went through
        # a float and returned wrong quotients for large integers.
        r = h1 // h2
        p1, q1, h1, p2, q2, h2 = p2, q2, h2, p1 - r * p2, q1 - r * q2, h1 - r * h2
    return (p1, q1, h1)


def findinverse(k, p):
    """Return the modular inverse of k mod p, i.e. l with (k * l) % p == 1.

    Only meaningful when gcd(k, p) == 1; for other inputs the result has
    no inverse property (unchanged from the original behaviour).
    """
    return ehcf(k, p)[0] % p
|
normal
|
{
"blob_id": "d7b426727e11833b3825baac7b379f5ce44ea491",
"index": 5495,
"step-1": "<mask token>\n",
"step-2": "def ehcf(a, b):\n p1, q1, h1, p2, q2, h2 = 1, 0, a, 0, 1, b\n from math import floor\n while h2 != 0:\n r = floor(h1 / h2)\n p3 = p1 - r * p2\n q3 = q1 - r * q2\n h3 = h1 - r * h2\n p1, q1, h1, p2, q2, h2 = p2, q2, h2, p3, q3, h3\n return p1, q1, h1\n\n\n<mask token>\n",
"step-3": "def ehcf(a, b):\n p1, q1, h1, p2, q2, h2 = 1, 0, a, 0, 1, b\n from math import floor\n while h2 != 0:\n r = floor(h1 / h2)\n p3 = p1 - r * p2\n q3 = q1 - r * q2\n h3 = h1 - r * h2\n p1, q1, h1, p2, q2, h2 = p2, q2, h2, p3, q3, h3\n return p1, q1, h1\n\n\ndef findinverse(k, p):\n l = ehcf(k, p)[0] % p\n return l\n",
"step-4": "def ehcf(a, b):\n p1, q1, h1, p2, q2, h2 = 1, 0, a, 0, 1, b\n from math import floor\n while h2 != 0:\n r = floor(h1/h2)\n p3 = p1-r*p2\n q3 = q1-r*q2\n h3 = h1-r*h2\n p1,q1,h1,p2,q2,h2 = p2,q2,h2,p3,q3,h3\n return (p1, q1, h1)\n\ndef findinverse(k, p):\n l = ehcf(k,p)[0] % p\n return l",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('Cbrowser', '0002_links_l_title')]
operations = [migrations.AddField(model_name='student', name='dp',
field=models.CharField(default=
'https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg'
, max_length=1000)), migrations.AddField(model_name='student', name
='gpa', field=models.IntegerField(default=0))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Django schema migration: add ``dp`` (profile-picture URL) and
    ``gpa`` fields to the ``student`` model."""
    # Must be applied after the migration that added Links.l_title.
    dependencies = [('Cbrowser', '0002_links_l_title')]
    # dp defaults to a generic "no profile picture" image URL;
    # gpa defaults to 0.
    operations = [migrations.AddField(model_name='student', name='dp',
        field=models.CharField(default=
        'https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg'
        , max_length=1000)), migrations.AddField(model_name='student', name
        ='gpa', field=models.IntegerField(default=0))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-26 20:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Django schema migration (generated by Django 1.10.5): add the
    ``dp`` (profile-picture URL) and ``gpa`` fields to ``student``."""

    # Must be applied after the migration that added Links.l_title.
    dependencies = [
        ('Cbrowser', '0002_links_l_title'),
    ]

    operations = [
        migrations.AddField(
            model_name='student',
            name='dp',
            # Defaults to a generic "no profile picture" image URL.
            field=models.CharField(default='https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg', max_length=1000),
        ),
        migrations.AddField(
            model_name='student',
            name='gpa',
            field=models.IntegerField(default=0),
        ),
    ]
|
flexible
|
{
"blob_id": "ffd11d49f8499b4bfec8f17d07b66d899dd23d2e",
"index": 6924,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Cbrowser', '0002_links_l_title')]\n operations = [migrations.AddField(model_name='student', name='dp',\n field=models.CharField(default=\n 'https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg'\n , max_length=1000)), migrations.AddField(model_name='student', name\n ='gpa', field=models.IntegerField(default=0))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Cbrowser', '0002_links_l_title')]\n operations = [migrations.AddField(model_name='student', name='dp',\n field=models.CharField(default=\n 'https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg'\n , max_length=1000)), migrations.AddField(model_name='student', name\n ='gpa', field=models.IntegerField(default=0))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-02-26 20:13\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Cbrowser', '0002_links_l_title'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='student',\n name='dp',\n field=models.CharField(default='https://thebenclark.files.wordpress.com/2014/03/facebook-default-no-profile-pic.jpg', max_length=1000),\n ),\n migrations.AddField(\n model_name='student',\n name='gpa',\n field=models.IntegerField(default=0),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Task:
<|reserved_special_token_0|>
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = 'pending'
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {'created': self.created, 'finished': self.finished,
'status': self.status, 'uploaded': {'pending': self.pending,
'complete': self.complete, 'failed': self.failed}}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
<|reserved_special_token_0|>
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
Adding elements to queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return 'Queue Empty!'
<|reserved_special_token_0|>
def upload_image(self, path=None, url=None, title=None, description=
None, album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError('Either path or url must be given.')
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
'description': description}
token = ast.literal_eval(str(self.credentials))['access_token']
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
authentication, verify=verify)
if 'error' in json.loads(resp.content)['data']:
return False, json.loads(resp.content)['data']['error']
else:
return True, json.loads(resp.content)['data']['link']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Task:
<|reserved_special_token_0|>
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = 'pending'
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {'created': self.created, 'finished': self.finished,
'status': self.status, 'uploaded': {'pending': self.pending,
'complete': self.complete, 'failed': self.failed}}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
def executeOne(self, val):
"""
Upload a unique image
:rtype: object
"""
v, url = self.upload_image(path=None, url=val, title=None,
description=None, album=None)
if v:
self.url_map.update({val: url})
return True
else:
self.url_map.update({val: url})
return False
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
Adding elements to queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return 'Queue Empty!'
<|reserved_special_token_0|>
def upload_image(self, path=None, url=None, title=None, description=
None, album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError('Either path or url must be given.')
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
'description': description}
token = ast.literal_eval(str(self.credentials))['access_token']
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
authentication, verify=verify)
if 'error' in json.loads(resp.content)['data']:
return False, json.loads(resp.content)['data']['error']
else:
return True, json.loads(resp.content)['data']['link']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Base URL of the Imgur REST API; v3 endpoint paths are appended to it.
IMGUR_BASE = 'https://api.imgur.com'
class Task:
    """
    A job that uploads a batch of image URLs to Imgur.

    URLs move from ``pending`` to either ``complete`` (with their new
    Imgur links) or ``failed`` as uploads are attempted in FIFO order.

    Attributes
    ----------
    queue : list
        URLs still waiting to be uploaded (consumed oldest-first).
    pending : list
        URLs not yet resolved to success or failure.
    complete : list
        Imgur links of successfully uploaded images.
    failed : list
        Source URLs whose upload failed.
    url_map : dict
        Maps each attempted source URL to its Imgur link (on success)
        or to the error message (on failure).
    created : str
        ISO-8601 timestamp of job creation.
    finished : str or None
        ISO-8601 timestamp of completion; None while running.
    status : str
        One of 'pending', 'in-progress', 'complete'.
    credentials : dict or None
        OAuth credentials; must contain an 'access_token' key.
    """

    def __init__(self):
        """
        Create an empty job in the 'pending' state.
        """
        self.queue = list()
        self.pending = []
        self.complete = []
        self.failed = []
        self.url_map = {}
        self.created = datetime.datetime.now().isoformat()
        self.finished = None
        self.status = 'pending'
        self.credentials = None

    def initialize(self, urls, cred):
        """
        Queue the given URLs and store the client credentials.

        :param urls: list of image URLs to upload
        :param cred: client credentials; may arrive as the repr of a
            bytes payload and is normalised to a dict below
        """
        for i in urls:
            self.enqueue(i)
            self.pending.append(i)
        # Strip a bytes-repr prefix/quoting (e.g. b"{...}") and switch
        # single quotes to double so ast.literal_eval can parse a dict.
        # NOTE(review): fragile if credential values contain quote
        # characters — confirm the upstream format.
        clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
        self.credentials = ast.literal_eval(clean)

    def export(self):
        """
        Return a JSON-serialisable snapshot of job progress.

        :rtype: dict
        """
        return {'created': self.created, 'finished': self.finished,
            'status': self.status, 'uploaded': {'pending': self.pending,
            'complete': self.complete, 'failed': self.failed}}

    def executeAll(self, _set_task_progress):
        """
        Sequentially upload every queued image, reporting progress.

        :param _set_task_progress: callback invoked with this Task after
            each state change so the caller can persist progress
        """
        _set_task_progress(self)
        self.status = 'in-progress'
        _set_task_progress(self)
        while self.size() != 0:
            val = self.dequeue()
            if self.executeOne(val):
                self.pending.remove(val)
                # On success, url_map[val] holds the new Imgur link.
                self.complete.append(self.url_map[val])
                _set_task_progress(self)
            else:
                self.pending.remove(val)
                self.failed.append(val)
                _set_task_progress(self)
        self.status = 'complete'
        self.finished = datetime.datetime.now().isoformat()
        _set_task_progress(self)

    def executeOne(self, val):
        """
        Upload a single image URL and record the outcome in url_map.

        :param val: source URL to upload
        :return: True on success, False on failure
        """
        v, url = self.upload_image(path=None, url=val, title=None,
            description=None, album=None)
        if v:
            self.url_map.update({val: url})
            return True
        else:
            # On failure, url holds the error message from Imgur.
            self.url_map.update({val: url})
            return False

    def enqueue(self, data):
        """
        Push a URL onto the queue unless it is already present.

        :return: True if added, False if it was a duplicate
        """
        if data not in self.queue:
            self.queue.insert(0, data)
            return True
        return False

    def dequeue(self):
        """
        Pop the oldest URL from the queue.

        :return: the next URL, or the string 'Queue Empty!' if empty
        """
        if len(self.queue) > 0:
            return self.queue.pop()
        return 'Queue Empty!'

    def size(self):
        """
        Return the number of URLs still queued.
        """
        return len(self.queue)

    def upload_image(self, path=None, url=None, title=None, description=
        None, album=None):
        """
        Upload one image to Imgur and return the outcome.

        Exactly one of path or url must be given.

        :param path: local file path to read and base64-encode
        :param url: remote image URL for Imgur to fetch
        :param title: optional image title
        :param description: optional image description
        :param album: unused; the album id is hard-coded below
        :return: (True, imgur_link) on success, (False, error) otherwise
        :raises LookupError: if both or neither of path/url are given
        """
        if bool(path) == bool(url):
            raise LookupError('Either path or url must be given.')
        if path:
            with open(path, 'rb') as image_file:
                binary_data = image_file.read()
                image = b64encode(binary_data)
        else:
            image = url
        # NOTE(review): the album parameter is ignored; uploads always
        # target album '58tq5Nw'.
        payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
            'description': description}
        token = ast.literal_eval(str(self.credentials))['access_token']
        authentication = {'Authorization': 'Bearer {0}'.format(token)}
        verify = True
        resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
            authentication, verify=verify)
        if 'error' in json.loads(resp.content)['data']:
            return False, json.loads(resp.content)['data']['error']
        else:
            return True, json.loads(resp.content)['data']['link']
<|reserved_special_token_1|>
import ast
import datetime
import json
from base64 import b64encode
import requests
# Base URL of the Imgur REST API; v3 endpoint paths are appended to it.
IMGUR_BASE = 'https://api.imgur.com'
class Task:
    """
    A job that uploads a batch of image URLs to Imgur.

    URLs move from ``pending`` to either ``complete`` (with their new
    Imgur links) or ``failed`` as uploads are attempted in FIFO order.

    Attributes
    ----------
    queue : list
        URLs still waiting to be uploaded (consumed oldest-first).
    pending : list
        URLs not yet resolved to success or failure.
    complete : list
        Imgur links of successfully uploaded images.
    failed : list
        Source URLs whose upload failed.
    url_map : dict
        Maps each attempted source URL to its Imgur link (on success)
        or to the error message (on failure).
    created : str
        ISO-8601 timestamp of job creation.
    finished : str or None
        ISO-8601 timestamp of completion; None while running.
    status : str
        One of 'pending', 'in-progress', 'complete'.
    credentials : dict or None
        OAuth credentials; must contain an 'access_token' key.
    """

    def __init__(self):
        """
        Create an empty job in the 'pending' state.
        """
        self.queue = list()
        self.pending = []
        self.complete = []
        self.failed = []
        self.url_map = {}
        self.created = datetime.datetime.now().isoformat()
        self.finished = None
        self.status = 'pending'
        self.credentials = None

    def initialize(self, urls, cred):
        """
        Queue the given URLs and store the client credentials.

        :param urls: list of image URLs to upload
        :param cred: client credentials; may arrive as the repr of a
            bytes payload and is normalised to a dict below
        """
        for i in urls:
            self.enqueue(i)
            self.pending.append(i)
        # Strip a bytes-repr prefix/quoting (e.g. b"{...}") and switch
        # single quotes to double so ast.literal_eval can parse a dict.
        # NOTE(review): fragile if credential values contain quote
        # characters — confirm the upstream format.
        clean = str(cred).replace('b"', '').replace('"', '').replace("'", '"')
        self.credentials = ast.literal_eval(clean)

    def export(self):
        """
        Return a JSON-serialisable snapshot of job progress.

        :rtype: dict
        """
        return {'created': self.created, 'finished': self.finished,
            'status': self.status, 'uploaded': {'pending': self.pending,
            'complete': self.complete, 'failed': self.failed}}

    def executeAll(self, _set_task_progress):
        """
        Sequentially upload every queued image, reporting progress.

        :param _set_task_progress: callback invoked with this Task after
            each state change so the caller can persist progress
        """
        _set_task_progress(self)
        self.status = 'in-progress'
        _set_task_progress(self)
        while self.size() != 0:
            val = self.dequeue()
            if self.executeOne(val):
                self.pending.remove(val)
                # On success, url_map[val] holds the new Imgur link.
                self.complete.append(self.url_map[val])
                _set_task_progress(self)
            else:
                self.pending.remove(val)
                self.failed.append(val)
                _set_task_progress(self)
        self.status = 'complete'
        self.finished = datetime.datetime.now().isoformat()
        _set_task_progress(self)

    def executeOne(self, val):
        """
        Upload a single image URL and record the outcome in url_map.

        :param val: source URL to upload
        :return: True on success, False on failure
        """
        v, url = self.upload_image(path=None, url=val, title=None,
            description=None, album=None)
        if v:
            self.url_map.update({val: url})
            return True
        else:
            # On failure, url holds the error message from Imgur.
            self.url_map.update({val: url})
            return False

    def enqueue(self, data):
        """
        Push a URL onto the queue unless it is already present.

        :return: True if added, False if it was a duplicate
        """
        if data not in self.queue:
            self.queue.insert(0, data)
            return True
        return False

    def dequeue(self):
        """
        Pop the oldest URL from the queue.

        :return: the next URL, or the string 'Queue Empty!' if empty
        """
        if len(self.queue) > 0:
            return self.queue.pop()
        return 'Queue Empty!'

    def size(self):
        """
        Return the number of URLs still queued.
        """
        return len(self.queue)

    def upload_image(self, path=None, url=None, title=None, description=
        None, album=None):
        """
        Upload one image to Imgur and return the outcome.

        Exactly one of path or url must be given.

        :param path: local file path to read and base64-encode
        :param url: remote image URL for Imgur to fetch
        :param title: optional image title
        :param description: optional image description
        :param album: unused; the album id is hard-coded below
        :return: (True, imgur_link) on success, (False, error) otherwise
        :raises LookupError: if both or neither of path/url are given
        """
        if bool(path) == bool(url):
            raise LookupError('Either path or url must be given.')
        if path:
            with open(path, 'rb') as image_file:
                binary_data = image_file.read()
                image = b64encode(binary_data)
        else:
            image = url
        # NOTE(review): the album parameter is ignored; uploads always
        # target album '58tq5Nw'.
        payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,
            'description': description}
        token = ast.literal_eval(str(self.credentials))['access_token']
        authentication = {'Authorization': 'Bearer {0}'.format(token)}
        verify = True
        resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=
            authentication, verify=verify)
        if 'error' in json.loads(resp.content)['data']:
            return False, json.loads(resp.content)['data']['error']
        else:
            return True, json.loads(resp.content)['data']['link']
<|reserved_special_token_1|>
import ast
import datetime
import json
from base64 import b64encode
import requests
# Base URL of the Imgur REST API; v3 endpoint paths are appended to it.
IMGUR_BASE = "https://api.imgur.com"
class Task:
    """
    A job that uploads a batch of image URLs to Imgur.

    URLs move from ``pending`` to either ``complete`` (with their new
    Imgur links) or ``failed`` as uploads are attempted in FIFO order.

    Attributes
    ----------
    queue : list
        URLs still waiting to be uploaded (consumed oldest-first).
    pending : list
        URLs not yet resolved to success or failure.
    complete : list
        Imgur links of successfully uploaded images.
    failed : list
        Source URLs whose upload failed.
    url_map : dict
        Maps each attempted source URL to its Imgur link (on success)
        or to the error message (on failure).
    created : str
        ISO-8601 timestamp of job creation.
    finished : str or None
        ISO-8601 timestamp of completion; None while running.
    status : str
        One of "pending", "in-progress", "complete".
    credentials : dict or None
        OAuth credentials; must contain an "access_token" key.
    """
    def __init__(self):
        """
        Create an empty job in the "pending" state.
        """
        self.queue = list()
        self.pending = []
        self.complete = []
        self.failed = []
        self.url_map = {}
        self.created = datetime.datetime.now().isoformat()
        self.finished = None
        self.status = "pending"
        self.credentials = None

    def initialize(self, urls, cred):
        """
        Queue the given URLs and store the client credentials.

        :param urls: list of image URLs to upload
        :param cred: client credentials; may arrive as the repr of a
            bytes payload and is normalised to a dict below
        """
        for i in urls:
            self.enqueue(i)
            self.pending.append(i)
        # Strip a bytes-repr prefix/quoting (e.g. b"{...}") and switch
        # single quotes to double so ast.literal_eval can parse a dict.
        # NOTE(review): fragile if credential values contain quote
        # characters — confirm the upstream format.
        clean = str(cred).replace('b\"', '').replace('\"', '').replace("'", '"')
        self.credentials = ast.literal_eval(clean)

    def export(self):
        """
        Return a JSON-serialisable snapshot of job progress.

        :rtype: dict
        """
        return {
            "created": self.created,
            "finished": self.finished,
            "status": self.status,
            "uploaded": {
                "pending": self.pending,
                "complete": self.complete,
                "failed": self.failed
            }
        }

    def executeAll(self, _set_task_progress):
        """
        Sequentially upload every queued image, reporting progress.

        :param _set_task_progress: callback invoked with this Task after
            each state change so the caller can persist progress
        """
        _set_task_progress(self)
        self.status = 'in-progress'
        _set_task_progress(self)
        while self.size() != 0:
            val = self.dequeue()
            if self.executeOne(val):
                self.pending.remove(val)
                # On success, url_map[val] holds the new Imgur link.
                self.complete.append(self.url_map[val])
                _set_task_progress(self)
            else:
                self.pending.remove(val)
                self.failed.append(val)
                _set_task_progress(self)
        self.status = 'complete'
        self.finished = datetime.datetime.now().isoformat()
        _set_task_progress(self)

    def executeOne(self, val):
        """
        Upload a single image URL and record the outcome in url_map.

        :param val: source URL to upload
        :return: True on success, False on failure
        """
        v,url = self.upload_image(path=None, url=val, title=None, description=None, album=None)
        if v:
            self.url_map.update({val: url})
            return True
        else:
            # On failure, url holds the error message from Imgur.
            self.url_map.update({val: url})
            return False


    def enqueue(self, data):
        """
        Push a URL onto the queue unless it is already present.

        :return: True if added, False if it was a duplicate
        """
        # Checking to avoid duplicate entry (not mandatory)
        if data not in self.queue:
            self.queue.insert(0, data)
            return True
        return False


    def dequeue(self):
        """
        Pop the oldest URL from the queue.

        :return: the next URL, or the string "Queue Empty!" if empty
        """
        if len(self.queue) > 0:
            return self.queue.pop()
        return ("Queue Empty!")


    def size(self):
        """
        Return the number of URLs still queued.
        """
        return len(self.queue)

    def upload_image(self, path=None, url=None, title=None, description=None,
        album=None):
        """
        Upload one image to Imgur and return the outcome.

        Exactly one of path or url must be given.

        :param path: local file path to read and base64-encode
        :param url: remote image URL for Imgur to fetch
        :param title: optional image title
        :param description: optional image description
        :param album: unused; the album id is hard-coded below
        :return: (True, imgur_link) on success, (False, error) otherwise
        :raises LookupError: if both or neither of path/url are given
        """
        if bool(path) == bool(url):
            raise LookupError("Either path or url must be given.")
        if path:
            with open(path, 'rb') as image_file:
                binary_data = image_file.read()
                image = b64encode(binary_data)
        else:
            image = url
        # NOTE(review): the album parameter is ignored; uploads always
        # target album "58tq5Nw".
        payload = {'album_id': "58tq5Nw", 'image': image,
            'title': title, 'description': description}

        token = ast.literal_eval(str(self.credentials))["access_token"]

        authentication = {'Authorization': 'Bearer {0}'.format(token)}
        verify = True
        resp = requests.post(IMGUR_BASE + "/3/image", payload, headers=authentication, verify=verify)
        if 'error' in json.loads(resp.content)["data"]:
            return False, json.loads(resp.content)["data"]["error"]
        else:
            return True, json.loads(resp.content)["data"]["link"]
|
flexible
|
{
"blob_id": "63ee99012089dcb0e5b41860c95e13fff52c6731",
"index": 1546,
"step-1": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n <mask token>\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n <mask token>\n\n def upload_image(self, path=None, url=None, 
title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-2": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n 
self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n <mask token>\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-3": "<mask token>\nIMGUR_BASE = 'https://api.imgur.com'\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 
'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-4": "import ast\nimport datetime\nimport json\nfrom base64 import b64encode\nimport requests\nIMGUR_BASE = 'https://api.imgur.com'\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n 
self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-5": "import ast\nimport datetime\nimport json\nfrom base64 import b64encode\nimport requests\n\nIMGUR_BASE = \"https://api.imgur.com\"\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = \"pending\"\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\\\"', '').replace('\\\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {\n \"created\": self.created,\n \"finished\": self.finished,\n \"status\": self.status,\n \"uploaded\": {\n \"pending\": self.pending,\n \"complete\": self.complete,\n \"failed\": self.failed\n }\n }\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n 
_set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v,url = self.upload_image(path=None, url=val, title=None, description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n # Checking to avoid duplicate entry (not mandatory)\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return (\"Queue Empty!\")\n\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=None,\n album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError(\"Either path or url must be given.\")\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': \"58tq5Nw\", 'image': image,\n 'title': title, 'description': description}\n\n token = ast.literal_eval(str(self.credentials))[\"access_token\"]\n\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + \"/3/image\", payload, headers=authentication, verify=verify)\n if 'error' in json.loads(resp.content)[\"data\"]:\n return False, json.loads(resp.content)[\"data\"][\"error\"]\n else:\n return True, json.loads(resp.content)[\"data\"][\"link\"]\n\n\n",
"step-ids": [
8,
9,
12,
13,
14
]
}
|
[
8,
9,
12,
13,
14
] |
from .start_node import StartNode
from .character_appearance import CharacterAppearance
from .character_disappearance import CharacterDisappearance
from .replica import Replica
from .end_node import EndNode
from .choice import Choice
from .set_landscape import SetLandscape
from .add_item import AddItem
from .switch_by_item import SwitchByItem
|
normal
|
{
"blob_id": "cd6e15daa2360ead47f0bac95843b1c030164996",
"index": 6879,
"step-1": "<mask token>\n",
"step-2": "from .start_node import StartNode\nfrom .character_appearance import CharacterAppearance\nfrom .character_disappearance import CharacterDisappearance\nfrom .replica import Replica\nfrom .end_node import EndNode\nfrom .choice import Choice\nfrom .set_landscape import SetLandscape\nfrom .add_item import AddItem\nfrom .switch_by_item import SwitchByItem\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.version_info.major == 2:
from itertools import izip
else:
izip = zip
<|reserved_special_token_1|>
import sys
if sys.version_info.major == 2:
from itertools import izip
else:
izip = zip
|
flexible
|
{
"blob_id": "88445d8466d7acbf29d2525c7e322611d66494cd",
"index": 8315,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif sys.version_info.major == 2:\n from itertools import izip\nelse:\n izip = zip\n",
"step-3": "import sys\nif sys.version_info.major == 2:\n from itertools import izip\nelse:\n izip = zip\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Copyright (c) 2008 Johns Hopkins University.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written
# agreement is hereby granted, provided that the above copyright
# notice, the (updated) modification history and the author appear in
# all copies of this source code.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,
# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# @author Razvan Musaloiu-E. <razvanm@cs.jhu.edu>
"""A library that implements the T2 serial communication.
This library has two parts: one that deals with sending and receiving
packets using the serial format from T2 (TEP113) and a second one that
tries to simplifies the work with arbitrary packets.
"""
import sys, struct, time, serial, socket, operator, thread
import Queue
from threading import Lock, Condition
__version__ = "$Id: tos.py,v 1.1 2008/05/17 01:17:03 razvanm Exp $"
__all__ = ['Serial', 'AM',
'Packet', 'RawPacket',
'AckFrame', 'DataFrame', 'NoAckDataFrame',
'ActiveMessage']
def list2hex(v):
return " ".join(["%02x" % p for p in v])
class Serial:
"""
A Serial object offers a way to send and data using a HDLC-like
formating.
"""
HDLC_FLAG_BYTE = 0x7e
HDLC_CTLESC_BYTE = 0x7d
TOS_SERIAL_ACTIVE_MESSAGE_ID = 0
TOS_SERIAL_CC1000_ID = 1
TOS_SERIAL_802_15_4_ID = 2
TOS_SERIAL_UNKNOWN_ID = 255
SERIAL_PROTO_ACK = 67
SERIAL_PROTO_PACKET_ACK = 68
SERIAL_PROTO_PACKET_NOACK = 69
SERIAL_PROTO_PACKET_UNKNOWN = 255
def __init__(self, port, baudrate, flush=False, debug=False, qsize=10):
self._debug = debug
self._in_queue = Queue.Queue(qsize)
self._out_lock = Lock()
self._out_ack = Condition()
self._seqno = 0
self._ack = None
self._write_counter = 0
self._write_counter_failures = 0
self._read_counter = 0
self._ts = None
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)
self._s.flushInput()
start = time.time();
if flush:
print >>sys.stdout, "Flushing the serial port",
while time.time() - start < 1:
p = self._read()
sys.stdout.write(".")
if not self._debug:
sys.stdout.write("\n")
self._s.close()
self._s = serial.Serial(port, baudrate, rtscts=0, timeout=None)
thread.start_new_thread(self.run, ())
def run(self):
while True:
p = self._read()
self._read_counter += 1
if self._debug:
print "Serial:run: got a packet(%d): %s" % (self._read_counter, p)
ack = AckFrame(p.data)
if ack.protocol == self.SERIAL_PROTO_ACK:
if not self._ack:
self._ack = ack
if self._debug:
print "Serial:run: got an ack:", ack
self._ack = ack
# Wake up the writer
self._out_ack.acquire()
self._out_ack.notify()
self._out_ack.release()
else:
ampkt = ActiveMessage(NoAckDataFrame(p.data).data)
if ampkt.type == 100:
for t in "".join([chr(i) for i in ampkt.data]).strip('\n\0').split('\n'):
print "PRINTF:", t.strip('\n')
else:
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
self._in_queue.put(p, block=False)
# Returns the next incoming serial packet
def _read(self):
"""Wait for a packet and return it as a RawPacket."""
try:
d = self._get_byte()
ts = time.time()
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
packet = [d]
d = self._get_byte()
if d == self.HDLC_FLAG_BYTE:
d = self._get_byte()
ts = time.time()
else:
packet.append(d)
while d != self.HDLC_FLAG_BYTE:
d = self._get_byte()
packet.append(d)
if self._debug == True:
print "Serial:_read: unescaped", packet
packet = self._unescape(packet)
crc = self._crc16(0, packet[1:-3])
packet_crc = self._decode(packet[-3:-1])
if crc != packet_crc:
print "Warning: wrong CRC! %x != %x %s" % (crc, packet_crc, ["%2x" % i for i in packet])
if self._debug:
if self._ts == None:
self._ts = ts
else:
print "Serial:_read: %.4f (%.4f) Recv:" % (ts, ts - self._ts), self._format_packet(packet[1:-3])
self._ts = ts
return RawPacket(ts, packet[1:-3], crc == packet_crc)
except socket.timeout:
return None
def read(self, timeout=None):
start = time.time();
done = False
while not done:
p = None
while p == None:
if timeout == 0 or time.time() - start < timeout:
try:
p = self._in_queue.get(True, timeout)
except Queue.Empty:
return None
else:
return None
if p.crc:
done = True
else:
p = None
# In the current TinyOS the packets from the mote are always NoAckDataFrame
return NoAckDataFrame(p.data)
def write(self, payload):
"""
Write a packet. If the payload argument is a list, it is
assumed to be exactly the payload. Otherwise the payload is
assume to be a Packet and the real payload is obtain by
calling the .payload().
"""
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
self._out_lock.acquire()
self._seqno = (self._seqno + 1) % 100
packet = DataFrame();
packet.protocol = self.SERIAL_PROTO_PACKET_ACK
packet.seqno = self._seqno
packet.dispatch = 0
packet.data = payload
packet = packet.payload()
crc = self._crc16(0, packet)
packet.append(crc & 0xff)
packet.append((crc >> 8) & 0xff)
packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]
while True:
self._put_bytes(packet)
self._write_counter += 1
if self._debug == True:
print "Send(%d/%d): %s" % (self._write_counter, self._write_counter_failures, packet)
print "Wait for ack %d ..." % (self._seqno)
self._out_ack.acquire()
self._out_ack.wait(0.2)
if self._debug:
print "Wait for ack %d done. Latest ack:" % (self._seqno), self._ack
self._out_ack.release()
if self._ack and self._ack.seqno == self._seqno:
if self._debug:
print "The packet was acked."
self._out_lock.release()
if self._debug:
print "Returning from Serial.write..."
return True
else:
self._write_counter_failures += 1
if self._debug:
print "The packet was not acked. Try again."
# break # make only one sending attempt
self._out_lock.release()
return False
def _format_packet(self, payload):
f = NoAckDataFrame(payload)
if f.protocol == self.SERIAL_PROTO_ACK:
rpacket = AckFrame(payload)
return "Ack seqno: %d" % (rpacket.seqno)
else:
rpacket = ActiveMessage(f.data)
return "D: %04x S: %04x L: %02x G: %02x T: %02x | %s" % \
(rpacket.destination, rpacket.source,
rpacket.length, rpacket.group, rpacket.type,
list2hex(rpacket.data))
def _crc16(self, base_crc, frame_data):
crc = base_crc
for b in frame_data:
crc = crc ^ (b << 8)
for i in range(0, 8):
if crc & 0x8000 == 0x8000:
crc = (crc << 1) ^ 0x1021
else:
crc = crc << 1
crc = crc & 0xffff
return crc
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(val & 0xFF)
val = val >> 8
return output
def _decode(self, v):
r = long(0)
for i in v[::-1]:
r = (r << 8) + i
return r
def _get_byte(self):
try:
r = struct.unpack("B", self._s.read())[0]
return r
except struct.error:
# Serial port read timeout
raise socket.timeout
def _put_bytes(self, data):
#print "DEBUG: _put_bytes:", data
for b in data:
self._s.write(struct.pack('B', b))
def _unescape(self, packet):
r = []
esc = False
for b in packet:
if esc:
r.append(b ^ 0x20)
esc = False
elif b == self.HDLC_CTLESC_BYTE:
esc = True
else:
r.append(b)
return r
def _escape(self, packet):
r = []
for b in packet:
if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:
r.append(self.HDLC_CTLESC_BYTE)
r.append(b ^ 0x20)
else:
r.append(b)
return r
def debug(self, debug):
self._debug = debug
class SFClient:
def __init__(self, host, port, qsize=10):
self._in_queue = Queue(qsize)
self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._s.connect((host, port))
data = self._s.recv(2)
if data != 'U ':
print "Wrong handshake"
self._s.send("U ")
print "Connected"
thread.start_new_thread(self.run, ())
def run(self):
while True:
length = ord(self._s.recv(1))
data = self._s.recv(length)
data = [ord(c) for c in data][1:]
#print "Recv %d bytes" % (length), ActiveMessage(data)
if self._in_queue.full():
print "Warning: Buffer overflow"
self._in_queue.get()
p = RawPacket()
p.crc = 1
p.data = data
self._in_queue.put(p, block=False)
def read(self, timeout=0):
return self._in_queue.get()
def write(self, payload):
print "SFClient: write:", payload
if type(payload) != type([]):
# Assume this will be derived from Packet
payload = payload.payload()
payload = [0] + payload
self._s.send(chr(len(payload)))
self._s.send(''.join([chr(c) for c in payload]))
return True
class AM:
def __init__(self, s):
self._s = s
def read(self, timeout=None):
frame = self._s.read(timeout)
if frame:
return ActiveMessage(frame.data)
return frame
def write(self, packet, amid):
return self._s.write(ActiveMessage(packet, amid=amid))
class Packet:
"""
The Packet class offers a handy way to build pack and unpack
binary data based on a given pattern.
"""
def _decode(self, v):
r = long(0)
for i in v:
r = (r << 8) + i
return r
def _encode(self, val, dim):
output = []
for i in range(dim):
output.append(int(val & 0xFF))
val = val >> 8
output.reverse()
return output
def __init__(self, desc, packet = None):
offset = 0
boffset = 0
sum = 0
for i in range(len(desc)-1, -1, -1):
(n, t, s) = desc[i]
if s == None:
if sum > 0:
desc[i] = (n, t, -sum)
break
sum += s
self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]
self.__dict__['_names'] = [n for (n, t, s) in desc]
self.__dict__['_values'] = []
if type(packet) == type([]):
for (t, s) in self._schema:
if t == 'int':
self._values.append(self._decode(packet[offset:offset + s]))
offset += s
elif t == 'bint':
doffset = 8 - (boffset + s)
self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))
boffset += s
if boffset == 8:
offset += 1
boffset = 0
elif t == 'string':
self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))
offset += s
elif t == 'blob':
if s:
if s > 0:
self._values.append(packet[offset:offset + s])
offset += s
else:
self._values.append(packet[offset:s])
offset = len(packet) + s
else:
self._values.append(packet[offset:])
elif type(packet) == type(()):
for i in packet:
self._values.append(i)
else:
for v in self._schema:
self._values.append(None)
def __repr__(self):
return self._values.__repr__()
def __str__(self):
r = ""
for i in range(len(self._names)):
r += "%s: %s " % (self._names[i], self._values[i])
for i in range(len(self._names), len(self._values)):
r += "%s" % self._values[i]
return r
# return self._values.__str__()
# Implement the map behavior
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __len__(self):
return len(self._values)
def keys(self):
return self._names
def values(self):
return self._names
# Implement the struct behavior
def __getattr__(self, name):
#print "DEBUG: __getattr__", name
if type(name) == type(0):
return self._names[name]
else:
return self._values[self._names.index(name)]
def __setattr__(self, name, value):
if type(name) == type(0):
self._values[name] = value
else:
self._values[self._names.index(name)] = value
def __ne__(self, other):
if other.__class__ == self.__class__:
return self._values != other._values
else:
return True
def __eq__(self, other):
if other.__class__ == self.__class__:
return self._values == other._values
else:
return False
def __nonzero__(self):
return True;
# Custom
def names(self):
return self._names
def sizes(self):
return self._schema
def payload(self):
r = []
boffset = 0
for i in range(len(self._schema)):
(t, s) = self._schema[i]
if t == 'int':
r += self._encode(self._values[i], s)
boffset = 0
elif t == 'bint':
doffset = 8 - (boffset + s)
if boffset == 0:
r += [self._values[i] << doffset]
else:
r[-1] |= self._values[i] << doffset
boffset += s
if boffset == 8:
boffset = 0
elif self._values[i] != []:
r += self._values[i]
for i in self._values[len(self._schema):]:
r += i
return r
class RawPacket(Packet):
def __init__(self, ts = None, data = None, crc = None):
Packet.__init__(self,
[('ts' , 'int', 4),
('crc', 'int', 1),
('data', 'blob', None)],
None)
self.ts = ts;
self.data = data
self.crc = crc
class AckFrame(Packet):
def __init__(self, payload = None):
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1)],
payload)
class DataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('seqno', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class NoAckDataFrame(Packet):
def __init__(self, payload = None):
if payload != None and type(payload) != type([]):
# Assume is a Packet
payload = payload.payload()
Packet.__init__(self,
[('protocol', 'int', 1),
('dispatch', 'int', 1),
('data', 'blob', None)],
payload)
class ActiveMessage(Packet):
def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):
if type(gpacket) == type([]):
payload = gpacket
else:
# Assume this will be derived from Packet
payload = None
Packet.__init__(self,
[('destination', 'int', 2),
('source', 'int', 2),
('length', 'int', 1),
('group', 'int', 1),
('type', 'int', 1),
('data', 'blob', None)],
payload)
if payload == None:
self.destination = dest
self.source = 0x0000
self.group = 0x00
self.type = amid
self.data = []
if gpacket:
self.data = gpacket.payload()
self.length = len(self.data)
|
normal
|
{
"blob_id": "f614287a2a118484b67f2b16e429a3335416d186",
"index": 3738,
"step-1": "# Copyright (c) 2008 Johns Hopkins University.\n# All rights reserved.\n#\n# Permission to use, copy, modify, and distribute this software and its\n# documentation for any purpose, without fee, and without written\n# agreement is hereby granted, provided that the above copyright\n# notice, the (updated) modification history and the author appear in\n# all copies of this source code.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, LOSS OF USE, DATA,\n# OR PROFITS) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n\n# @author Razvan Musaloiu-E. 
<razvanm@cs.jhu.edu>\n\n\"\"\"A library that implements the T2 serial communication.\n\nThis library has two parts: one that deals with sending and receiving\npackets using the serial format from T2 (TEP113) and a second one that\ntries to simplifies the work with arbitrary packets.\n\n\"\"\"\n\nimport sys, struct, time, serial, socket, operator, thread\nimport Queue\nfrom threading import Lock, Condition\n\n__version__ = \"$Id: tos.py,v 1.1 2008/05/17 01:17:03 razvanm Exp $\"\n\n__all__ = ['Serial', 'AM',\n 'Packet', 'RawPacket',\n 'AckFrame', 'DataFrame', 'NoAckDataFrame',\n 'ActiveMessage']\n\ndef list2hex(v):\n return \" \".join([\"%02x\" % p for p in v])\n\nclass Serial:\n \"\"\"\n A Serial object offers a way to send and data using a HDLC-like\n formating.\n \"\"\"\n \n HDLC_FLAG_BYTE = 0x7e\n HDLC_CTLESC_BYTE = 0x7d\n \n TOS_SERIAL_ACTIVE_MESSAGE_ID = 0\n TOS_SERIAL_CC1000_ID = 1\n TOS_SERIAL_802_15_4_ID = 2\n TOS_SERIAL_UNKNOWN_ID = 255\n \n SERIAL_PROTO_ACK = 67\n SERIAL_PROTO_PACKET_ACK = 68\n SERIAL_PROTO_PACKET_NOACK = 69\n SERIAL_PROTO_PACKET_UNKNOWN = 255\n \n def __init__(self, port, baudrate, flush=False, debug=False, qsize=10):\n self._debug = debug\n self._in_queue = Queue.Queue(qsize)\n self._out_lock = Lock()\n self._out_ack = Condition()\n self._seqno = 0\n self._ack = None\n self._write_counter = 0\n self._write_counter_failures = 0\n self._read_counter = 0\n self._ts = None\n\n self._s = serial.Serial(port, baudrate, rtscts=0, timeout=0.5)\n self._s.flushInput()\n start = time.time();\n if flush:\n print >>sys.stdout, \"Flushing the serial port\",\n while time.time() - start < 1:\n p = self._read()\n sys.stdout.write(\".\")\n if not self._debug:\n sys.stdout.write(\"\\n\")\n self._s.close()\n self._s = serial.Serial(port, baudrate, rtscts=0, timeout=None)\n\n thread.start_new_thread(self.run, ())\n\n def run(self):\n \n while True:\n p = self._read()\n self._read_counter += 1\n if self._debug:\n print \"Serial:run: got a packet(%d): %s\" % 
(self._read_counter, p)\n ack = AckFrame(p.data)\n if ack.protocol == self.SERIAL_PROTO_ACK:\n if not self._ack:\n self._ack = ack\n if self._debug:\n print \"Serial:run: got an ack:\", ack\n self._ack = ack\n # Wake up the writer\n self._out_ack.acquire()\n self._out_ack.notify()\n self._out_ack.release()\n else:\n ampkt = ActiveMessage(NoAckDataFrame(p.data).data)\n if ampkt.type == 100:\n for t in \"\".join([chr(i) for i in ampkt.data]).strip('\\n\\0').split('\\n'):\n print \"PRINTF:\", t.strip('\\n')\n else:\n if self._in_queue.full():\n print \"Warning: Buffer overflow\"\n self._in_queue.get()\n self._in_queue.put(p, block=False)\n\n\n # Returns the next incoming serial packet\n def _read(self):\n \"\"\"Wait for a packet and return it as a RawPacket.\"\"\"\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! 
%x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None\n\n\n def read(self, timeout=None):\n start = time.time();\n done = False\n while not done:\n p = None\n while p == None:\n if timeout == 0 or time.time() - start < timeout:\n try:\n p = self._in_queue.get(True, timeout)\n except Queue.Empty:\n return None\n else:\n return None\n if p.crc:\n done = True\n else:\n p = None\n # In the current TinyOS the packets from the mote are always NoAckDataFrame\n return NoAckDataFrame(p.data)\n\n def write(self, payload):\n \"\"\"\n Write a packet. If the payload argument is a list, it is\n assumed to be exactly the payload. Otherwise the payload is\n assume to be a Packet and the real payload is obtain by\n calling the .payload().\n \"\"\"\n \n if type(payload) != type([]):\n # Assume this will be derived from Packet\n payload = payload.payload()\n self._out_lock.acquire()\n self._seqno = (self._seqno + 1) % 100\n packet = DataFrame();\n packet.protocol = self.SERIAL_PROTO_PACKET_ACK\n packet.seqno = self._seqno\n packet.dispatch = 0\n packet.data = payload\n packet = packet.payload()\n crc = self._crc16(0, packet)\n packet.append(crc & 0xff)\n packet.append((crc >> 8) & 0xff)\n packet = [self.HDLC_FLAG_BYTE] + self._escape(packet) + [self.HDLC_FLAG_BYTE]\n\n while True:\n self._put_bytes(packet)\n self._write_counter += 1\n if self._debug == True:\n print \"Send(%d/%d): %s\" % (self._write_counter, self._write_counter_failures, packet)\n print \"Wait for ack %d ...\" % (self._seqno)\n self._out_ack.acquire()\n self._out_ack.wait(0.2)\n if self._debug:\n print \"Wait for ack %d done. 
Latest ack:\" % (self._seqno), self._ack\n self._out_ack.release()\n if self._ack and self._ack.seqno == self._seqno:\n if self._debug:\n print \"The packet was acked.\"\n self._out_lock.release()\n if self._debug:\n print \"Returning from Serial.write...\"\n return True\n else:\n self._write_counter_failures += 1\n if self._debug:\n print \"The packet was not acked. Try again.\"\n # break # make only one sending attempt\n self._out_lock.release()\n return False\n\n\n def _format_packet(self, payload):\n f = NoAckDataFrame(payload)\n if f.protocol == self.SERIAL_PROTO_ACK:\n rpacket = AckFrame(payload)\n return \"Ack seqno: %d\" % (rpacket.seqno)\n else:\n rpacket = ActiveMessage(f.data)\n return \"D: %04x S: %04x L: %02x G: %02x T: %02x | %s\" % \\\n (rpacket.destination, rpacket.source,\n rpacket.length, rpacket.group, rpacket.type,\n list2hex(rpacket.data))\n\n def _crc16(self, base_crc, frame_data):\n crc = base_crc\n for b in frame_data:\n crc = crc ^ (b << 8)\n for i in range(0, 8):\n if crc & 0x8000 == 0x8000:\n crc = (crc << 1) ^ 0x1021\n else:\n crc = crc << 1\n crc = crc & 0xffff\n return crc\n \n def _encode(self, val, dim):\n output = []\n for i in range(dim):\n output.append(val & 0xFF)\n val = val >> 8\n return output\n \n def _decode(self, v):\n r = long(0)\n for i in v[::-1]:\n r = (r << 8) + i\n return r\n \n def _get_byte(self):\n try:\n r = struct.unpack(\"B\", self._s.read())[0]\n return r\n except struct.error:\n # Serial port read timeout\n raise socket.timeout\n \n def _put_bytes(self, data):\n #print \"DEBUG: _put_bytes:\", data\n for b in data:\n self._s.write(struct.pack('B', b))\n \n def _unescape(self, packet):\n r = []\n esc = False\n for b in packet:\n if esc:\n r.append(b ^ 0x20)\n esc = False\n elif b == self.HDLC_CTLESC_BYTE:\n esc = True\n else:\n r.append(b)\n return r\n \n def _escape(self, packet):\n r = []\n for b in packet:\n if b == self.HDLC_FLAG_BYTE or b == self.HDLC_CTLESC_BYTE:\n r.append(self.HDLC_CTLESC_BYTE)\n 
r.append(b ^ 0x20)\n else:\n r.append(b)\n return r\n \n def debug(self, debug):\n self._debug = debug\n\n\nclass SFClient:\n def __init__(self, host, port, qsize=10):\n self._in_queue = Queue(qsize)\n self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._s.connect((host, port))\n data = self._s.recv(2)\n if data != 'U ':\n print \"Wrong handshake\"\n self._s.send(\"U \")\n print \"Connected\"\n thread.start_new_thread(self.run, ())\n\n def run(self):\n while True:\n length = ord(self._s.recv(1))\n data = self._s.recv(length)\n data = [ord(c) for c in data][1:]\n #print \"Recv %d bytes\" % (length), ActiveMessage(data)\n if self._in_queue.full():\n print \"Warning: Buffer overflow\"\n self._in_queue.get()\n p = RawPacket()\n p.crc = 1\n p.data = data\n self._in_queue.put(p, block=False)\n\n def read(self, timeout=0):\n return self._in_queue.get()\n\n def write(self, payload):\n print \"SFClient: write:\", payload\n if type(payload) != type([]):\n # Assume this will be derived from Packet\n payload = payload.payload()\n payload = [0] + payload\n self._s.send(chr(len(payload)))\n self._s.send(''.join([chr(c) for c in payload]))\n return True\n\nclass AM:\n def __init__(self, s):\n self._s = s\n\n def read(self, timeout=None):\n frame = self._s.read(timeout)\n if frame:\n return ActiveMessage(frame.data)\n return frame\n\n def write(self, packet, amid):\n return self._s.write(ActiveMessage(packet, amid=amid))\n\n\nclass Packet:\n \"\"\"\n The Packet class offers a handy way to build pack and unpack\n binary data based on a given pattern.\n \"\"\"\n\n def _decode(self, v):\n r = long(0)\n for i in v:\n r = (r << 8) + i\n return r\n \n def _encode(self, val, dim):\n output = []\n for i in range(dim):\n output.append(int(val & 0xFF))\n val = val >> 8\n output.reverse()\n return output\n \n def __init__(self, desc, packet = None):\n offset = 0\n boffset = 0\n sum = 0\n for i in range(len(desc)-1, -1, -1):\n (n, t, s) = desc[i]\n if s == None:\n if sum > 0:\n 
desc[i] = (n, t, -sum)\n break\n sum += s\n self.__dict__['_schema'] = [(t, s) for (n, t, s) in desc]\n self.__dict__['_names'] = [n for (n, t, s) in desc]\n self.__dict__['_values'] = []\n if type(packet) == type([]):\n for (t, s) in self._schema:\n if t == 'int':\n self._values.append(self._decode(packet[offset:offset + s]))\n offset += s\n elif t == 'bint':\n doffset = 8 - (boffset + s)\n self._values.append((packet[offset] >> doffset) & ((1<<s) - 1))\n boffset += s\n if boffset == 8:\n offset += 1\n boffset = 0\n elif t == 'string':\n self._values.append(''.join([chr(i) for i in packet[offset:offset + s]]))\n offset += s\n elif t == 'blob':\n if s:\n if s > 0:\n self._values.append(packet[offset:offset + s])\n offset += s\n else:\n self._values.append(packet[offset:s])\n offset = len(packet) + s\n else:\n self._values.append(packet[offset:])\n elif type(packet) == type(()):\n for i in packet:\n self._values.append(i)\n else:\n for v in self._schema:\n self._values.append(None)\n\n def __repr__(self):\n return self._values.__repr__()\n\n def __str__(self):\n r = \"\"\n for i in range(len(self._names)):\n r += \"%s: %s \" % (self._names[i], self._values[i])\n for i in range(len(self._names), len(self._values)):\n r += \"%s\" % self._values[i]\n return r\n# return self._values.__str__()\n\n # Implement the map behavior\n def __getitem__(self, key):\n return self.__getattr__(key)\n\n def __setitem__(self, key, value):\n self.__setattr__(key, value)\n\n def __len__(self):\n return len(self._values)\n\n def keys(self):\n return self._names\n\n def values(self):\n return self._names\n\n # Implement the struct behavior\n def __getattr__(self, name):\n #print \"DEBUG: __getattr__\", name\n if type(name) == type(0):\n return self._names[name]\n else:\n return self._values[self._names.index(name)]\n\n def __setattr__(self, name, value):\n if type(name) == type(0):\n self._values[name] = value\n else:\n self._values[self._names.index(name)] = value\n\n def __ne__(self, 
other):\n if other.__class__ == self.__class__:\n return self._values != other._values\n else:\n return True\n\n def __eq__(self, other):\n if other.__class__ == self.__class__:\n return self._values == other._values\n else:\n return False\n\n def __nonzero__(self):\n return True;\n\n # Custom\n def names(self):\n return self._names\n\n def sizes(self):\n return self._schema\n\n def payload(self):\n r = []\n boffset = 0\n for i in range(len(self._schema)):\n (t, s) = self._schema[i]\n if t == 'int':\n r += self._encode(self._values[i], s)\n boffset = 0\n elif t == 'bint':\n doffset = 8 - (boffset + s)\n if boffset == 0:\n r += [self._values[i] << doffset]\n else:\n r[-1] |= self._values[i] << doffset\n boffset += s\n if boffset == 8:\n boffset = 0\n elif self._values[i] != []:\n r += self._values[i]\n for i in self._values[len(self._schema):]:\n r += i\n return r\n\n\nclass RawPacket(Packet):\n def __init__(self, ts = None, data = None, crc = None):\n Packet.__init__(self,\n [('ts' , 'int', 4),\n ('crc', 'int', 1),\n ('data', 'blob', None)],\n None)\n self.ts = ts;\n self.data = data\n self.crc = crc\n \nclass AckFrame(Packet):\n def __init__(self, payload = None):\n Packet.__init__(self,\n [('protocol', 'int', 1),\n ('seqno', 'int', 1)],\n payload)\n\nclass DataFrame(Packet):\n def __init__(self, payload = None):\n if payload != None and type(payload) != type([]):\n # Assume is a Packet\n payload = payload.payload()\n Packet.__init__(self,\n [('protocol', 'int', 1),\n ('seqno', 'int', 1),\n ('dispatch', 'int', 1),\n ('data', 'blob', None)],\n payload)\n\nclass NoAckDataFrame(Packet):\n def __init__(self, payload = None):\n if payload != None and type(payload) != type([]):\n # Assume is a Packet\n payload = payload.payload()\n Packet.__init__(self,\n [('protocol', 'int', 1),\n ('dispatch', 'int', 1),\n ('data', 'blob', None)],\n payload)\n\nclass ActiveMessage(Packet):\n def __init__(self, gpacket = None, amid = 0x00, dest = 0xFFFF):\n if type(gpacket) == 
type([]):\n payload = gpacket\n else:\n # Assume this will be derived from Packet\n payload = None\n Packet.__init__(self,\n [('destination', 'int', 2),\n ('source', 'int', 2),\n ('length', 'int', 1),\n ('group', 'int', 1),\n ('type', 'int', 1),\n ('data', 'blob', None)],\n payload)\n if payload == None:\n self.destination = dest\n self.source = 0x0000\n self.group = 0x00\n self.type = amid\n self.data = []\n if gpacket:\n self.data = gpacket.payload()\n self.length = len(self.data)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Blueprint, request, jsonify
from to_dict import *
from validacao import *
import sqlite3
from migration import conectar, create_database
from contextlib import closing
aluno = Blueprint("aluno", __name__)
@aluno.route("/hello")
def hello():
return "Hello, aluno"
@aluno.route("/reseta", methods = ["POST"])
def reseta():
sqlaluno = """DELETE FROM aluno"""
sqldisciplina = """DELETE FROM disciplina"""
sqlprofessor = """DELETE FROM professor"""
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sqlaluno)
cursor.execute(sqldisciplina)
cursor.execute(sqlprofessor)
conn.commit()
return jsonify({'sucess': 'reset efetuado com suceso'}), 200
@aluno.route("/alunos", methods = ["GET"])
def alunos_retorna_lista():
sql = """SELECT * FROM aluno"""
resultados = []
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql)
linhas = cursor.fetchall()
for id, nome in linhas:
resultados.append({"id": id, "nome": nome})
return jsonify(resultados), 200
#return jsonify(alunos), 200
@aluno.route('/alunos/<int:id>', methods = ["GET"])
def aluno_por_id(id):
sql = "SELECT id, nome FROM aluno WHERE id = ?"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (id, ))
r = cursor.fetchone()
if r == None: return None
return {"id": r[0], "nome": r[1]}
@aluno.route("/alunos", methods = ["POST"])
def adiciona_alunos():
dados = request.get_json()
params = (dados['nome'],)
sql = "INSERT INTO aluno (nome) VALUES (?)"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (params))
conn.commit()
return jsonify(cursor.lastrowid)
@aluno.route("/alunos/<int:id>", methods = ["PUT"])
def editar_aluno(id):
dados = request.get_json()
params = (dados['nome'], id)
sql = "UPDATE aluno SET nome = ? WHERE id = ?"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (params))
conn.commit()
return jsonify(dados['nome']), 200
# for aluno in alunos:
# if aluno['id'] == id:
# aluno['nome'] = request.get_json().get('nome')
# return jsonify(aluno), 200
# return jsonify({'erro': 'aluno não encontrado'}), 404
@aluno.route("/alunos/<int:id>", methods = ["DELETE"])
def deletar_aluno(id):
params = (id,)
sql = "DELETE FROM aluno WHERE id = ?"
with closing(conectar()) as conn, closing(conn.cursor()) as cursor:
cursor.execute(sql, (params))
conn.commit()
return jsonify(id), 200
|
normal
|
{
"blob_id": "5068336ca1a180e09a7efd41eea596cdcebb33ae",
"index": 5586,
"step-1": "<mask token>\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n<mask token>\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-2": "<mask token>\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['GET'])\ndef aluno_por_id(id):\n sql = 'SELECT id, nome FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id,))\n r = cursor.fetchone()\n if r == None:\n return None\n return {'id': r[0], 'nome': r[1]}\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? 
WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-3": "<mask token>\naluno = Blueprint('aluno', __name__)\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['GET'])\ndef aluno_por_id(id):\n sql = 'SELECT id, nome FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id,))\n r = cursor.fetchone()\n if r == None:\n return None\n return {'id': r[0], 'nome': r[1]}\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? 
WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-4": "from flask import Blueprint, request, jsonify\nfrom to_dict import *\nfrom validacao import *\nimport sqlite3\nfrom migration import conectar, create_database\nfrom contextlib import closing\naluno = Blueprint('aluno', __name__)\n\n\n@aluno.route('/hello')\ndef hello():\n return 'Hello, aluno'\n\n\n@aluno.route('/reseta', methods=['POST'])\ndef reseta():\n sqlaluno = 'DELETE FROM aluno'\n sqldisciplina = 'DELETE FROM disciplina'\n sqlprofessor = 'DELETE FROM professor'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n\n@aluno.route('/alunos', methods=['GET'])\ndef alunos_retorna_lista():\n sql = 'SELECT * FROM aluno'\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({'id': id, 'nome': nome})\n return jsonify(resultados), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['GET'])\ndef aluno_por_id(id):\n sql = 'SELECT id, nome FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id,))\n r = cursor.fetchone()\n if r == None:\n return None\n return {'id': r[0], 'nome': r[1]}\n\n\n@aluno.route('/alunos', methods=['POST'])\ndef adiciona_alunos():\n dados = request.get_json()\n params = dados['nome'],\n sql = 'INSERT INTO aluno (nome) VALUES (?)'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(cursor.lastrowid)\n\n\n@aluno.route('/alunos/<int:id>', methods=['PUT'])\ndef editar_aluno(id):\n dados = request.get_json()\n params = dados['nome'], id\n sql = 'UPDATE aluno SET nome = ? 
WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(dados['nome']), 200\n\n\n@aluno.route('/alunos/<int:id>', methods=['DELETE'])\ndef deletar_aluno(id):\n params = id,\n sql = 'DELETE FROM aluno WHERE id = ?'\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, params)\n conn.commit()\n return jsonify(id), 200\n",
"step-5": "from flask import Blueprint, request, jsonify\nfrom to_dict import *\nfrom validacao import *\nimport sqlite3\nfrom migration import conectar, create_database\nfrom contextlib import closing\n\naluno = Blueprint(\"aluno\", __name__)\n\n@aluno.route(\"/hello\")\ndef hello():\n return \"Hello, aluno\"\n\n@aluno.route(\"/reseta\", methods = [\"POST\"])\ndef reseta():\n sqlaluno = \"\"\"DELETE FROM aluno\"\"\"\n sqldisciplina = \"\"\"DELETE FROM disciplina\"\"\"\n sqlprofessor = \"\"\"DELETE FROM professor\"\"\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sqlaluno)\n cursor.execute(sqldisciplina)\n cursor.execute(sqlprofessor)\n conn.commit()\n return jsonify({'sucess': 'reset efetuado com suceso'}), 200\n\n@aluno.route(\"/alunos\", methods = [\"GET\"])\ndef alunos_retorna_lista():\n sql = \"\"\"SELECT * FROM aluno\"\"\"\n resultados = []\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql)\n linhas = cursor.fetchall()\n for id, nome in linhas:\n resultados.append({\"id\": id, \"nome\": nome})\n return jsonify(resultados), 200\n #return jsonify(alunos), 200\n\n@aluno.route('/alunos/<int:id>', methods = [\"GET\"])\ndef aluno_por_id(id):\n sql = \"SELECT id, nome FROM aluno WHERE id = ?\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (id, ))\n r = cursor.fetchone()\n if r == None: return None\n return {\"id\": r[0], \"nome\": r[1]}\n\n\n@aluno.route(\"/alunos\", methods = [\"POST\"])\ndef adiciona_alunos():\n dados = request.get_json()\n params = (dados['nome'],)\n sql = \"INSERT INTO aluno (nome) VALUES (?)\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (params))\n conn.commit()\n return jsonify(cursor.lastrowid)\n \n\n\n@aluno.route(\"/alunos/<int:id>\", methods = [\"PUT\"])\ndef editar_aluno(id):\n dados = request.get_json()\n params = (dados['nome'], id)\n sql = \"UPDATE aluno SET 
nome = ? WHERE id = ?\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (params))\n conn.commit()\n return jsonify(dados['nome']), 200\n\n# for aluno in alunos:\n# if aluno['id'] == id:\n# aluno['nome'] = request.get_json().get('nome')\n# return jsonify(aluno), 200\n# return jsonify({'erro': 'aluno não encontrado'}), 404\n\n@aluno.route(\"/alunos/<int:id>\", methods = [\"DELETE\"])\ndef deletar_aluno(id):\n params = (id,)\n sql = \"DELETE FROM aluno WHERE id = ?\"\n with closing(conectar()) as conn, closing(conn.cursor()) as cursor:\n cursor.execute(sql, (params))\n conn.commit()\n return jsonify(id), 200",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
import platform
import keyboard
import threading
import atexit
from threading import Timer
triggerCount = 0
triggerTimer = -1
result = None
def cleanup ():
print 'cleanup before exit'
clearTimer()
keyboard
triggerCount = 0
def clearTimer ():
global triggerTimer
global triggerCount
try:
triggerTimer.isAlive()
if triggerTimer.isAlive():
triggerTimer.cancel()
triggerTimer = -1
except AttributeError:
pass
def startTimer ():
global triggerTimer
triggerTimer = Timer(0.6, validTimeout)
triggerTimer.start()
def validTimeout ():
global triggerTimer
global triggerCount
clearTimer()
triggerCount = 0
def onPresskey ():
global triggerTimer
global triggerCount
triggerCount += 1
clearTimer()
if triggerCount == 2:
print('HOTKEY-COPY')
triggerCount = 0
clearTimer()
else:
startTimer()
def registerCopyHotkey ():
if (platform.system() == 'Darwin'):
keyboard.add_hotkey('cmd+c', onPresskey)
else:
keyboard.add_hotkey('ctrl+c', onPresskey)
keyboard.wait()
def main ():
registerCopyHotkey()
if __name__ == '__main__':
atexit.register(cleanup)
main()
|
normal
|
{
"blob_id": "9e8ed462e429d6c6c0fe232431ee1e98721863e9",
"index": 6148,
"step-1": "import platform\nimport keyboard\nimport threading\nimport atexit\nfrom threading import Timer\n\ntriggerCount = 0\ntriggerTimer = -1\n\nresult = None\n\ndef cleanup ():\n print 'cleanup before exit'\n clearTimer()\n keyboard\n triggerCount = 0\n\ndef clearTimer ():\n global triggerTimer\n global triggerCount\n try:\n triggerTimer.isAlive()\n if triggerTimer.isAlive():\n triggerTimer.cancel()\n triggerTimer = -1\n except AttributeError:\n pass\n\ndef startTimer ():\n global triggerTimer\n triggerTimer = Timer(0.6, validTimeout)\n triggerTimer.start()\n\ndef validTimeout ():\n global triggerTimer\n global triggerCount\n clearTimer()\n triggerCount = 0\n\ndef onPresskey ():\n global triggerTimer\n global triggerCount\n triggerCount += 1\n clearTimer()\n\n if triggerCount == 2:\n print('HOTKEY-COPY')\n triggerCount = 0\n clearTimer()\n else:\n startTimer()\n \n\ndef registerCopyHotkey ():\n if (platform.system() == 'Darwin'):\n keyboard.add_hotkey('cmd+c', onPresskey)\n else:\n keyboard.add_hotkey('ctrl+c', onPresskey)\n keyboard.wait()\n\ndef main ():\n registerCopyHotkey()\n\nif __name__ == '__main__':\n atexit.register(cleanup)\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Дано предложение, в котором имеются буквы с и т. Определить, какая из них встречается
позже (при просмотре слова слева направо). Если таких букв несколько, то должны
учитываться последние из них. Оператор цикла с условием не использовать.
'''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
text = input("Введите предложение: ")
x1 = text.index("с")
x2 = text.index("т")
if x1 > x2:
print("Бурква 'с' встречается позже")
else:
print("Бурква 'т' встречается позже")
|
normal
|
{
"blob_id": "4bad45f8c135463fadea9b3eed52ab045a51e8db",
"index": 2520,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n text = input('Введите предложение: ')\n x1 = text.index('с')\n x2 = text.index('т')\n if x1 > x2:\n print(\"Бурква 'с' встречается позже\")\n else:\n print(\"Бурква 'т' встречается позже\")\n",
"step-3": "'''\nДано предложение, в котором имеются буквы с и т. Определить, какая из них встречается\nпозже (при просмотре слова слева направо). Если таких букв несколько, то должны\nучитываться последние из них. Оператор цикла с условием не использовать.\n'''\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nif __name__ == '__main__':\n text = input(\"Введите предложение: \")\n\n x1 = text.index(\"с\")\n x2 = text.index(\"т\")\n if x1 > x2:\n print(\"Бурква 'с' встречается позже\")\n else:\n print(\"Бурква 'т' встречается позже\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('mgdata.dat.csv')
training_set = dataset.iloc[:1100, 1:2].values
X_train=[]
y_train=[]
for i in range(20,1090):
X_train.append(training_set[i-20:i,0])
y_train.append(training_set[i,0])
X_train=np.asarray(X_train)
y_train=np.asarray(y_train)
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu', input_dim = 20))
# Adding the second hidden layer
classifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))
# Adding the third hidden layer
classifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'linear'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = [])
# Fitting the ANN to the Training set
history =classifier.fit(X_train, y_train, batch_size =8, nb_epoch = 60,validation_split=0.03)
dataset_test=dataset.iloc[1100:1110, 1:2].values
y_test=dataset.iloc[1100:1110, 1:2].values
dataset_test=pd.DataFrame(dataset_test)
dataset_train=pd.DataFrame(training_set)
dataset_total = pd.concat((dataset_train, dataset_test), axis = 0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values
inputs = inputs.reshape(-1,1)
X_test = []
for i in range(20,30):
X_test.append(inputs[i-20:i, 0])
X_test = np.array(X_test)
predicted = classifier.predict(X_test)
# Visualising the results
plt.plot(y_test, color = 'red', label="real" )
plt.plot(predicted, color = 'blue', label="predicted")
plt.legend()
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
|
normal
|
{
"blob_id": "28a3763715f5405f8abe2de17ed5f9df1019278b",
"index": 6878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(20, 1090):\n X_train.append(training_set[i - 20:i, 0])\n y_train.append(training_set[i, 0])\n<mask token>\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu',\n input_dim=20))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=1, init='uniform', activation='linear'))\nclassifier.compile(optimizer='adam', loss='mean_squared_error', metrics=[])\n<mask token>\nfor i in range(20, 30):\n X_test.append(inputs[i - 20:i, 0])\n<mask token>\nplt.plot(y_test, color='red', label='real')\nplt.plot(predicted, color='blue', label='predicted')\nplt.legend()\nplt.show()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n",
"step-3": "<mask token>\ndataset = pd.read_csv('mgdata.dat.csv')\ntraining_set = dataset.iloc[:1100, 1:2].values\nX_train = []\ny_train = []\nfor i in range(20, 1090):\n X_train.append(training_set[i - 20:i, 0])\n y_train.append(training_set[i, 0])\nX_train = np.asarray(X_train)\ny_train = np.asarray(y_train)\n<mask token>\nclassifier = Sequential()\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu',\n input_dim=20))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=1, init='uniform', activation='linear'))\nclassifier.compile(optimizer='adam', loss='mean_squared_error', metrics=[])\nhistory = classifier.fit(X_train, y_train, batch_size=8, nb_epoch=60,\n validation_split=0.03)\ndataset_test = dataset.iloc[1100:1110, 1:2].values\ny_test = dataset.iloc[1100:1110, 1:2].values\ndataset_test = pd.DataFrame(dataset_test)\ndataset_train = pd.DataFrame(training_set)\ndataset_total = pd.concat((dataset_train, dataset_test), axis=0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values\ninputs = inputs.reshape(-1, 1)\nX_test = []\nfor i in range(20, 30):\n X_test.append(inputs[i - 20:i, 0])\nX_test = np.array(X_test)\npredicted = classifier.predict(X_test)\nplt.plot(y_test, color='red', label='real')\nplt.plot(predicted, color='blue', label='predicted')\nplt.legend()\nplt.show()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('mgdata.dat.csv')\ntraining_set = dataset.iloc[:1100, 1:2].values\nX_train = []\ny_train = []\nfor i in range(20, 1090):\n X_train.append(training_set[i - 20:i, 0])\n y_train.append(training_set[i, 0])\nX_train = np.asarray(X_train)\ny_train = np.asarray(y_train)\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nclassifier = Sequential()\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu',\n input_dim=20))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=1, init='uniform', activation='linear'))\nclassifier.compile(optimizer='adam', loss='mean_squared_error', metrics=[])\nhistory = classifier.fit(X_train, y_train, batch_size=8, nb_epoch=60,\n validation_split=0.03)\ndataset_test = dataset.iloc[1100:1110, 1:2].values\ny_test = dataset.iloc[1100:1110, 1:2].values\ndataset_test = pd.DataFrame(dataset_test)\ndataset_train = pd.DataFrame(training_set)\ndataset_total = pd.concat((dataset_train, dataset_test), axis=0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values\ninputs = inputs.reshape(-1, 1)\nX_test = []\nfor i in range(20, 30):\n X_test.append(inputs[i - 20:i, 0])\nX_test = np.array(X_test)\npredicted = classifier.predict(X_test)\nplt.plot(y_test, color='red', label='real')\nplt.plot(predicted, color='blue', label='predicted')\nplt.legend()\nplt.show()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n",
"step-5": "\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\ndataset = pd.read_csv('mgdata.dat.csv')\r\ntraining_set = dataset.iloc[:1100, 1:2].values\r\n\r\nX_train=[]\r\ny_train=[]\r\nfor i in range(20,1090):\r\n X_train.append(training_set[i-20:i,0])\r\n y_train.append(training_set[i,0])\r\nX_train=np.asarray(X_train)\r\ny_train=np.asarray(y_train)\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\n# Initialising the ANN\r\nclassifier = Sequential()\r\n\r\n# Adding the input layer and the first hidden layer\r\nclassifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu', input_dim = 20))\r\n\r\n# Adding the second hidden layer\r\nclassifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))\r\n# Adding the third hidden layer\r\nclassifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))\r\n\r\n# Adding the output layer\r\nclassifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'linear'))\r\n\r\n# Compiling the ANN\r\nclassifier.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = [])\r\n\r\n# Fitting the ANN to the Training set\r\nhistory =classifier.fit(X_train, y_train, batch_size =8, nb_epoch = 60,validation_split=0.03)\r\n\r\ndataset_test=dataset.iloc[1100:1110, 1:2].values\r\ny_test=dataset.iloc[1100:1110, 1:2].values\r\ndataset_test=pd.DataFrame(dataset_test)\r\ndataset_train=pd.DataFrame(training_set)\r\n\r\n\r\ndataset_total = pd.concat((dataset_train, dataset_test), axis = 0)\r\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values\r\n\r\n\r\ninputs = inputs.reshape(-1,1)\r\n\r\nX_test = []\r\nfor i in range(20,30):\r\n X_test.append(inputs[i-20:i, 0])\r\nX_test = np.array(X_test)\r\n\r\npredicted = classifier.predict(X_test)\r\n\r\n\r\n# Visualising the results\r\nplt.plot(y_test, color = 'red', label=\"real\" )\r\nplt.plot(predicted, color = 'blue', 
label=\"predicted\")\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'validation'], loc='upper left')\r\nplt.show()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Author: Abeer Rafiq
#Modified: 11/23/2019 3:00pm
#Importing Packages
import socket, sys, time, json, sqlite3
import RPi.GPIO as GPIO
from datetime import datetime, date
#Creating a global server class
class GlobalServer:
#The constructor
def __init__(self, port, room_ip_addrs,
app_ip_addrs):
#Setting port
self.__port = int(port)
#Setting socket to receive
self.__soc_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recv_address = ('', self.__port)
self.__soc_recv.bind(recv_address)
#Setting socket/addresses to send to the room rpi and app
self.__soc_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.__room_addrs = (room_ip_addrs, self.__port)
self.__app_addrs = (app_ip_addrs, self.__port)
#Setting up led blinking
self.__receiveLED = 14
self.__sendLED = 15
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.__receiveLED, GPIO.OUT)
GPIO.setup(self.__sendLED, GPIO.OUT)
#Setting up string for acknowldegements
self.__ackstr = "{'opcode':'0'}"
#Setting database connections
dbpath = '/home/pi/Documents/Team_Project/dataBases/plantNursery_DB.db'
self.__dbconnect = sqlite3.connect(dbpath);
self.__dbconnect.row_factory = sqlite3.Row;
self.__cursor = self.__dbconnect.cursor()
#Setting up default threshold variables
self.__defaultThresholdValue = 80
self.__defaultLessGreaterThan = "<"
self.__lightThreshold = self.__defaultThresholdValue
self.__lightLessGreaterThan = self.__defaultLessGreaterThan
self.__soilMoistureThreshold = self.__defaultThresholdValue
self.__soilMoistureLessGreaterThan = self.__defaultLessGreaterThan
self.__roomHumidityThreshold = self.__defaultThresholdValue
self.__roomHumidityLessGreaterThan = self.__defaultLessGreaterThan
self.__roomTemperatureThreshold = self.__defaultThresholdValue
self.__roomTemperatureLessGreaterThan = self.__defaultLessGreaterThan
self.__currentLight = 0
self.__currentSoilMoisture = 0
self.__currentWaterDistance = 0
self.__currentRoomHumidity = 0
self.__currentRoomTemperature = 0
self.__waterPumpDuration = 2
#Setting timeout/end time values
self.__ack_timeout = 1
self.__ack_endTime = 4
print("\nGlobal Server Initialized")
#To blink a pin once
def blink(self, pin):
GPIO.output(pin,GPIO.HIGH)
time.sleep(1)
GPIO.output(pin,GPIO.LOW)
return
#Receives/returns buffer and sends ack
def receive(self):
#Receiving
print("\nWaiting to receive on port %d ... " % self.__port)
buf, address = self.__soc_recv.recvfrom(self.__port)
if(len(buf) > 0):
#Blink receive Led
self.blink(self.__receiveLED)
print ("Received %s bytes from '%s': %s " % (len(buf), address[0], buf))
#Sending ack
self.__soc_send.sendto(self.__ackstr, (address[0], self.__port))
#Blink send Led
self.blink(self.__sendLED)
print ("Sent %s to %s" % (self.__ackstr, (address[0], self.__port)))
#Give time for the ack sent to be acknowledged
time.sleep(self.__ack_endTime)
return buf
else:
return False
#To insert data into the database
def insertDBData(self, mySQL):
#Try inserting data to database table
try:
#Insert data
self.__cursor.execute(mySQL)
self.__dbconnect.commit();
except sqlite3.Error, e:
#If error, exit program
print ('\nDatabase Error %s:' % e.args[0])
self.__soc_recv.shutdown(1)
self.__soc_send.shutdown(1)
self.__cursor.close()
sys.exit(1)
return
#To add default threshold entries into the db
def setDefaultThresholds(self, potID):
potID = str(potID)
tdate = str(date.today())
ttime = str(datetime.now().strftime("%H:%M:%S"))
#Insert default thresholds into db
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'light', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'soilMoisture', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'roomTemperature', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'roomHumidity', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nSet Default Thresholds")
return
#To add user requested threshold entries into the db
def updateUserThresholdsTable(self, threshold):
potID = str(threshold.get("potID"))
lessGreaterThan = str(threshold.get("lessGreaterThan"))
thresholdValue = float(str(threshold.get("thresholdValue")))
sensorType = str(threshold.get("sensorType"))
tdate = str(date.today())
ttime = str(datetime.now().strftime("%H:%M:%S"))
#Insert thresholds into db
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', '" + sensorType + "', '" + str(thresholdValue) + \
"', '" + lessGreaterThan + "', '" + str(tdate) + "', '" + str(ttime) + "')"
self.insertDBData(mySQL)
#Reassign global server's instance threshold variables
if sensorType == "light":
self.__lightThreshold = thresholdValue
self.__lightLessGreaterThan = lessGreaterThan
elif sensorType == "soilMoisture":
self.__soilMoistureThreshold = thresholdValue
self.__soilMoistureLessGreaterThan = lessGreaterThan
elif sensorType == "roomTemperature":
self.__roomHumidityThreshold = thresholdValue
self.__roomHumidityLessGreaterThan = lessGreaterThan
elif sensorType == "roomHumidity":
self.__roomTemperatureThreshold = thresholdValue
self.__roomTemperatureLessGreaterThan = lessGreaterThan
print("\nSet User Requested Thresholds")
return
#To update user data in userPlantsTable
def updateUserPlantsTable(self, userInfo):
potID = str(userInfo.get('potID'))
roomID = str(userInfo.get('roomID'))
ownerID = str(userInfo.get('ownerID'))
#Inserting user data into db
mySQL = "INSERT INTO userPlants VALUES ('" + potID + "', '" + roomID + "', '" + ownerID + "')"
self.insertDBData(mySQL)
print("\nUpdated User Data")
return
#To update notes in userNotesTable
def updateUserNotesTable(self, userNotes):
potID = str(userNotes.get('potID'))
notes = str(userNotes.get('notes'))
tdate = str(date.today())
ttime = str(datetime.now().strftime("%H:%M:%S"))
#Inserting notes into db
mySQL = "INSERT INTO userNotes VALUES ('" + potID + "', '" + notes + "', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nUpdated Notes Data")
return
#To update pot data in db
def updatePotTable(self, sensorInfo, tdate, time):
potID = sensorInfo.get('potID')
self.__currentWaterDistance = sensorInfo.get('waterDistance')
self.__currentLight = sensorInfo.get('light')
self.__currentSoilMoisture = sensorInfo.get('soilMoisture')
#Inserting pot data into db
mySQL = "INSERT INTO potData VALUES ('" + str(potID) + "', '" + str(self.__currentLight)+ "', '" + \
str(self.__currentSoilMoisture) + "', '" + str(self.__currentWaterDistance) + "', '" + \
tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nUpdated Pot Data")
return
#To update room data in db
def updateRoomTable(self, sensorInfo,tdate, time):
self.__currentRoomTemperature = round(sensorInfo.get('temperature'), 2)
self.__currentRoomHumidity = round(sensorInfo.get('humidity'), 2)
roomID = sensorInfo.get('roomID')
#Inserting room data into db
mySQL = "insert into roomData values ('" + str(roomID) + "', '" + str(self.__currentRoomTemperature) + \
"', '" + str(self.__currentRoomHumidity) + "' , '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nUpdated Room Data")
return
#To compare current sensor data to threshold values
def checkUserThresholds(self):
#Notification json #Should be receiving an ack so timeout if no ack receivedstrings
lightNotfn = '{"opcode" : "D", "sensorArray" : "1, 0, 0, 0, 0, 0, 0, 0, 0, 0"}'
roomHumidityNotfn = '{"opcode" : "D", "sensorArray" : "0, 1, 0, 0, 0, 0, 0, 0, 0, 0"}'
roomTemperatureNotfn = '{"opcode" : "D", "sensorArray" : "0, 0, 1, 0, 0, 0, 0, 0, 0, 0"}'
soilMoistureNotfn = '{"opcode" : "D", "sensorArray" : "0, 0, 0, 1, 0, 0, 0, 0, 0, 0"}'
#Tuples of sensor data to easily neatly
light = (self.__currentLight, self.__lightThreshold, self.__lightLessGreaterThan, lightNotfn)
soilMoisture = (self.__currentSoilMoisture, self.__soilMoistureThreshold, \
self.__soilMoistureLessGreaterThan, soilMoistureNotfn, self.__waterPumpDuration)
roomHumidity = (self.__currentRoomHumidity, self.__roomHumidityThreshold, \
self.__roomHumidityLessGreaterThan, roomHumidityNotfn)
roomTemperature = (self.__currentRoomTemperature, self.__roomTemperatureThreshold, \
self.__roomTemperatureLessGreaterThan, roomTemperatureNotfn)
#Combined tuples for sensors
sensorArr = [light, roomHumidity, roomTemperature, soilMoisture]
#For each sensor compare current sensor value with threshold value
for sensor in sensorArr:
if sensor[2] == ">":
if sensor[0] > sensor[1]:
#Threshold is met, notify user
notifyApp(sensor[3])
if(len(sensor) == 4):
#Soil moisture's threshold is met, then start water pump, notify user
startPumpStr = '{"opcode" : "4", "pumpDuration" : "' + str(sensor[4]) + '"}'
startWaterPump(startPumpStr)
notifyApp(startPumpStr)
elif sensor[2] == "<":
if sensor[0] < sensor[1]:
#Threshold is met, notify user
notifyApp(sensor[3])
if(length(sensor) == 4):
#Soil moisture's threshold is met, then start water pump, notify user
startPumpStr = '{"opcode" : "4", "pumpDuration" : "' + str(sensor[4]) + '"}'
startWaterPump(startPumpStr)
notifyApp(startPumpStr)
print("\Thresholds Compared")
return
#Send room rpi msg to start water pump
def startWaterPump(self, startPump):
if (self.send_Room_Msg(startPump) == False):
#If no ack received, send msg again
print("\nStart Water Pump sent again to server")
self.startWaterPump(startPump)
return
#To send msgs to the room and wait for ack
def send_Room_Msg(self, message):
self.__soc_send.sendto(message, self.__room_addrs)
#Blink send LED
self.blink(self.__sendLED)
print("\Message sent to Room: " + message)
#Should be receiving an ack so timeout if no ack received
soc_recv.settimeout(self.__ack_timeout)
startTime = time.time()
endTime = self.__ack_endTime
while (True):
#If less than a endTime amount of time
if time.time() < (startTime + endTime):
try:
#Try Receving otherwise timeout and retry
print("Waiting for Acknowledgement . . .")
buf, address = soc_recv.recvfrom(self.__port)
except socket.timeout:
print("Receiving is Timed Out")
#Restart while loop (Retry)
continue
try:
#If buf is received, try to load it
buf = json.loads(buf)
if not len(buf):
#No ack received, retry
continue
else:
if (buf.get("opcode") == "0"):
#Ack recevied!
print("Acknowledgement Received")
return True
else:
#No ack received, retry
continue
except (ValueError, KeyError, TypeError):
#Ack not received, try again
continue
else:
#Failed to receive ack within a endTime amount of time
return False
return
#To notifcations msgs to the app
def notifyApp(self, message):
if (self.send_App_Msg(message) == False):
#If no ack received, send msg again
print("\nNotification sent again to server")
self.notifyApp(message)
return
#To send msgs to the app and wait for ack
def send_App_Msg(self, message):
self.__soc_send.sendto(message, self.__app_addrs)
#Blink send LED
self.blink(self.__sendLED)
print("\nNotifcation sent to App: " + message)
#Should be receiving an ack so timeout if no ack received
soc_recv.settimeout(self.__ack_timeout)
startTime = time.time()
endTime = self.__ack_endTime
while (True):
#If less than a endTime amount of time
if time.time() < (startTime + endTime):
try:
#Try Receving otherwise timeout and retry
print("Waiting for Acknowledgement . . .")
buf, address = soc_recv.recvfrom(self.__port)
except socket.timeout:
print("Receiving is Timed Out")
#Restart while loop (Retry)
continue
try:
#If buf is received, try to load it
buf = json.loads(buf)
if not len(buf):
#No ack received, retry
continue
else:
if (buf.get("opcode") == "0"):
#Ack recevied!
print("Acknowledgement Received")
return True
else:
#No ack received, retry
continue
except (ValueError, KeyError, TypeError):
#Ack not received, try again
continue
else:
#Failed to receive ack within a endTime amount of time
return False
return
#To get requested stats from the db
def get_stats(self, rowNumbers, sensors):
#Try retrieving data from the database
try:
#Retrieve Data
sensors = sensors.replace('"',"").replace("'","").replace('[',"").replace(']',"")
mysql = """SELECT """ + sensors + """, tdate, ttime FROM (
SELECT * FROM userPlants a
INNER JOIN potData b
ON a.potID = b.potID
INNER JOIN roomData c
ON a.roomID = c.roomID AND b.tdate = c.tdate AND b.ttime = c.ttime
ORDER BY c.tdate DESC, c.ttime DESC LIMIT """ + str(rowNumbers) + """)"""
myresult = self.__cursor.execute(mysql).fetchall()
except sqlite3.Error, e:
#If error, exit program
print '\nDatabase Error %s:' % e.args[0]
sys.exit(1)
#Convert data into json format
stats = json.dumps( [dict(i) for i in myresult] )
print("\nData Retreived from DB")
return stats
#To send the stats with the corresponding opcode
def send_stats(self, rowNumbers, sensors):
if rowNumbers == '0':
#0 means to send app just one most recent row of data (opcode E)
oneRow = globalServer.get_stats(1, sensors)
stats = '{"opcode" : "E", "statsArray" : "' + str(oneRow) + '"}'
else:
#Otherwise send mutiple recent rows of data (opcode 6)
manyRows = globalServer.get_stats(rowNumbers, sensors)
stats = '{"opcode" : "6", "statsArray" : "' + str(manyRows) + '"}'
#Send stats to App
#If ack received return
if (self.send_notifyApp(error) == True):
print("\nStats sent to app")
else:
#If no ack received, try sending again
print("\nStats sent again to app (notify again)")
self.send_stats(rowNumbers, sensors)
return
#Main function which receives json data and invokes methods based on opcode received
def main():
    """Receive JSON messages forever and dispatch them on their opcode."""
    # Create GlobalServer object (port, room_ip_addrs, app_ip_addrs).
    globalServer = GlobalServer(1000, '192.168.1.47',
                                '192.168.137.102')
    # BUG FIX: the old post-loop cleanup referenced 'self', which does not
    # exist in a module-level function (NameError), and was unreachable
    # anyway because the loop never breaks; it has been removed.
    while True:
        message = globalServer.receive()
        if message == False:
            # Empty buffer: wait for the next datagram.
            continue
        message = json.loads(message)
        opcode = message.get('opcode')
        if opcode == "1":
            # User wants to update the notes table.
            globalServer.updateUserNotesTable(message)
        elif opcode == "2":
            # User adds a pot with a room and owner; set default thresholds.
            globalServer.updateUserPlantsTable(message)
            globalServer.setDefaultThresholds(message.get("potID"))
        elif opcode == "3":
            # User sets thresholds to requested values.
            globalServer.updateUserThresholdsTable(message)
        elif opcode == "5":
            # User wants to view stats.
            rowNumbers = message.get("rowNumbers")
            sensors = message.get("sensorType")
            globalServer.send_stats(rowNumbers, sensors)
        elif opcode == "D":
            # An error occurred in the room Pi or Arduino: forward it.
            globalServer.notifyApp(str(message))
        elif opcode == "9":
            # Room Pi sent all sensor data: store it and check thresholds.
            tdate = str(date.today())
            ttime = str(datetime.now().strftime("%H:%M:%S"))
            globalServer.updateRoomTable(message, tdate, ttime)
            globalServer.updatePotTable(message, tdate, ttime)
            globalServer.checkUserThresholds()

if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "7ce679d5b889493f278de6deca6ec6bdb7acd3f5",
"index": 910,
"step-1": "#Author: Abeer Rafiq\n#Modified: 11/23/2019 3:00pm\n\n#Importing Packages\nimport socket, sys, time, json, sqlite3\nimport RPi.GPIO as GPIO\nfrom datetime import datetime, date\n\n#Creating a global server class\nclass GlobalServer:\n #The constructor\n def __init__(self, port, room_ip_addrs,\n app_ip_addrs):\n #Setting port\n self.__port = int(port)\n #Setting socket to receive\n self.__soc_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n recv_address = ('', self.__port)\n self.__soc_recv.bind(recv_address)\n #Setting socket/addresses to send to the room rpi and app\n self.__soc_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.__room_addrs = (room_ip_addrs, self.__port)\n self.__app_addrs = (app_ip_addrs, self.__port)\n #Setting up led blinking\n self.__receiveLED = 14\n self.__sendLED = 15\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(self.__receiveLED, GPIO.OUT)\n GPIO.setup(self.__sendLED, GPIO.OUT)\n #Setting up string for acknowldegements\n self.__ackstr = \"{'opcode':'0'}\"\n #Setting database connections\n dbpath = '/home/pi/Documents/Team_Project/dataBases/plantNursery_DB.db'\n self.__dbconnect = sqlite3.connect(dbpath); \n self.__dbconnect.row_factory = sqlite3.Row;\n self.__cursor = self.__dbconnect.cursor() \n #Setting up default threshold variables\n self.__defaultThresholdValue = 80\n self.__defaultLessGreaterThan = \"<\"\n self.__lightThreshold = self.__defaultThresholdValue\n self.__lightLessGreaterThan = self.__defaultLessGreaterThan\n self.__soilMoistureThreshold = self.__defaultThresholdValue\n self.__soilMoistureLessGreaterThan = self.__defaultLessGreaterThan\n self.__roomHumidityThreshold = self.__defaultThresholdValue\n self.__roomHumidityLessGreaterThan = self.__defaultLessGreaterThan\n self.__roomTemperatureThreshold = self.__defaultThresholdValue\n self.__roomTemperatureLessGreaterThan = self.__defaultLessGreaterThan\n self.__currentLight = 0\n self.__currentSoilMoisture = 0\n 
self.__currentWaterDistance = 0\n self.__currentRoomHumidity = 0\n self.__currentRoomTemperature = 0\n self.__waterPumpDuration = 2\n #Setting timeout/end time values\n self.__ack_timeout = 1\n self.__ack_endTime = 4\n print(\"\\nGlobal Server Initialized\")\n \n #To blink a pin once\n def blink(self, pin):\n GPIO.output(pin,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(pin,GPIO.LOW)\n return\n \n #Receives/returns buffer and sends ack \n def receive(self):\n #Receiving\n print(\"\\nWaiting to receive on port %d ... \" % self.__port)\n buf, address = self.__soc_recv.recvfrom(self.__port)\n if(len(buf) > 0):\n #Blink receive Led\n self.blink(self.__receiveLED)\n print (\"Received %s bytes from '%s': %s \" % (len(buf), address[0], buf))\n #Sending ack\n self.__soc_send.sendto(self.__ackstr, (address[0], self.__port))\n #Blink send Led\n self.blink(self.__sendLED)\n print (\"Sent %s to %s\" % (self.__ackstr, (address[0], self.__port)))\n #Give time for the ack sent to be acknowledged\n time.sleep(self.__ack_endTime)\n return buf\n else:\n return False\n \n #To insert data into the database\n def insertDBData(self, mySQL):\n #Try inserting data to database table\n try:\n #Insert data\n self.__cursor.execute(mySQL)\n self.__dbconnect.commit();\n except sqlite3.Error, e:\n #If error, exit program \n print ('\\nDatabase Error %s:' % e.args[0])\n self.__soc_recv.shutdown(1)\n self.__soc_send.shutdown(1)\n self.__cursor.close()\n sys.exit(1)\n return\n \n #To add default threshold entries into the db\n def setDefaultThresholds(self, potID):\n potID = str(potID)\n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n #Insert default thresholds into db\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', 'light', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + 
\"', 'soilMoisture', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', 'roomTemperature', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', 'roomHumidity', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nSet Default Thresholds\")\n return\n \n #To add user requested threshold entries into the db\n def updateUserThresholdsTable(self, threshold):\n potID = str(threshold.get(\"potID\"))\n lessGreaterThan = str(threshold.get(\"lessGreaterThan\"))\n thresholdValue = float(str(threshold.get(\"thresholdValue\")))\n sensorType = str(threshold.get(\"sensorType\"))\n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n #Insert thresholds into db\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', '\" + sensorType + \"', '\" + str(thresholdValue) + \\\n \"', '\" + lessGreaterThan + \"', '\" + str(tdate) + \"', '\" + str(ttime) + \"')\" \n self.insertDBData(mySQL)\n #Reassign global server's instance threshold variables\n if sensorType == \"light\":\n self.__lightThreshold = thresholdValue\n self.__lightLessGreaterThan = lessGreaterThan\n elif sensorType == \"soilMoisture\":\n self.__soilMoistureThreshold = thresholdValue \n self.__soilMoistureLessGreaterThan = lessGreaterThan\n elif sensorType == \"roomTemperature\":\n self.__roomHumidityThreshold = thresholdValue\n self.__roomHumidityLessGreaterThan = lessGreaterThan\n elif sensorType == \"roomHumidity\":\n self.__roomTemperatureThreshold = thresholdValue\n 
self.__roomTemperatureLessGreaterThan = lessGreaterThan\n print(\"\\nSet User Requested Thresholds\")\n return\n\n #To update user data in userPlantsTable\n def updateUserPlantsTable(self, userInfo):\n potID = str(userInfo.get('potID'))\n roomID = str(userInfo.get('roomID'))\n ownerID = str(userInfo.get('ownerID'))\n #Inserting user data into db\n mySQL = \"INSERT INTO userPlants VALUES ('\" + potID + \"', '\" + roomID + \"', '\" + ownerID + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nUpdated User Data\")\n return\n \n #To update notes in userNotesTable\n def updateUserNotesTable(self, userNotes):\n potID = str(userNotes.get('potID'))\n notes = str(userNotes.get('notes'))\n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n #Inserting notes into db\n mySQL = \"INSERT INTO userNotes VALUES ('\" + potID + \"', '\" + notes + \"', '\" + tdate + \"', '\" + ttime + \"')\"\n self.insertDBData(mySQL)\n print(\"\\nUpdated Notes Data\")\n return\n \n #To update pot data in db\n def updatePotTable(self, sensorInfo, tdate, time):\n potID = sensorInfo.get('potID')\n self.__currentWaterDistance = sensorInfo.get('waterDistance')\n self.__currentLight = sensorInfo.get('light')\n self.__currentSoilMoisture = sensorInfo.get('soilMoisture')\n #Inserting pot data into db\n mySQL = \"INSERT INTO potData VALUES ('\" + str(potID) + \"', '\" + str(self.__currentLight)+ \"', '\" + \\\n str(self.__currentSoilMoisture) + \"', '\" + str(self.__currentWaterDistance) + \"', '\" + \\\n tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nUpdated Pot Data\")\n return\n \n #To update room data in db\n def updateRoomTable(self, sensorInfo,tdate, time):\n self.__currentRoomTemperature = round(sensorInfo.get('temperature'), 2)\n self.__currentRoomHumidity = round(sensorInfo.get('humidity'), 2)\n roomID = sensorInfo.get('roomID')\n #Inserting room data into db\n mySQL = \"insert into roomData values ('\" + str(roomID) + \"', '\" + 
str(self.__currentRoomTemperature) + \\\n \"', '\" + str(self.__currentRoomHumidity) + \"' , '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nUpdated Room Data\")\n return\n\n\n #To compare current sensor data to threshold values\n def checkUserThresholds(self):\n #Notification json #Should be receiving an ack so timeout if no ack receivedstrings\n lightNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"1, 0, 0, 0, 0, 0, 0, 0, 0, 0\"}' \n roomHumidityNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"0, 1, 0, 0, 0, 0, 0, 0, 0, 0\"}'\n roomTemperatureNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"0, 0, 1, 0, 0, 0, 0, 0, 0, 0\"}'\n soilMoistureNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"0, 0, 0, 1, 0, 0, 0, 0, 0, 0\"}'\n #Tuples of sensor data to easily neatly\n light = (self.__currentLight, self.__lightThreshold, self.__lightLessGreaterThan, lightNotfn)\n soilMoisture = (self.__currentSoilMoisture, self.__soilMoistureThreshold, \\\n self.__soilMoistureLessGreaterThan, soilMoistureNotfn, self.__waterPumpDuration)\n roomHumidity = (self.__currentRoomHumidity, self.__roomHumidityThreshold, \\\n self.__roomHumidityLessGreaterThan, roomHumidityNotfn)\n roomTemperature = (self.__currentRoomTemperature, self.__roomTemperatureThreshold, \\\n self.__roomTemperatureLessGreaterThan, roomTemperatureNotfn)\n #Combined tuples for sensors\n sensorArr = [light, roomHumidity, roomTemperature, soilMoisture]\n #For each sensor compare current sensor value with threshold value\n for sensor in sensorArr:\n if sensor[2] == \">\":\n if sensor[0] > sensor[1]:\n #Threshold is met, notify user\n notifyApp(sensor[3])\n if(len(sensor) == 4):\n #Soil moisture's threshold is met, then start water pump, notify user\n startPumpStr = '{\"opcode\" : \"4\", \"pumpDuration\" : \"' + str(sensor[4]) + '\"}'\n startWaterPump(startPumpStr) \n notifyApp(startPumpStr) \n elif sensor[2] == \"<\":\n if sensor[0] < sensor[1]:\n #Threshold is met, notify user\n 
notifyApp(sensor[3])\n if(length(sensor) == 4):\n #Soil moisture's threshold is met, then start water pump, notify user\n startPumpStr = '{\"opcode\" : \"4\", \"pumpDuration\" : \"' + str(sensor[4]) + '\"}'\n startWaterPump(startPumpStr) \n notifyApp(startPumpStr) \n print(\"\\Thresholds Compared\")\n return\n \n #Send room rpi msg to start water pump\n def startWaterPump(self, startPump):\n if (self.send_Room_Msg(startPump) == False):\n #If no ack received, send msg again\n print(\"\\nStart Water Pump sent again to server\")\n self.startWaterPump(startPump)\n return\n \n #To send msgs to the room and wait for ack\n def send_Room_Msg(self, message):\n self.__soc_send.sendto(message, self.__room_addrs)\n #Blink send LED\n self.blink(self.__sendLED)\n print(\"\\Message sent to Room: \" + message)\n #Should be receiving an ack so timeout if no ack received\n soc_recv.settimeout(self.__ack_timeout)\n startTime = time.time()\n endTime = self.__ack_endTime\n while (True):\n #If less than a endTime amount of time\n if time.time() < (startTime + endTime):\n try:\n #Try Receving otherwise timeout and retry\n print(\"Waiting for Acknowledgement . . 
.\")\n buf, address = soc_recv.recvfrom(self.__port)\n except socket.timeout:\n print(\"Receiving is Timed Out\")\n #Restart while loop (Retry)\n continue\n try:\n #If buf is received, try to load it\n buf = json.loads(buf)\n if not len(buf):\n #No ack received, retry\n continue\n else:\n if (buf.get(\"opcode\") == \"0\"):\n #Ack recevied!\n print(\"Acknowledgement Received\")\n return True\n else:\n #No ack received, retry\n continue\n except (ValueError, KeyError, TypeError):\n #Ack not received, try again\n continue\n else:\n #Failed to receive ack within a endTime amount of time\n return False\n return\n \n #To notifcations msgs to the app\n def notifyApp(self, message):\n if (self.send_App_Msg(message) == False):\n #If no ack received, send msg again\n print(\"\\nNotification sent again to server\")\n self.notifyApp(message)\n return\n \n #To send msgs to the app and wait for ack\n def send_App_Msg(self, message):\n self.__soc_send.sendto(message, self.__app_addrs)\n #Blink send LED\n self.blink(self.__sendLED)\n print(\"\\nNotifcation sent to App: \" + message)\n #Should be receiving an ack so timeout if no ack received\n soc_recv.settimeout(self.__ack_timeout)\n startTime = time.time()\n endTime = self.__ack_endTime\n while (True):\n #If less than a endTime amount of time\n if time.time() < (startTime + endTime):\n try:\n #Try Receving otherwise timeout and retry\n print(\"Waiting for Acknowledgement . . 
.\")\n buf, address = soc_recv.recvfrom(self.__port)\n except socket.timeout:\n print(\"Receiving is Timed Out\")\n #Restart while loop (Retry)\n continue\n try:\n #If buf is received, try to load it\n buf = json.loads(buf)\n if not len(buf):\n #No ack received, retry\n continue\n else:\n if (buf.get(\"opcode\") == \"0\"):\n #Ack recevied!\n print(\"Acknowledgement Received\")\n return True\n else:\n #No ack received, retry\n continue\n except (ValueError, KeyError, TypeError):\n #Ack not received, try again\n continue\n else:\n #Failed to receive ack within a endTime amount of time\n return False\n return\n \n #To get requested stats from the db\n def get_stats(self, rowNumbers, sensors):\n #Try retrieving data from the database\n try:\n #Retrieve Data\n sensors = sensors.replace('\"',\"\").replace(\"'\",\"\").replace('[',\"\").replace(']',\"\")\n mysql = \"\"\"SELECT \"\"\" + sensors + \"\"\", tdate, ttime FROM (\n SELECT * FROM userPlants a\n INNER JOIN potData b\n ON a.potID = b.potID \n INNER JOIN roomData c \n ON a.roomID = c.roomID AND b.tdate = c.tdate AND b.ttime = c.ttime\n ORDER BY c.tdate DESC, c.ttime DESC LIMIT \"\"\" + str(rowNumbers) + \"\"\")\"\"\"\n myresult = self.__cursor.execute(mysql).fetchall()\n except sqlite3.Error, e:\n #If error, exit program \n print '\\nDatabase Error %s:' % e.args[0]\n sys.exit(1)\n #Convert data into json format\n stats = json.dumps( [dict(i) for i in myresult] )\n print(\"\\nData Retreived from DB\")\n return stats\n \n #To send the stats with the corresponding opcode\n def send_stats(self, rowNumbers, sensors):\n if rowNumbers == '0':\n #0 means to send app just one most recent row of data (opcode E)\n oneRow = globalServer.get_stats(1, sensors)\n stats = '{\"opcode\" : \"E\", \"statsArray\" : \"' + str(oneRow) + '\"}'\n else:\n #Otherwise send mutiple recent rows of data (opcode 6)\n manyRows = globalServer.get_stats(rowNumbers, sensors)\n stats = '{\"opcode\" : \"6\", \"statsArray\" : \"' + str(manyRows) + '\"}'\n 
#Send stats to App\n #If ack received return\n if (self.send_notifyApp(error) == True):\n print(\"\\nStats sent to app\")\n else:\n #If no ack received, try sending again\n print(\"\\nStats sent again to app (notify again)\")\n self.send_stats(rowNumbers, sensors)\n return\n\n#Main function which receives json data and invokes methods based on opcode received\ndef main():\n #Create GlobalServer object (port, room_ip_addrs, app_ip_addrs)\n globalServer = GlobalServer(1000, '192.168.1.47',\n '192.168.137.102')\n while True:\n message = globalServer.receive()\n if (message == False):\n #If length of buffer is <1\n continue\n else:\n message = json.loads(message)\n #User wants to update notes table\n if (message.get('opcode') == \"1\"):\n globalServer.updateUserNotesTable(message)\n #User wants to add a pot with a room and owner\n if (message.get('opcode') == \"2\"): \n globalServer.updateUserPlantsTable(message)\n #Set default thresholds for that potID\n globalServer.setDefaultThresholds(message.get(\"potID\"))\n #If user wants to set thresholds to requested ones\n if (message.get('opcode') == \"3\"): \n globalServer.updateUserThresholdsTable(message)\n #If user wants to view stats\n if (message.get('opcode') == \"5\"):\n rowNumbers = message.get(\"rowNumbers\")\n sensors = message.get(\"sensorType\")\n globalServer.send_stats(rowNumbers, sensors)\n #If an error has occured in the room rpi or arduino\n if (message.get('opcode') == \"D\"): \n globalServer.notifyApp(str(message))\n #If room rpi sent all sensory data, update tables, compare values to thresholds as well\n if (message.get('opcode') == \"9\"): \n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n globalServer.updateRoomTable(message, tdate, ttime)\n globalServer.updatePotTable(message, tdate, ttime) \n globalServer.checkUserThresholds() \n self.__soc_recv.shutdown(1)\n self.__soc_send.shutdown(1)\n self.__cursor.close()\n return\n \nif __name__== \"__main__\":\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if m < n:
if m - x < x:
x = m - x
if n - y < y:
y = n - y
else:
if n - x < x:
x = n - x
if m - y < y:
y = m - y
if x < y:
print(x)
else:
print(y)
<|reserved_special_token_1|>
n = int(input())
m = int(input())
x = int(input())
y = int(input())
# Clamp each coordinate to the distance from its nearer edge; which
# dimension bounds which coordinate depends on the m/n ordering.
if m < n:
    x = min(x, m - x)
    y = min(y, n - y)
else:
    x = min(x, n - x)
    y = min(y, m - y)
# Report the smaller of the two clamped distances.
print(min(x, y))
|
flexible
|
{
"blob_id": "002cced6d24a4790d29f195355c795d609f744a7",
"index": 9134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif m < n:\n if m - x < x:\n x = m - x\n if n - y < y:\n y = n - y\nelse:\n if n - x < x:\n x = n - x\n if m - y < y:\n y = m - y\nif x < y:\n print(x)\nelse:\n print(y)\n",
"step-3": "n = int(input())\nm = int(input())\nx = int(input())\ny = int(input())\nif m < n:\n if m - x < x:\n x = m - x\n if n - y < y:\n y = n - y\nelse:\n if n - x < x:\n x = n - x\n if m - y < y:\n y = m - y\nif x < y:\n print(x)\nelse:\n print(y)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class MockResponseError(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MockResponseParsed(object):
    """Mock of a parsed DSF response: HOV is a list of (field, value) pairs."""

    def __init__(self):
        # BUG FIX: HOV was a mutable class attribute shared by every
        # instance, so each instantiation appended five more duplicate
        # entries.  It is now a per-instance list.
        self.HOV = [
            ('FODT', '010107'),
            ('PERS', '50160'),
            ('NAVN-F', 'TOMAS'),
            ('NAVN-M', ''),
            ('UKJENTFELT', 'something'),
        ]
class PersonTests(TestCase):
    """Placeholder tests for the person-lookup service (not yet implemented)."""

    def test_bad_search_fields(self):
        # TODO: exercise a lookup with invalid search fields.
        pass

    def test_error_response(self):
        # TODO: exercise handling of an error response.
        pass

    def test_empty_response(self):
        # BUG FIX: was misspelled 'tets_empty_response', so the test
        # runner would never collect it.
        # TODO: exercise handling of an empty response.
        pass

    def test_result(self):
        # TODO: exercise a successful lookup result.
        pass
class ResponseTests(TestCase):
    """Behavioral tests for parse_response over mocked DSF replies."""

    def test_has_result(self):
        reply = MockResponseOK()
        self.assertEqual(parse_response((200, reply)), 'Results')

    def test_no_result(self):
        reply = MockResponseOK(error_code='1', result=False)
        self.assertIsNone(parse_response((200, reply)))

    @staticmethod
    def test_result_with_error():
        reply = MockResponseOK(error_code='2', result=False)
        with pytest.raises(DSFServiceError):
            parse_response((200, reply))

    @staticmethod
    def test_uknown_error():
        reply = MockResponseError()
        with pytest.raises(DSFServiceError):
            parse_response((500, reply))
class TranslationTests(TestCase):
    """Tests for translating DSF field names to and from canonical names."""

    def test_output_translation(self):
        translated = translate_output_fields(MockResponseParsed())
        self.assertIsInstance(translated, dict)
        for key in ('date_of_birth', 'person_number', 'first_name', 'middle_name'):
            self.assertTrue(key in translated)
        self.assertEqual(translated['first_name'], 'Tomas')
        self.assertEqual(translated['UKJENTFELT'], 'Something')
        self.assertIsNone(translated['middle_name'])

    def test_input_translation(self):
        valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',
                       'last_name': 'topstad'}
        bad_input = {'end_user': 'unicornis_test',
                     'invalidfield': 'somevalue'}
        with pytest.raises(ValueError):
            translate_input_fields(**bad_input)
        translated = translate_input_fields(**valid_input)
        for key in ('saksref', 'fornavn', 'etternavn'):
            self.assertTrue(key in translated)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MockError(object):
<|reserved_special_token_0|>
class MockErrorDetail(object):
feil = MockError()
class MockResponseOK(object):
def __init__(self, error_code=None, result=True):
if result:
self.RESULT = 'Results'
else:
self.MESSAGE = MockMessage(error_code)
class MockResponseError(object):
faultstring = 'Fault string'
detail = MockErrorDetail()
def __init__(self):
pass
class MockResponseParsed(object):
HOV = list()
def __init__(self):
self.HOV.append(('FODT', '010107'))
self.HOV.append(('PERS', '50160'))
self.HOV.append(('NAVN-F', 'TOMAS'))
self.HOV.append(('NAVN-M', ''))
self.HOV.append(('UKJENTFELT', 'something'))
class PersonTests(TestCase):
def test_bad_search_fields(self):
pass
def test_error_response(self):
pass
def tets_empty_response(self):
pass
def test_result(self):
pass
class ResponseTests(TestCase):
def test_has_result(self):
result = MockResponseOK()
parsed = parse_response((200, result))
self.assertEqual(parsed, 'Results')
def test_no_result(self):
result = MockResponseOK(error_code='1', result=False)
parsed = parse_response((200, result))
self.assertIsNone(parsed)
@staticmethod
def test_result_with_error():
result = MockResponseOK(error_code='2', result=False)
with pytest.raises(DSFServiceError):
parse_response((200, result))
@staticmethod
def test_uknown_error():
result = MockResponseError()
with pytest.raises(DSFServiceError):
parse_response((500, result))
class TranslationTests(TestCase):
def test_output_translation(self):
response = MockResponseParsed()
translated = translate_output_fields(response)
self.assertIsInstance(translated, dict)
self.assertTrue('date_of_birth' in translated)
self.assertTrue('person_number' in translated)
self.assertTrue('first_name' in translated)
self.assertTrue('middle_name' in translated)
self.assertEqual(translated['first_name'], 'Tomas')
self.assertEqual(translated['UKJENTFELT'], 'Something')
self.assertIsNone(translated['middle_name'])
def test_input_translation(self):
valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',
'last_name': 'topstad'}
invalid_input_invalid_field = {'end_user': 'unicornis_test',
'invalidfield': 'somevalue'}
with pytest.raises(ValueError):
translate_input_fields(**invalid_input_invalid_field)
translated = translate_input_fields(**valid_input)
self.assertTrue('saksref' in translated)
self.assertTrue('fornavn' in translated)
self.assertTrue('etternavn' in translated)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MockMessage(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MockError(object):
feilmelding = 'Error message here.'
class MockErrorDetail(object):
feil = MockError()
class MockResponseOK(object):
def __init__(self, error_code=None, result=True):
if result:
self.RESULT = 'Results'
else:
self.MESSAGE = MockMessage(error_code)
class MockResponseError(object):
faultstring = 'Fault string'
detail = MockErrorDetail()
def __init__(self):
pass
class MockResponseParsed(object):
HOV = list()
def __init__(self):
self.HOV.append(('FODT', '010107'))
self.HOV.append(('PERS', '50160'))
self.HOV.append(('NAVN-F', 'TOMAS'))
self.HOV.append(('NAVN-M', ''))
self.HOV.append(('UKJENTFELT', 'something'))
class PersonTests(TestCase):
def test_bad_search_fields(self):
pass
def test_error_response(self):
pass
def tets_empty_response(self):
pass
def test_result(self):
pass
class ResponseTests(TestCase):
def test_has_result(self):
result = MockResponseOK()
parsed = parse_response((200, result))
self.assertEqual(parsed, 'Results')
def test_no_result(self):
result = MockResponseOK(error_code='1', result=False)
parsed = parse_response((200, result))
self.assertIsNone(parsed)
@staticmethod
def test_result_with_error():
result = MockResponseOK(error_code='2', result=False)
with pytest.raises(DSFServiceError):
parse_response((200, result))
@staticmethod
def test_uknown_error():
result = MockResponseError()
with pytest.raises(DSFServiceError):
parse_response((500, result))
class TranslationTests(TestCase):
def test_output_translation(self):
response = MockResponseParsed()
translated = translate_output_fields(response)
self.assertIsInstance(translated, dict)
self.assertTrue('date_of_birth' in translated)
self.assertTrue('person_number' in translated)
self.assertTrue('first_name' in translated)
self.assertTrue('middle_name' in translated)
self.assertEqual(translated['first_name'], 'Tomas')
self.assertEqual(translated['UKJENTFELT'], 'Something')
self.assertIsNone(translated['middle_name'])
def test_input_translation(self):
valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',
'last_name': 'topstad'}
invalid_input_invalid_field = {'end_user': 'unicornis_test',
'invalidfield': 'somevalue'}
with pytest.raises(ValueError):
translate_input_fields(**invalid_input_invalid_field)
translated = translate_input_fields(**valid_input)
self.assertTrue('saksref' in translated)
self.assertTrue('fornavn' in translated)
self.assertTrue('etternavn' in translated)
<|reserved_special_token_1|>
from __future__ import unicode_literals
import pytest
from unittest import TestCase
from pydsf.exceptions import DSFServiceError
from pydsf.service.response import parse_response
from pydsf.service.translations import translate_input_fields, translate_output_fields
class MockMessage(object):
SUMMARY = 'Error summary'
def __init__(self, error_code):
self.CODE = error_code
class MockError(object):
feilmelding = 'Error message here.'
class MockErrorDetail(object):
feil = MockError()
class MockResponseOK(object):
def __init__(self, error_code=None, result=True):
if result:
self.RESULT = 'Results'
else:
self.MESSAGE = MockMessage(error_code)
class MockResponseError(object):
faultstring = 'Fault string'
detail = MockErrorDetail()
def __init__(self):
pass
class MockResponseParsed(object):
HOV = list()
def __init__(self):
self.HOV.append(('FODT', '010107'))
self.HOV.append(('PERS', '50160'))
self.HOV.append(('NAVN-F', 'TOMAS'))
self.HOV.append(('NAVN-M', ''))
self.HOV.append(('UKJENTFELT', 'something'))
class PersonTests(TestCase):
def test_bad_search_fields(self):
pass
def test_error_response(self):
pass
def tets_empty_response(self):
pass
def test_result(self):
pass
class ResponseTests(TestCase):
def test_has_result(self):
result = MockResponseOK()
parsed = parse_response((200, result))
self.assertEqual(parsed, 'Results')
def test_no_result(self):
result = MockResponseOK(error_code='1', result=False)
parsed = parse_response((200, result))
self.assertIsNone(parsed)
@staticmethod
def test_result_with_error():
result = MockResponseOK(error_code='2', result=False)
with pytest.raises(DSFServiceError):
parse_response((200, result))
@staticmethod
def test_uknown_error():
result = MockResponseError()
with pytest.raises(DSFServiceError):
parse_response((500, result))
class TranslationTests(TestCase):
def test_output_translation(self):
response = MockResponseParsed()
translated = translate_output_fields(response)
self.assertIsInstance(translated, dict)
self.assertTrue('date_of_birth' in translated)
self.assertTrue('person_number' in translated)
self.assertTrue('first_name' in translated)
self.assertTrue('middle_name' in translated)
self.assertEqual(translated['first_name'], 'Tomas')
self.assertEqual(translated['UKJENTFELT'], 'Something')
self.assertIsNone(translated['middle_name'])
def test_input_translation(self):
valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',
'last_name': 'topstad'}
invalid_input_invalid_field = {'end_user': 'unicornis_test',
'invalidfield': 'somevalue'}
with pytest.raises(ValueError):
translate_input_fields(**invalid_input_invalid_field)
translated = translate_input_fields(**valid_input)
self.assertTrue('saksref' in translated)
self.assertTrue('fornavn' in translated)
self.assertTrue('etternavn' in translated)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from unittest import TestCase
from pydsf.exceptions import DSFServiceError
from pydsf.service.response import parse_response
from pydsf.service.translations import translate_input_fields, translate_output_fields
class MockMessage(object):
SUMMARY = "Error summary"
def __init__(self, error_code):
self.CODE = error_code
class MockError(object):
feilmelding = "Error message here."
class MockErrorDetail(object):
feil = MockError()
class MockResponseOK(object):
def __init__(self, error_code=None, result=True):
if result:
self.RESULT = "Results"
else:
self.MESSAGE = MockMessage(error_code)
class MockResponseError(object):
faultstring = "Fault string"
detail = MockErrorDetail()
def __init__(self):
pass
class MockResponseParsed(object):
HOV = list()
def __init__(self):
self.HOV.append(("FODT", "010107"))
self.HOV.append(("PERS", "50160"))
self.HOV.append(("NAVN-F", "TOMAS"))
self.HOV.append(("NAVN-M", ""))
self.HOV.append(("UKJENTFELT", "something"))
class PersonTests(TestCase):
def test_bad_search_fields(self):
pass
def test_error_response(self):
pass
def tets_empty_response(self):
pass
def test_result(self):
pass
class ResponseTests(TestCase):
def test_has_result(self):
result = MockResponseOK()
parsed = parse_response((200, result))
self.assertEqual(parsed, "Results")
def test_no_result(self):
result = MockResponseOK(error_code="1", result=False)
parsed = parse_response((200, result))
self.assertIsNone(parsed)
@staticmethod
def test_result_with_error():
result = MockResponseOK(error_code="2", result=False)
with pytest.raises(DSFServiceError):
parse_response((200, result))
@staticmethod
def test_uknown_error():
result = MockResponseError()
with pytest.raises(DSFServiceError):
parse_response((500, result))
class TranslationTests(TestCase):
def test_output_translation(self):
response = MockResponseParsed()
translated = translate_output_fields(response)
self.assertIsInstance(translated, dict)
self.assertTrue("date_of_birth" in translated)
self.assertTrue("person_number" in translated)
self.assertTrue("first_name" in translated)
self.assertTrue("middle_name" in translated)
# Verify capitalisation and None
self.assertEqual(translated["first_name"], "Tomas")
self.assertEqual(translated["UKJENTFELT"], "Something")
# Verify that empty strings are translated to None
self.assertIsNone(translated["middle_name"])
def test_input_translation(self):
valid_input = {
"end_user": "unicornis-test",
"first_name": "tomas",
"last_name": "topstad"
}
invalid_input_invalid_field = {
"end_user": "unicornis_test",
"invalidfield": "somevalue"
}
with pytest.raises(ValueError):
translate_input_fields(**invalid_input_invalid_field)
translated = translate_input_fields(**valid_input)
self.assertTrue("saksref" in translated)
self.assertTrue("fornavn" in translated)
self.assertTrue("etternavn" in translated)
|
flexible
|
{
"blob_id": "bbff797fab4ac7dc7e6adb81c0eeda561f8ee147",
"index": 9603,
"step-1": "<mask token>\n\n\nclass MockResponseError(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MockResponseParsed(object):\n HOV = list()\n\n def __init__(self):\n self.HOV.append(('FODT', '010107'))\n self.HOV.append(('PERS', '50160'))\n self.HOV.append(('NAVN-F', 'TOMAS'))\n self.HOV.append(('NAVN-M', ''))\n self.HOV.append(('UKJENTFELT', 'something'))\n\n\nclass PersonTests(TestCase):\n\n def test_bad_search_fields(self):\n pass\n\n def test_error_response(self):\n pass\n\n def tets_empty_response(self):\n pass\n\n def test_result(self):\n pass\n\n\nclass ResponseTests(TestCase):\n\n def test_has_result(self):\n result = MockResponseOK()\n parsed = parse_response((200, result))\n self.assertEqual(parsed, 'Results')\n\n def test_no_result(self):\n result = MockResponseOK(error_code='1', result=False)\n parsed = parse_response((200, result))\n self.assertIsNone(parsed)\n\n @staticmethod\n def test_result_with_error():\n result = MockResponseOK(error_code='2', result=False)\n with pytest.raises(DSFServiceError):\n parse_response((200, result))\n\n @staticmethod\n def test_uknown_error():\n result = MockResponseError()\n with pytest.raises(DSFServiceError):\n parse_response((500, result))\n\n\nclass TranslationTests(TestCase):\n\n def test_output_translation(self):\n response = MockResponseParsed()\n translated = translate_output_fields(response)\n self.assertIsInstance(translated, dict)\n self.assertTrue('date_of_birth' in translated)\n self.assertTrue('person_number' in translated)\n self.assertTrue('first_name' in translated)\n self.assertTrue('middle_name' in translated)\n self.assertEqual(translated['first_name'], 'Tomas')\n self.assertEqual(translated['UKJENTFELT'], 'Something')\n self.assertIsNone(translated['middle_name'])\n\n def test_input_translation(self):\n valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',\n 'last_name': 'topstad'}\n invalid_input_invalid_field = {'end_user': 'unicornis_test',\n 'invalidfield': 
'somevalue'}\n with pytest.raises(ValueError):\n translate_input_fields(**invalid_input_invalid_field)\n translated = translate_input_fields(**valid_input)\n self.assertTrue('saksref' in translated)\n self.assertTrue('fornavn' in translated)\n self.assertTrue('etternavn' in translated)\n",
"step-2": "<mask token>\n\n\nclass MockError(object):\n <mask token>\n\n\nclass MockErrorDetail(object):\n feil = MockError()\n\n\nclass MockResponseOK(object):\n\n def __init__(self, error_code=None, result=True):\n if result:\n self.RESULT = 'Results'\n else:\n self.MESSAGE = MockMessage(error_code)\n\n\nclass MockResponseError(object):\n faultstring = 'Fault string'\n detail = MockErrorDetail()\n\n def __init__(self):\n pass\n\n\nclass MockResponseParsed(object):\n HOV = list()\n\n def __init__(self):\n self.HOV.append(('FODT', '010107'))\n self.HOV.append(('PERS', '50160'))\n self.HOV.append(('NAVN-F', 'TOMAS'))\n self.HOV.append(('NAVN-M', ''))\n self.HOV.append(('UKJENTFELT', 'something'))\n\n\nclass PersonTests(TestCase):\n\n def test_bad_search_fields(self):\n pass\n\n def test_error_response(self):\n pass\n\n def tets_empty_response(self):\n pass\n\n def test_result(self):\n pass\n\n\nclass ResponseTests(TestCase):\n\n def test_has_result(self):\n result = MockResponseOK()\n parsed = parse_response((200, result))\n self.assertEqual(parsed, 'Results')\n\n def test_no_result(self):\n result = MockResponseOK(error_code='1', result=False)\n parsed = parse_response((200, result))\n self.assertIsNone(parsed)\n\n @staticmethod\n def test_result_with_error():\n result = MockResponseOK(error_code='2', result=False)\n with pytest.raises(DSFServiceError):\n parse_response((200, result))\n\n @staticmethod\n def test_uknown_error():\n result = MockResponseError()\n with pytest.raises(DSFServiceError):\n parse_response((500, result))\n\n\nclass TranslationTests(TestCase):\n\n def test_output_translation(self):\n response = MockResponseParsed()\n translated = translate_output_fields(response)\n self.assertIsInstance(translated, dict)\n self.assertTrue('date_of_birth' in translated)\n self.assertTrue('person_number' in translated)\n self.assertTrue('first_name' in translated)\n self.assertTrue('middle_name' in translated)\n self.assertEqual(translated['first_name'], 
'Tomas')\n self.assertEqual(translated['UKJENTFELT'], 'Something')\n self.assertIsNone(translated['middle_name'])\n\n def test_input_translation(self):\n valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',\n 'last_name': 'topstad'}\n invalid_input_invalid_field = {'end_user': 'unicornis_test',\n 'invalidfield': 'somevalue'}\n with pytest.raises(ValueError):\n translate_input_fields(**invalid_input_invalid_field)\n translated = translate_input_fields(**valid_input)\n self.assertTrue('saksref' in translated)\n self.assertTrue('fornavn' in translated)\n self.assertTrue('etternavn' in translated)\n",
"step-3": "<mask token>\n\n\nclass MockMessage(object):\n <mask token>\n <mask token>\n\n\nclass MockError(object):\n feilmelding = 'Error message here.'\n\n\nclass MockErrorDetail(object):\n feil = MockError()\n\n\nclass MockResponseOK(object):\n\n def __init__(self, error_code=None, result=True):\n if result:\n self.RESULT = 'Results'\n else:\n self.MESSAGE = MockMessage(error_code)\n\n\nclass MockResponseError(object):\n faultstring = 'Fault string'\n detail = MockErrorDetail()\n\n def __init__(self):\n pass\n\n\nclass MockResponseParsed(object):\n HOV = list()\n\n def __init__(self):\n self.HOV.append(('FODT', '010107'))\n self.HOV.append(('PERS', '50160'))\n self.HOV.append(('NAVN-F', 'TOMAS'))\n self.HOV.append(('NAVN-M', ''))\n self.HOV.append(('UKJENTFELT', 'something'))\n\n\nclass PersonTests(TestCase):\n\n def test_bad_search_fields(self):\n pass\n\n def test_error_response(self):\n pass\n\n def tets_empty_response(self):\n pass\n\n def test_result(self):\n pass\n\n\nclass ResponseTests(TestCase):\n\n def test_has_result(self):\n result = MockResponseOK()\n parsed = parse_response((200, result))\n self.assertEqual(parsed, 'Results')\n\n def test_no_result(self):\n result = MockResponseOK(error_code='1', result=False)\n parsed = parse_response((200, result))\n self.assertIsNone(parsed)\n\n @staticmethod\n def test_result_with_error():\n result = MockResponseOK(error_code='2', result=False)\n with pytest.raises(DSFServiceError):\n parse_response((200, result))\n\n @staticmethod\n def test_uknown_error():\n result = MockResponseError()\n with pytest.raises(DSFServiceError):\n parse_response((500, result))\n\n\nclass TranslationTests(TestCase):\n\n def test_output_translation(self):\n response = MockResponseParsed()\n translated = translate_output_fields(response)\n self.assertIsInstance(translated, dict)\n self.assertTrue('date_of_birth' in translated)\n self.assertTrue('person_number' in translated)\n self.assertTrue('first_name' in translated)\n 
self.assertTrue('middle_name' in translated)\n self.assertEqual(translated['first_name'], 'Tomas')\n self.assertEqual(translated['UKJENTFELT'], 'Something')\n self.assertIsNone(translated['middle_name'])\n\n def test_input_translation(self):\n valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',\n 'last_name': 'topstad'}\n invalid_input_invalid_field = {'end_user': 'unicornis_test',\n 'invalidfield': 'somevalue'}\n with pytest.raises(ValueError):\n translate_input_fields(**invalid_input_invalid_field)\n translated = translate_input_fields(**valid_input)\n self.assertTrue('saksref' in translated)\n self.assertTrue('fornavn' in translated)\n self.assertTrue('etternavn' in translated)\n",
"step-4": "from __future__ import unicode_literals\nimport pytest\nfrom unittest import TestCase\nfrom pydsf.exceptions import DSFServiceError\nfrom pydsf.service.response import parse_response\nfrom pydsf.service.translations import translate_input_fields, translate_output_fields\n\n\nclass MockMessage(object):\n SUMMARY = 'Error summary'\n\n def __init__(self, error_code):\n self.CODE = error_code\n\n\nclass MockError(object):\n feilmelding = 'Error message here.'\n\n\nclass MockErrorDetail(object):\n feil = MockError()\n\n\nclass MockResponseOK(object):\n\n def __init__(self, error_code=None, result=True):\n if result:\n self.RESULT = 'Results'\n else:\n self.MESSAGE = MockMessage(error_code)\n\n\nclass MockResponseError(object):\n faultstring = 'Fault string'\n detail = MockErrorDetail()\n\n def __init__(self):\n pass\n\n\nclass MockResponseParsed(object):\n HOV = list()\n\n def __init__(self):\n self.HOV.append(('FODT', '010107'))\n self.HOV.append(('PERS', '50160'))\n self.HOV.append(('NAVN-F', 'TOMAS'))\n self.HOV.append(('NAVN-M', ''))\n self.HOV.append(('UKJENTFELT', 'something'))\n\n\nclass PersonTests(TestCase):\n\n def test_bad_search_fields(self):\n pass\n\n def test_error_response(self):\n pass\n\n def tets_empty_response(self):\n pass\n\n def test_result(self):\n pass\n\n\nclass ResponseTests(TestCase):\n\n def test_has_result(self):\n result = MockResponseOK()\n parsed = parse_response((200, result))\n self.assertEqual(parsed, 'Results')\n\n def test_no_result(self):\n result = MockResponseOK(error_code='1', result=False)\n parsed = parse_response((200, result))\n self.assertIsNone(parsed)\n\n @staticmethod\n def test_result_with_error():\n result = MockResponseOK(error_code='2', result=False)\n with pytest.raises(DSFServiceError):\n parse_response((200, result))\n\n @staticmethod\n def test_uknown_error():\n result = MockResponseError()\n with pytest.raises(DSFServiceError):\n parse_response((500, result))\n\n\nclass TranslationTests(TestCase):\n\n 
def test_output_translation(self):\n response = MockResponseParsed()\n translated = translate_output_fields(response)\n self.assertIsInstance(translated, dict)\n self.assertTrue('date_of_birth' in translated)\n self.assertTrue('person_number' in translated)\n self.assertTrue('first_name' in translated)\n self.assertTrue('middle_name' in translated)\n self.assertEqual(translated['first_name'], 'Tomas')\n self.assertEqual(translated['UKJENTFELT'], 'Something')\n self.assertIsNone(translated['middle_name'])\n\n def test_input_translation(self):\n valid_input = {'end_user': 'unicornis-test', 'first_name': 'tomas',\n 'last_name': 'topstad'}\n invalid_input_invalid_field = {'end_user': 'unicornis_test',\n 'invalidfield': 'somevalue'}\n with pytest.raises(ValueError):\n translate_input_fields(**invalid_input_invalid_field)\n translated = translate_input_fields(**valid_input)\n self.assertTrue('saksref' in translated)\n self.assertTrue('fornavn' in translated)\n self.assertTrue('etternavn' in translated)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport pytest\nfrom unittest import TestCase\n\nfrom pydsf.exceptions import DSFServiceError\nfrom pydsf.service.response import parse_response\nfrom pydsf.service.translations import translate_input_fields, translate_output_fields\n\n\nclass MockMessage(object):\n SUMMARY = \"Error summary\"\n\n def __init__(self, error_code):\n self.CODE = error_code\n\n\nclass MockError(object):\n feilmelding = \"Error message here.\"\n\n\nclass MockErrorDetail(object):\n feil = MockError()\n\n\nclass MockResponseOK(object):\n\n def __init__(self, error_code=None, result=True):\n if result:\n self.RESULT = \"Results\"\n else:\n self.MESSAGE = MockMessage(error_code)\n\n\nclass MockResponseError(object):\n\n faultstring = \"Fault string\"\n detail = MockErrorDetail()\n\n def __init__(self):\n pass\n\n\nclass MockResponseParsed(object):\n HOV = list()\n\n def __init__(self):\n self.HOV.append((\"FODT\", \"010107\"))\n self.HOV.append((\"PERS\", \"50160\"))\n self.HOV.append((\"NAVN-F\", \"TOMAS\"))\n self.HOV.append((\"NAVN-M\", \"\"))\n self.HOV.append((\"UKJENTFELT\", \"something\"))\n\n\nclass PersonTests(TestCase):\n\n def test_bad_search_fields(self):\n pass\n\n def test_error_response(self):\n pass\n\n def tets_empty_response(self):\n pass\n\n def test_result(self):\n pass\n\n\nclass ResponseTests(TestCase):\n\n def test_has_result(self):\n result = MockResponseOK()\n parsed = parse_response((200, result))\n\n self.assertEqual(parsed, \"Results\")\n\n def test_no_result(self):\n result = MockResponseOK(error_code=\"1\", result=False)\n\n parsed = parse_response((200, result))\n\n self.assertIsNone(parsed)\n\n @staticmethod\n def test_result_with_error():\n result = MockResponseOK(error_code=\"2\", result=False)\n\n with pytest.raises(DSFServiceError):\n parse_response((200, result))\n\n @staticmethod\n def test_uknown_error():\n result = MockResponseError()\n\n with 
pytest.raises(DSFServiceError):\n parse_response((500, result))\n\n\nclass TranslationTests(TestCase):\n\n def test_output_translation(self):\n response = MockResponseParsed()\n translated = translate_output_fields(response)\n\n self.assertIsInstance(translated, dict)\n self.assertTrue(\"date_of_birth\" in translated)\n self.assertTrue(\"person_number\" in translated)\n self.assertTrue(\"first_name\" in translated)\n self.assertTrue(\"middle_name\" in translated)\n\n # Verify capitalisation and None\n self.assertEqual(translated[\"first_name\"], \"Tomas\")\n self.assertEqual(translated[\"UKJENTFELT\"], \"Something\")\n\n # Verify that empty strings are translated to None\n self.assertIsNone(translated[\"middle_name\"])\n\n def test_input_translation(self):\n valid_input = {\n \"end_user\": \"unicornis-test\",\n \"first_name\": \"tomas\",\n \"last_name\": \"topstad\"\n }\n invalid_input_invalid_field = {\n \"end_user\": \"unicornis_test\",\n \"invalidfield\": \"somevalue\"\n }\n\n with pytest.raises(ValueError):\n translate_input_fields(**invalid_input_invalid_field)\n\n translated = translate_input_fields(**valid_input)\n self.assertTrue(\"saksref\" in translated)\n self.assertTrue(\"fornavn\" in translated)\n self.assertTrue(\"etternavn\" in translated)\n",
"step-ids": [
17,
24,
26,
29,
30
]
}
|
[
17,
24,
26,
29,
30
] |
<|reserved_special_token_0|>
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
<|reserved_special_token_0|>
for c in state:
if c == '#':
break
start += 1
<|reserved_special_token_0|>
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total - previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
<|reserved_special_token_0|>
print(solution)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
state, rules = read_input()
start = 0
for c in state:
if c == '#':
break
start += 1
gen = 0
start_idx = -2
previos = sum_plants(state, start)
prev_diff = 0
same_diff_count = 0
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total - previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
b = total - diff * gen
solution = diff * 50000000000 + b
print(solution)
<|reserved_special_token_1|>
import re
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search('initial state:\\s([\\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile('([\\.#]{5})\\s=>\\s([\\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state) - 3:
curr_str = initial_state[i - 2:i + 3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
state, rules = read_input()
start = 0
for c in state:
if c == '#':
break
start += 1
gen = 0
start_idx = -2
previos = sum_plants(state, start)
prev_diff = 0
same_diff_count = 0
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total - previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
b = total - diff * gen
solution = diff * 50000000000 + b
print(solution)
<|reserved_special_token_1|>
import re
def read_input():
with open('../input/day12.txt') as f:
lines = f.readlines()
m = re.search(r'initial state:\s([\.#]+)', lines[0])
initial_state = m.groups()[0]
prog = re.compile(r'([\.#]{5})\s=>\s([\.#])')
rules = []
for i in range(2, len(lines)):
m = prog.search(lines[i])
groups = m.groups()
if groups[1] == '#':
rules.append((groups[0], groups[1]))
return initial_state, rules
def apply_gen(initial_state, rules, start):
next_state = []
initial_state = '....' + initial_state.strip('.') + '....'
set_start_idx = False
i = 2
while i <= len(initial_state)-3:
curr_str = initial_state[i-2:i+3]
rule_matches = None
for r in rules:
if curr_str == r[0]:
rule_matches = r
break
if rule_matches:
if not set_start_idx:
start_idx = i - 4
set_start_idx = True
next_state.append(rule_matches[1])
else:
next_state.append('.')
i += 1
return start + start_idx, ''.join(next_state).strip('.')
def sum_plants(state, start):
i = start
plant_count = 0
for c in state:
if c == '#':
plant_count += i
i += 1
return plant_count
state, rules = read_input()
start = 0
for c in state:
if c == '#':
break
start += 1
gen = 0
start_idx = -2
previos = sum_plants(state, start)
prev_diff = 0
same_diff_count = 0
while gen < 1000:
start, state = apply_gen(state, rules, start)
total = sum_plants(state, start)
diff = total-previos
gen += 1
if diff == prev_diff:
same_diff_count += 1
if same_diff_count == 100:
break
previos = total
prev_diff = diff
b = total - diff*gen
solution = diff * 50000000000 + b
print(solution)
|
flexible
|
{
"blob_id": "27f001f4e79291825c56642693894375fef3e66a",
"index": 1647,
"step-1": "<mask token>\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\n<mask token>\nfor c in state:\n if c == '#':\n break\n start += 1\n<mask token>\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total - previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\n<mask token>\nprint(solution)\n",
"step-3": "<mask token>\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\nstate, rules = read_input()\nstart = 0\nfor c in state:\n if c == '#':\n break\n start += 1\ngen = 0\nstart_idx = -2\nprevios = sum_plants(state, start)\nprev_diff = 0\nsame_diff_count = 0\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total - previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\nb = total - diff * gen\nsolution = diff * 50000000000 + b\nprint(solution)\n",
"step-4": "import re\n\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search('initial state:\\\\s([\\\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile('([\\\\.#]{5})\\\\s=>\\\\s([\\\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state) - 3:\n curr_str = initial_state[i - 2:i + 3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\n\nstate, rules = read_input()\nstart = 0\nfor c in state:\n if c == '#':\n break\n start += 1\ngen = 0\nstart_idx = -2\nprevios = sum_plants(state, start)\nprev_diff = 0\nsame_diff_count = 0\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total - previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\nb = total - diff * gen\nsolution = diff * 50000000000 + b\nprint(solution)\n",
"step-5": "import re\n\ndef read_input():\n with open('../input/day12.txt') as f:\n lines = f.readlines()\n m = re.search(r'initial state:\\s([\\.#]+)', lines[0])\n initial_state = m.groups()[0]\n prog = re.compile(r'([\\.#]{5})\\s=>\\s([\\.#])')\n rules = []\n for i in range(2, len(lines)):\n m = prog.search(lines[i])\n groups = m.groups()\n if groups[1] == '#':\n rules.append((groups[0], groups[1]))\n return initial_state, rules\n\ndef apply_gen(initial_state, rules, start):\n next_state = []\n initial_state = '....' + initial_state.strip('.') + '....'\n set_start_idx = False\n i = 2\n while i <= len(initial_state)-3:\n curr_str = initial_state[i-2:i+3]\n rule_matches = None\n for r in rules:\n if curr_str == r[0]:\n rule_matches = r\n break\n if rule_matches:\n if not set_start_idx:\n start_idx = i - 4\n set_start_idx = True\n next_state.append(rule_matches[1])\n else:\n next_state.append('.')\n i += 1\n return start + start_idx, ''.join(next_state).strip('.')\n\ndef sum_plants(state, start):\n i = start\n plant_count = 0\n for c in state:\n if c == '#':\n plant_count += i\n i += 1\n return plant_count\n\nstate, rules = read_input()\nstart = 0\nfor c in state:\n if c == '#':\n break\n start += 1\ngen = 0\nstart_idx = -2\nprevios = sum_plants(state, start)\nprev_diff = 0\nsame_diff_count = 0\nwhile gen < 1000:\n start, state = apply_gen(state, rules, start)\n total = sum_plants(state, start)\n diff = total-previos\n gen += 1\n if diff == prev_diff:\n same_diff_count += 1\n if same_diff_count == 100:\n break\n previos = total\n prev_diff = diff\nb = total - diff*gen\nsolution = diff * 50000000000 + b\nprint(solution)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class SmartChineseAnalyzer:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class SmartChineseAnalyzer:
<|reserved_special_token_0|>
def create_components(self, filename):
if self.stopwords:
result = StopFilter(result, self.stopwords)
return TokenStreamComponents(tokenizer, result)
<|reserved_special_token_1|>
class SmartChineseAnalyzer:
def __init__(self):
pass
def create_components(self, filename):
if self.stopwords:
result = StopFilter(result, self.stopwords)
return TokenStreamComponents(tokenizer, result)
<|reserved_special_token_1|>
class SmartChineseAnalyzer:
def __init__(self):
pass
def create_components(self, filename):
#tokenizer = SentenceTokenize(filename)
#result = WordTokenFilter(tokenizer)
#result = PorterStemFilter(result)
if self.stopwords:
result = StopFilter(result, self.stopwords)
return TokenStreamComponents(tokenizer, result)
|
flexible
|
{
"blob_id": "e486e0ab91a8f5671435f5bbcf5340a62a970d3a",
"index": 8670,
"step-1": "<mask token>\n",
"step-2": "class SmartChineseAnalyzer:\n <mask token>\n <mask token>\n",
"step-3": "class SmartChineseAnalyzer:\n <mask token>\n\n def create_components(self, filename):\n if self.stopwords:\n result = StopFilter(result, self.stopwords)\n return TokenStreamComponents(tokenizer, result)\n",
"step-4": "class SmartChineseAnalyzer:\n\n def __init__(self):\n pass\n\n def create_components(self, filename):\n if self.stopwords:\n result = StopFilter(result, self.stopwords)\n return TokenStreamComponents(tokenizer, result)\n",
"step-5": "class SmartChineseAnalyzer:\n def __init__(self):\n pass\n\n def create_components(self, filename):\n #tokenizer = SentenceTokenize(filename)\n #result = WordTokenFilter(tokenizer)\n #result = PorterStemFilter(result)\n \n if self.stopwords:\n result = StopFilter(result, self.stopwords)\n return TokenStreamComponents(tokenizer, result)\n\n\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LoadStudentsTTable(LoadTable):
<|reserved_special_token_0|>
def __init__(self, tails):
"""
Parameters
----------
tails : int
1 or 2.
"""
if tails == 1:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_one_tail.csv'))
else:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_two_tail.csv'))
temp_table = self.load_table()
self.t_table = temp_table.set_index('df')
<|reserved_special_token_0|>
def find_confidence(self, t, df):
""" Finds confidence level (area) of ONE tail of distribution.
Parameters
----------
t : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
t_table = self.t_table
nearest_df = round(find_nearest(t_table.index, df), 0)
nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)
for col in list(t_table):
if nearest_t == round(t_table[col][nearest_df], 6):
confidence = (1.0 - float(col)) / 2.0
return confidence
class LoadChi2Table(LoadTable):
""" A normal table object.
"""
def __init__(self):
"""
"""
LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))
temp_table = self.load_table()
self.chi2_table = temp_table.set_index('df')
def find_chi2(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
chi2 : float
The test statistic.
"""
chi2_table = self.chi2_table
nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -
confidence), 4)
nearest_df = round(find_nearest(chi2_table.index, df), 0)
chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)
return chi2
def find_confidence(self, chi2, df):
""" Finds confidence level (area) of right-hand-side of distribution.
Parameters
----------
chi2 : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
chi2_table = self.chi2_table
nearest_df = round(find_nearest(chi2_table.index, df), 0)
nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)
for col in list(chi2_table):
if nearest_chi2 == round(chi2_table[col][nearest_df], 6):
confidence = 1.0 - float(col)
return confidence
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoadTable(object):
<|reserved_special_token_0|>
def __init__(self, filename):
self.filename = filename
def load_table(self):
table = pd.read_csv(self.filename)
return table
class LoadNormalTable(LoadTable):
""" A normal table object.
"""
def __init__(self):
LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))
temp_table = self.load_table()
self.normal_table = temp_table.set_index('z')
def find_z(self, prob, tails=1):
""" Given probability, return nearest Z-score from normal table.
Parameters
----------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
tails : int
1 or 2. The prob will be divided by this number (all
calculations assume one tail). Do not change to 2 if your
`prob` value already is divided in half.
Returns
-------
z_score : float
The Z-score or standard score.
"""
prob /= float(tails)
normal_table = self.normal_table
nearest_probs = []
for col in list(normal_table):
nearest_probs.append(find_nearest(normal_table[col], prob))
nearest_probs = np.asarray(nearest_probs)
final_prob = find_nearest(nearest_probs, prob)
for col in list(normal_table):
if final_prob in list(normal_table[col]):
z1 = col
for i in normal_table.index:
if final_prob == normal_table[z1][i]:
z0 = i
z_score = float(z0) + float(z1)
return z_score
def find_prob(self, z, tails=1):
""" Given Z-score, return nearest probability from table.
Parameters
----------
z : float
The Z-score or standard score.
tails : int
1 or 2.
Returns
-------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
"""
normal_table = self.normal_table
if z > 4:
prob = 0.5
else:
z0 = round(z, 1)
z1 = str(round(z, 2) - z0)
prob = round(normal_table[z1][z0], 6)
prob *= tails
return prob
class LoadStudentsTTable(LoadTable):
""" A normal table object.
"""
def __init__(self, tails):
"""
Parameters
----------
tails : int
1 or 2.
"""
if tails == 1:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_one_tail.csv'))
else:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_two_tail.csv'))
temp_table = self.load_table()
self.t_table = temp_table.set_index('df')
def find_t(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
t_score : float
The test statistic.
"""
t_table = self.t_table
nearest_confidence = round(find_nearest(list(t_table), 1.0 -
confidence), 4)
nearest_df = round(find_nearest(t_table.index, df), 0)
t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)
return t_score
def find_confidence(self, t, df):
""" Finds confidence level (area) of ONE tail of distribution.
Parameters
----------
t : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
t_table = self.t_table
nearest_df = round(find_nearest(t_table.index, df), 0)
nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)
for col in list(t_table):
if nearest_t == round(t_table[col][nearest_df], 6):
confidence = (1.0 - float(col)) / 2.0
return confidence
class LoadChi2Table(LoadTable):
""" A normal table object.
"""
def __init__(self):
"""
"""
LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))
temp_table = self.load_table()
self.chi2_table = temp_table.set_index('df')
def find_chi2(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
chi2 : float
The test statistic.
"""
chi2_table = self.chi2_table
nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -
confidence), 4)
nearest_df = round(find_nearest(chi2_table.index, df), 0)
chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)
return chi2
def find_confidence(self, chi2, df):
""" Finds confidence level (area) of right-hand-side of distribution.
Parameters
----------
chi2 : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
chi2_table = self.chi2_table
nearest_df = round(find_nearest(chi2_table.index, df), 0)
nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)
for col in list(chi2_table):
if nearest_chi2 == round(chi2_table[col][nearest_df], 6):
confidence = 1.0 - float(col)
return confidence
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoadTable(object):
"""
"""
def __init__(self, filename):
self.filename = filename
def load_table(self):
table = pd.read_csv(self.filename)
return table
class LoadNormalTable(LoadTable):
""" A normal table object.
"""
def __init__(self):
LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))
temp_table = self.load_table()
self.normal_table = temp_table.set_index('z')
def find_z(self, prob, tails=1):
""" Given probability, return nearest Z-score from normal table.
Parameters
----------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
tails : int
1 or 2. The prob will be divided by this number (all
calculations assume one tail). Do not change to 2 if your
`prob` value already is divided in half.
Returns
-------
z_score : float
The Z-score or standard score.
"""
prob /= float(tails)
normal_table = self.normal_table
nearest_probs = []
for col in list(normal_table):
nearest_probs.append(find_nearest(normal_table[col], prob))
nearest_probs = np.asarray(nearest_probs)
final_prob = find_nearest(nearest_probs, prob)
for col in list(normal_table):
if final_prob in list(normal_table[col]):
z1 = col
for i in normal_table.index:
if final_prob == normal_table[z1][i]:
z0 = i
z_score = float(z0) + float(z1)
return z_score
def find_prob(self, z, tails=1):
""" Given Z-score, return nearest probability from table.
Parameters
----------
z : float
The Z-score or standard score.
tails : int
1 or 2.
Returns
-------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
"""
normal_table = self.normal_table
if z > 4:
prob = 0.5
else:
z0 = round(z, 1)
z1 = str(round(z, 2) - z0)
prob = round(normal_table[z1][z0], 6)
prob *= tails
return prob
class LoadStudentsTTable(LoadTable):
""" A normal table object.
"""
def __init__(self, tails):
"""
Parameters
----------
tails : int
1 or 2.
"""
if tails == 1:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_one_tail.csv'))
else:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_two_tail.csv'))
temp_table = self.load_table()
self.t_table = temp_table.set_index('df')
def find_t(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
t_score : float
The test statistic.
"""
t_table = self.t_table
nearest_confidence = round(find_nearest(list(t_table), 1.0 -
confidence), 4)
nearest_df = round(find_nearest(t_table.index, df), 0)
t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)
return t_score
def find_confidence(self, t, df):
""" Finds confidence level (area) of ONE tail of distribution.
Parameters
----------
t : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
t_table = self.t_table
nearest_df = round(find_nearest(t_table.index, df), 0)
nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)
for col in list(t_table):
if nearest_t == round(t_table[col][nearest_df], 6):
confidence = (1.0 - float(col)) / 2.0
return confidence
class LoadChi2Table(LoadTable):
""" A normal table object.
"""
def __init__(self):
"""
"""
LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))
temp_table = self.load_table()
self.chi2_table = temp_table.set_index('df')
def find_chi2(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
chi2 : float
The test statistic.
"""
chi2_table = self.chi2_table
nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -
confidence), 4)
nearest_df = round(find_nearest(chi2_table.index, df), 0)
chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)
return chi2
def find_confidence(self, chi2, df):
""" Finds confidence level (area) of right-hand-side of distribution.
Parameters
----------
chi2 : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
chi2_table = self.chi2_table
nearest_df = round(find_nearest(chi2_table.index, df), 0)
nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)
for col in list(chi2_table):
if nearest_chi2 == round(chi2_table[col][nearest_df], 6):
confidence = 1.0 - float(col)
return confidence
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
p = os.path.abspath(__file__)
p = '/'.join(p.split('/')[:-1])
class LoadTable(object):
"""
"""
def __init__(self, filename):
self.filename = filename
def load_table(self):
table = pd.read_csv(self.filename)
return table
class LoadNormalTable(LoadTable):
""" A normal table object.
"""
def __init__(self):
LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))
temp_table = self.load_table()
self.normal_table = temp_table.set_index('z')
def find_z(self, prob, tails=1):
""" Given probability, return nearest Z-score from normal table.
Parameters
----------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
tails : int
1 or 2. The prob will be divided by this number (all
calculations assume one tail). Do not change to 2 if your
`prob` value already is divided in half.
Returns
-------
z_score : float
The Z-score or standard score.
"""
prob /= float(tails)
normal_table = self.normal_table
nearest_probs = []
for col in list(normal_table):
nearest_probs.append(find_nearest(normal_table[col], prob))
nearest_probs = np.asarray(nearest_probs)
final_prob = find_nearest(nearest_probs, prob)
for col in list(normal_table):
if final_prob in list(normal_table[col]):
z1 = col
for i in normal_table.index:
if final_prob == normal_table[z1][i]:
z0 = i
z_score = float(z0) + float(z1)
return z_score
def find_prob(self, z, tails=1):
""" Given Z-score, return nearest probability from table.
Parameters
----------
z : float
The Z-score or standard score.
tails : int
1 or 2.
Returns
-------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
"""
normal_table = self.normal_table
if z > 4:
prob = 0.5
else:
z0 = round(z, 1)
z1 = str(round(z, 2) - z0)
prob = round(normal_table[z1][z0], 6)
prob *= tails
return prob
class LoadStudentsTTable(LoadTable):
""" A normal table object.
"""
def __init__(self, tails):
"""
Parameters
----------
tails : int
1 or 2.
"""
if tails == 1:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_one_tail.csv'))
else:
LoadTable.__init__(self, os.path.join(p,
'students_t_table_two_tail.csv'))
temp_table = self.load_table()
self.t_table = temp_table.set_index('df')
def find_t(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
t_score : float
The test statistic.
"""
t_table = self.t_table
nearest_confidence = round(find_nearest(list(t_table), 1.0 -
confidence), 4)
nearest_df = round(find_nearest(t_table.index, df), 0)
t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)
return t_score
def find_confidence(self, t, df):
""" Finds confidence level (area) of ONE tail of distribution.
Parameters
----------
t : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
t_table = self.t_table
nearest_df = round(find_nearest(t_table.index, df), 0)
nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)
for col in list(t_table):
if nearest_t == round(t_table[col][nearest_df], 6):
confidence = (1.0 - float(col)) / 2.0
return confidence
class LoadChi2Table(LoadTable):
""" A normal table object.
"""
def __init__(self):
"""
"""
LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))
temp_table = self.load_table()
self.chi2_table = temp_table.set_index('df')
def find_chi2(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
chi2 : float
The test statistic.
"""
chi2_table = self.chi2_table
nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -
confidence), 4)
nearest_df = round(find_nearest(chi2_table.index, df), 0)
chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)
return chi2
def find_confidence(self, chi2, df):
""" Finds confidence level (area) of right-hand-side of distribution.
Parameters
----------
chi2 : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
chi2_table = self.chi2_table
nearest_df = round(find_nearest(chi2_table.index, df), 0)
nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)
for col in list(chi2_table):
if nearest_chi2 == round(chi2_table[col][nearest_df], 6):
confidence = 1.0 - float(col)
return confidence
def find_nearest(array, value):
array = np.array(array, dtype=float)
value = float(value)
idx = pd.Series(np.abs(array - value)).idxmin()
return array[idx]
<|reserved_special_token_1|>
"""
Author:
C.M. Gosmeyer
Date:
Mar 2018
References:
"Introduction to Statistical Problem Solving in Geography",
J.C. McGrew, Jr., A.J. Lembo, Jr., C.B. Monroe
To Do:
Should tables interpolate?
y = y1 + ((x - x1) / (x2 - x1)) * (y2 - y1)
"""
import numpy as np
import pandas as pd
import os
# Get absolute path to table files.
p = os.path.abspath(__file__)
p = '/'.join(p.split('/')[:-1])
class LoadTable(object):
"""
"""
def __init__(self, filename):
self.filename = filename
def load_table(self):
table = pd.read_csv(self.filename)
return table
class LoadNormalTable(LoadTable):
""" A normal table object.
"""
def __init__(self):
LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))
temp_table = self.load_table()
self.normal_table = temp_table.set_index("z")
def find_z(self, prob, tails=1):
""" Given probability, return nearest Z-score from normal table.
Parameters
----------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
tails : int
1 or 2. The prob will be divided by this number (all
calculations assume one tail). Do not change to 2 if your
`prob` value already is divided in half.
Returns
-------
z_score : float
The Z-score or standard score.
"""
prob /= float(tails)
normal_table = self.normal_table
# Find closest probability in table
nearest_probs = []
for col in list(normal_table):
nearest_probs.append(find_nearest(normal_table[col], prob))
nearest_probs = np.asarray(nearest_probs)
final_prob = find_nearest(nearest_probs, prob)
# Return the column and row
for col in list(normal_table):
if final_prob in list(normal_table[col]):
z1 = col
for i in normal_table.index:
if final_prob == normal_table[z1][i]:
z0 = i
# Build Z-score
z_score = float(z0) + float(z1)
return z_score
def find_prob(self, z, tails=1):
""" Given Z-score, return nearest probability from table.
Parameters
----------
z : float
The Z-score or standard score.
tails : int
1 or 2.
Returns
-------
prob : float
The probability, i.e., the area under section of probability
distriubtion curve.
"""
normal_table = self.normal_table
if z > 4:
prob = 0.5
else:
z0 = round(z, 1)
z1 = str(round(z, 2) - z0)
prob = round(normal_table[z1][z0], 6)
prob *= tails
return prob
class LoadStudentsTTable(LoadTable):
""" A normal table object.
"""
def __init__(self, tails):
"""
Parameters
----------
tails : int
1 or 2.
"""
if tails == 1:
LoadTable.__init__(self, os.path.join(p, 'students_t_table_one_tail.csv'))
else:
LoadTable.__init__(self, os.path.join(p, 'students_t_table_two_tail.csv'))
temp_table = self.load_table()
self.t_table = temp_table.set_index("df")
def find_t(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
t_score : float
The test statistic.
"""
t_table = self.t_table
nearest_confidence = round(find_nearest(list(t_table), 1.0-confidence), 4)
nearest_df = round(find_nearest(t_table.index, df), 0)
t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)
return t_score
def find_confidence(self, t, df):
""" Finds confidence level (area) of ONE tail of distribution.
Parameters
----------
t : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
t_table = self.t_table
nearest_df = round(find_nearest(t_table.index, df), 0)
nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)
for col in list(t_table):
if nearest_t == round(t_table[col][nearest_df], 6):
# Subtract from one to get confidence, divide by two to get
# single section on positive side of distribution.
confidence = (1.0 - float(col)) / 2.0
return confidence
class LoadChi2Table(LoadTable):
""" A normal table object.
"""
def __init__(self):
"""
"""
LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))
temp_table = self.load_table()
self.chi2_table = temp_table.set_index("df")
def find_chi2(self, df, confidence=0.95):
""" Finds the T-value of distribution. The table goes to df-1000,
after which all is effectively infinity and returns same value.
By default the confidence level is 95%.
Parameters
----------
df : int
Degrees of freedom (size of sample).
confidence : float
The confidence level (area under distriubtion curve within
interval).
Returns
-------
chi2 : float
The test statistic.
"""
chi2_table = self.chi2_table
nearest_confidence = round(find_nearest(list(chi2_table), 1.0-confidence), 4)
nearest_df = round(find_nearest(chi2_table.index, df), 0)
chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)
return chi2
def find_confidence(self, chi2, df):
""" Finds confidence level (area) of right-hand-side of distribution.
Parameters
----------
chi2 : float
The test statistic.
df : int
Degrees of freedom (size of sample).
"""
chi2_table = self.chi2_table
nearest_df = round(find_nearest(chi2_table.index, df), 0)
nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)
for col in list(chi2_table):
if nearest_chi2 == round(chi2_table[col][nearest_df], 6):
# Subtract from one to get confidence.
confidence = (1.0 - float(col))
return confidence
def find_nearest(array, value):
    """ Return the element of `array` closest in value to `value`.

    Parameters
    ----------
    array : array_like
        Sequence of numbers (coerced to float).
    value : float
        Target value (coerced to float).

    Returns
    -------
    float
        The array element with the smallest absolute difference from
        `value` (the first such element on ties).
    """
    array = np.array(array, dtype=float)
    value = float(value)
    # argmin returns the index of the first minimum, matching the
    # first-occurrence behaviour of the pandas idxmin this replaces.
    idx = np.abs(array - value).argmin()
    return array[idx]
|
flexible
|
{
"blob_id": "adb6e33dc665f88c82fcc399688a8dbd67b1e3e3",
"index": 9894,
"step-1": "<mask token>\n\n\nclass LoadStudentsTTable(LoadTable):\n <mask token>\n\n def __init__(self, tails):\n \"\"\"\n\n Parameters\n ----------\n tails : int\n 1 or 2.\n \"\"\"\n if tails == 1:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_one_tail.csv'))\n else:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_two_tail.csv'))\n temp_table = self.load_table()\n self.t_table = temp_table.set_index('df')\n <mask token>\n\n def find_confidence(self, t, df):\n \"\"\" Finds confidence level (area) of ONE tail of distribution.\n\n Parameters\n ----------\n t : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample). \n \"\"\"\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n confidence = (1.0 - float(col)) / 2.0\n return confidence\n\n\nclass LoadChi2Table(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n \"\"\"\n LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))\n temp_table = self.load_table()\n self.chi2_table = temp_table.set_index('df')\n\n def find_chi2(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). 
\n\n Returns\n -------\n chi2 : float\n The test statistic.\n \"\"\"\n chi2_table = self.chi2_table\n nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -\n confidence), 4)\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)\n return chi2\n\n def find_confidence(self, chi2, df):\n \"\"\" Finds confidence level (area) of right-hand-side of distribution.\n\n Parameters\n ----------\n chi2 : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample).\n \"\"\"\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n confidence = 1.0 - float(col)\n return confidence\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LoadTable(object):\n <mask token>\n\n def __init__(self, filename):\n self.filename = filename\n\n def load_table(self):\n table = pd.read_csv(self.filename)\n return table\n\n\nclass LoadNormalTable(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self):\n LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))\n temp_table = self.load_table()\n self.normal_table = temp_table.set_index('z')\n\n def find_z(self, prob, tails=1):\n \"\"\" Given probability, return nearest Z-score from normal table.\n\n Parameters\n ----------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n tails : int\n 1 or 2. The prob will be divided by this number (all \n calculations assume one tail). Do not change to 2 if your\n `prob` value already is divided in half.\n\n Returns\n -------\n z_score : float\n The Z-score or standard score.\n \"\"\"\n prob /= float(tails)\n normal_table = self.normal_table\n nearest_probs = []\n for col in list(normal_table):\n nearest_probs.append(find_nearest(normal_table[col], prob))\n nearest_probs = np.asarray(nearest_probs)\n final_prob = find_nearest(nearest_probs, prob)\n for col in list(normal_table):\n if final_prob in list(normal_table[col]):\n z1 = col\n for i in normal_table.index:\n if final_prob == normal_table[z1][i]:\n z0 = i\n z_score = float(z0) + float(z1)\n return z_score\n\n def find_prob(self, z, tails=1):\n \"\"\" Given Z-score, return nearest probability from table.\n\n Parameters\n ----------\n z : float\n The Z-score or standard score.\n tails : int\n 1 or 2.\n\n Returns\n -------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n \"\"\"\n normal_table = self.normal_table\n if z > 4:\n prob = 0.5\n else:\n z0 = round(z, 1)\n z1 = str(round(z, 2) - z0)\n prob = round(normal_table[z1][z0], 6)\n prob *= tails\n return prob\n\n\nclass LoadStudentsTTable(LoadTable):\n \"\"\" A 
normal table object.\n \"\"\"\n\n def __init__(self, tails):\n \"\"\"\n\n Parameters\n ----------\n tails : int\n 1 or 2.\n \"\"\"\n if tails == 1:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_one_tail.csv'))\n else:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_two_tail.csv'))\n temp_table = self.load_table()\n self.t_table = temp_table.set_index('df')\n\n def find_t(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). \n\n Returns\n -------\n t_score : float\n The test statistic.\n \"\"\"\n t_table = self.t_table\n nearest_confidence = round(find_nearest(list(t_table), 1.0 -\n confidence), 4)\n nearest_df = round(find_nearest(t_table.index, df), 0)\n t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)\n return t_score\n\n def find_confidence(self, t, df):\n \"\"\" Finds confidence level (area) of ONE tail of distribution.\n\n Parameters\n ----------\n t : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample). \n \"\"\"\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n confidence = (1.0 - float(col)) / 2.0\n return confidence\n\n\nclass LoadChi2Table(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n \"\"\"\n LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))\n temp_table = self.load_table()\n self.chi2_table = temp_table.set_index('df')\n\n def find_chi2(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. 
The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). \n\n Returns\n -------\n chi2 : float\n The test statistic.\n \"\"\"\n chi2_table = self.chi2_table\n nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -\n confidence), 4)\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)\n return chi2\n\n def find_confidence(self, chi2, df):\n \"\"\" Finds confidence level (area) of right-hand-side of distribution.\n\n Parameters\n ----------\n chi2 : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample).\n \"\"\"\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n confidence = 1.0 - float(col)\n return confidence\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LoadTable(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n\n def load_table(self):\n table = pd.read_csv(self.filename)\n return table\n\n\nclass LoadNormalTable(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self):\n LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))\n temp_table = self.load_table()\n self.normal_table = temp_table.set_index('z')\n\n def find_z(self, prob, tails=1):\n \"\"\" Given probability, return nearest Z-score from normal table.\n\n Parameters\n ----------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n tails : int\n 1 or 2. The prob will be divided by this number (all \n calculations assume one tail). Do not change to 2 if your\n `prob` value already is divided in half.\n\n Returns\n -------\n z_score : float\n The Z-score or standard score.\n \"\"\"\n prob /= float(tails)\n normal_table = self.normal_table\n nearest_probs = []\n for col in list(normal_table):\n nearest_probs.append(find_nearest(normal_table[col], prob))\n nearest_probs = np.asarray(nearest_probs)\n final_prob = find_nearest(nearest_probs, prob)\n for col in list(normal_table):\n if final_prob in list(normal_table[col]):\n z1 = col\n for i in normal_table.index:\n if final_prob == normal_table[z1][i]:\n z0 = i\n z_score = float(z0) + float(z1)\n return z_score\n\n def find_prob(self, z, tails=1):\n \"\"\" Given Z-score, return nearest probability from table.\n\n Parameters\n ----------\n z : float\n The Z-score or standard score.\n tails : int\n 1 or 2.\n\n Returns\n -------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n \"\"\"\n normal_table = self.normal_table\n if z > 4:\n prob = 0.5\n else:\n z0 = round(z, 1)\n z1 = str(round(z, 2) - z0)\n prob = round(normal_table[z1][z0], 6)\n prob *= tails\n return prob\n\n\nclass LoadStudentsTTable(LoadTable):\n 
\"\"\" A normal table object.\n \"\"\"\n\n def __init__(self, tails):\n \"\"\"\n\n Parameters\n ----------\n tails : int\n 1 or 2.\n \"\"\"\n if tails == 1:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_one_tail.csv'))\n else:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_two_tail.csv'))\n temp_table = self.load_table()\n self.t_table = temp_table.set_index('df')\n\n def find_t(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). \n\n Returns\n -------\n t_score : float\n The test statistic.\n \"\"\"\n t_table = self.t_table\n nearest_confidence = round(find_nearest(list(t_table), 1.0 -\n confidence), 4)\n nearest_df = round(find_nearest(t_table.index, df), 0)\n t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)\n return t_score\n\n def find_confidence(self, t, df):\n \"\"\" Finds confidence level (area) of ONE tail of distribution.\n\n Parameters\n ----------\n t : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample). \n \"\"\"\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n confidence = (1.0 - float(col)) / 2.0\n return confidence\n\n\nclass LoadChi2Table(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n \"\"\"\n LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))\n temp_table = self.load_table()\n self.chi2_table = temp_table.set_index('df')\n\n def find_chi2(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. 
The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). \n\n Returns\n -------\n chi2 : float\n The test statistic.\n \"\"\"\n chi2_table = self.chi2_table\n nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -\n confidence), 4)\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)\n return chi2\n\n def find_confidence(self, chi2, df):\n \"\"\" Finds confidence level (area) of right-hand-side of distribution.\n\n Parameters\n ----------\n chi2 : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample).\n \"\"\"\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n confidence = 1.0 - float(col)\n return confidence\n\n\n<mask token>\n",
"step-4": "<mask token>\np = os.path.abspath(__file__)\np = '/'.join(p.split('/')[:-1])\n\n\nclass LoadTable(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n\n def load_table(self):\n table = pd.read_csv(self.filename)\n return table\n\n\nclass LoadNormalTable(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self):\n LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))\n temp_table = self.load_table()\n self.normal_table = temp_table.set_index('z')\n\n def find_z(self, prob, tails=1):\n \"\"\" Given probability, return nearest Z-score from normal table.\n\n Parameters\n ----------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n tails : int\n 1 or 2. The prob will be divided by this number (all \n calculations assume one tail). Do not change to 2 if your\n `prob` value already is divided in half.\n\n Returns\n -------\n z_score : float\n The Z-score or standard score.\n \"\"\"\n prob /= float(tails)\n normal_table = self.normal_table\n nearest_probs = []\n for col in list(normal_table):\n nearest_probs.append(find_nearest(normal_table[col], prob))\n nearest_probs = np.asarray(nearest_probs)\n final_prob = find_nearest(nearest_probs, prob)\n for col in list(normal_table):\n if final_prob in list(normal_table[col]):\n z1 = col\n for i in normal_table.index:\n if final_prob == normal_table[z1][i]:\n z0 = i\n z_score = float(z0) + float(z1)\n return z_score\n\n def find_prob(self, z, tails=1):\n \"\"\" Given Z-score, return nearest probability from table.\n\n Parameters\n ----------\n z : float\n The Z-score or standard score.\n tails : int\n 1 or 2.\n\n Returns\n -------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n \"\"\"\n normal_table = self.normal_table\n if z > 4:\n prob = 0.5\n else:\n z0 = round(z, 1)\n z1 = str(round(z, 2) - z0)\n prob = round(normal_table[z1][z0], 6)\n prob *= 
tails\n return prob\n\n\nclass LoadStudentsTTable(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self, tails):\n \"\"\"\n\n Parameters\n ----------\n tails : int\n 1 or 2.\n \"\"\"\n if tails == 1:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_one_tail.csv'))\n else:\n LoadTable.__init__(self, os.path.join(p,\n 'students_t_table_two_tail.csv'))\n temp_table = self.load_table()\n self.t_table = temp_table.set_index('df')\n\n def find_t(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). \n\n Returns\n -------\n t_score : float\n The test statistic.\n \"\"\"\n t_table = self.t_table\n nearest_confidence = round(find_nearest(list(t_table), 1.0 -\n confidence), 4)\n nearest_df = round(find_nearest(t_table.index, df), 0)\n t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)\n return t_score\n\n def find_confidence(self, t, df):\n \"\"\" Finds confidence level (area) of ONE tail of distribution.\n\n Parameters\n ----------\n t : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample). 
\n \"\"\"\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n confidence = (1.0 - float(col)) / 2.0\n return confidence\n\n\nclass LoadChi2Table(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n \"\"\"\n LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))\n temp_table = self.load_table()\n self.chi2_table = temp_table.set_index('df')\n\n def find_chi2(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). \n\n Returns\n -------\n chi2 : float\n The test statistic.\n \"\"\"\n chi2_table = self.chi2_table\n nearest_confidence = round(find_nearest(list(chi2_table), 1.0 -\n confidence), 4)\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)\n return chi2\n\n def find_confidence(self, chi2, df):\n \"\"\" Finds confidence level (area) of right-hand-side of distribution.\n\n Parameters\n ----------\n chi2 : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample).\n \"\"\"\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n confidence = 1.0 - float(col)\n return confidence\n\n\ndef find_nearest(array, value):\n array = np.array(array, dtype=float)\n value = float(value)\n idx = pd.Series(np.abs(array - value)).idxmin()\n return 
array[idx]\n",
"step-5": "\"\"\" \n\nAuthor:\n \n C.M. Gosmeyer\n\nDate:\n\n Mar 2018\n\nReferences:\n\n \"Introduction to Statistical Problem Solving in Geography\", \n J.C. McGrew, Jr., A.J. Lembo, Jr., C.B. Monroe\n\nTo Do:\n\n Should tables interpolate?\n\n y = y1 + ((x - x1) / (x2 - x1)) * (y2 - y1)\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\n\n# Get absolute path to table files.\np = os.path.abspath(__file__)\np = '/'.join(p.split('/')[:-1])\n\nclass LoadTable(object):\n \"\"\"\n \"\"\"\n def __init__(self, filename):\n self.filename = filename\n\n def load_table(self):\n table = pd.read_csv(self.filename)\n return table\n\nclass LoadNormalTable(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n def __init__(self):\n LoadTable.__init__(self, os.path.join(p, 'normal_table.csv'))\n temp_table = self.load_table()\n self.normal_table = temp_table.set_index(\"z\")\n\n def find_z(self, prob, tails=1):\n \"\"\" Given probability, return nearest Z-score from normal table.\n\n Parameters\n ----------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n tails : int\n 1 or 2. The prob will be divided by this number (all \n calculations assume one tail). 
Do not change to 2 if your\n `prob` value already is divided in half.\n\n Returns\n -------\n z_score : float\n The Z-score or standard score.\n \"\"\"\n prob /= float(tails)\n normal_table = self.normal_table\n\n # Find closest probability in table\n nearest_probs = []\n for col in list(normal_table):\n nearest_probs.append(find_nearest(normal_table[col], prob))\n nearest_probs = np.asarray(nearest_probs)\n final_prob = find_nearest(nearest_probs, prob)\n\n # Return the column and row\n for col in list(normal_table):\n if final_prob in list(normal_table[col]):\n z1 = col\n\n for i in normal_table.index:\n if final_prob == normal_table[z1][i]:\n z0 = i\n \n # Build Z-score\n z_score = float(z0) + float(z1) \n\n return z_score\n\n def find_prob(self, z, tails=1):\n \"\"\" Given Z-score, return nearest probability from table.\n\n Parameters\n ----------\n z : float\n The Z-score or standard score.\n tails : int\n 1 or 2.\n\n Returns\n -------\n prob : float\n The probability, i.e., the area under section of probability\n distriubtion curve.\n \"\"\"\n normal_table = self.normal_table\n\n if z > 4:\n prob = 0.5\n\n else:\n z0 = round(z, 1)\n z1 = str(round(z, 2) - z0)\n\n prob = round(normal_table[z1][z0], 6)\n prob *= tails\n\n return prob\n\nclass LoadStudentsTTable(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n def __init__(self, tails):\n \"\"\"\n\n Parameters\n ----------\n tails : int\n 1 or 2.\n \"\"\"\n if tails == 1:\n LoadTable.__init__(self, os.path.join(p, 'students_t_table_one_tail.csv'))\n else:\n LoadTable.__init__(self, os.path.join(p, 'students_t_table_two_tail.csv'))\n temp_table = self.load_table()\n self.t_table = temp_table.set_index(\"df\")\n\n def find_t(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. 
The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). \n\n Returns\n -------\n t_score : float\n The test statistic.\n \"\"\"\n t_table = self.t_table\n nearest_confidence = round(find_nearest(list(t_table), 1.0-confidence), 4)\n nearest_df = round(find_nearest(t_table.index, df), 0)\n t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)\n\n return t_score\n\n def find_confidence(self, t, df):\n \"\"\" Finds confidence level (area) of ONE tail of distribution.\n\n Parameters\n ----------\n t : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample). \n \"\"\"\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n # Subtract from one to get confidence, divide by two to get\n # single section on positive side of distribution.\n confidence = (1.0 - float(col)) / 2.0\n return confidence\n\nclass LoadChi2Table(LoadTable):\n \"\"\" A normal table object.\n \"\"\"\n def __init__(self):\n \"\"\"\n \"\"\"\n LoadTable.__init__(self, os.path.join(p, 'chi_square_table.csv'))\n temp_table = self.load_table()\n self.chi2_table = temp_table.set_index(\"df\") \n\n def find_chi2(self, df, confidence=0.95):\n \"\"\" Finds the T-value of distribution. The table goes to df-1000,\n after which all is effectively infinity and returns same value.\n\n By default the confidence level is 95%.\n\n Parameters\n ----------\n df : int\n Degrees of freedom (size of sample).\n confidence : float\n The confidence level (area under distriubtion curve within\n interval). 
\n\n Returns\n -------\n chi2 : float\n The test statistic.\n \"\"\"\n chi2_table = self.chi2_table\n nearest_confidence = round(find_nearest(list(chi2_table), 1.0-confidence), 4)\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)\n return chi2\n\n def find_confidence(self, chi2, df):\n \"\"\" Finds confidence level (area) of right-hand-side of distribution.\n\n Parameters\n ----------\n chi2 : float\n The test statistic.\n df : int\n Degrees of freedom (size of sample).\n \"\"\"\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n # Subtract from one to get confidence.\n confidence = (1.0 - float(col))\n return confidence\n\ndef find_nearest(array, value):\n array = np.array(array, dtype=float)\n value = float(value)\n idx = pd.Series((np.abs(array-value))).idxmin()\n return array[idx]\n",
"step-ids": [
8,
18,
19,
21,
23
]
}
|
[
8,
18,
19,
21,
23
] |
<|reserved_special_token_0|>
class TextDataset(BaseDataset):
def __init__(self, source_sentences: Union[Iterable, Sized],
target_sentences: Union[Iterable, Sized], shuffle: bool=True,
word_frequency_threshold: int=2):
super().__init__(source_sentences, target_sentences, shuffle)
self.word_frequency_threshold = word_frequency_threshold
self.tokenizer_pair = TokenizerPair()
@cached_property
def translation_references(self):
references = defaultdict(list)
for idx, sentence in enumerate(self.source):
split_sentence = text_to_word_sequence(self.target[idx])
references[sentence].append(split_sentence)
return references
@property
def source_max_sentence_length(self) ->int:
return self.max_sentence_length('source')
@property
def target_max_sentence_length(self) ->int:
return self.max_sentence_length('target')
<|reserved_special_token_0|>
@property
def target_vocab_size(self) ->int:
return self.tokenizer_pair.target.num_words
def get_vocab_size(self, level: str) ->int:
if not self.tokenizer_pair.is_tokenized:
raise ValueError('Dataset has not been tokenized yet')
return len(self.tokenizer_pair[level].word_index) + 1
<|reserved_special_token_0|>
def tokenize(self) ->None:
if not self.tokenizer_pair.is_tokenized:
self.tokenizer_pair['source'].fit_on_texts(self.source)
self.tokenizer_pair['target'].fit_on_texts(self.target)
self.tokenizer_pair['source'].num_words = len([word for word,
count in self.tokenizer_pair['source'].word_counts.items() if
count > self.word_frequency_threshold - 1])
self.tokenizer_pair['target'].num_words = len([word for word,
count in self.tokenizer_pair['target'].word_counts.items() if
count > self.word_frequency_threshold - 1])
def get_sequences(self, level: str) ->np.ndarray:
if not self.tokenizer_pair.is_tokenized:
self.tokenize()
sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])
return pad_sequences(sentences, maxlen=self.max_sentence_length(
level), padding='post')
def encode_output(self, sequences: np.array) ->np.array:
return to_categorical(sequences, self.target_vocab_size)
def sequence_to_sentence(self, sequence: Iterable) ->str:
target_sentence = [self.tokenizer_pair.target_index_word.get(
word_index, '') for word_index in sequence]
return ' '.join(target_sentence)
def sentence_to_sequence(self, sentence: str) ->np.ndarray:
return pad_sequences(self.tokenizer_pair['source'].
texts_to_sequences([sentence]), self.max_sentence_length(
'source'), padding='post')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseDataset(SourceTargetMixin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TokenizerPair(SourceTargetMixin):
def __init__(self, tokenizer_class=Tokenizer):
self.source = tokenizer_class()
self.target = tokenizer_class()
@property
def is_tokenized(self) ->bool:
return hasattr(self.source, 'word_index') and hasattr(self.target,
'word_index')
@cached_property
def target_index_word(self):
return {v: k for k, v in self.target.word_index.items()}
class TextDataset(BaseDataset):
def __init__(self, source_sentences: Union[Iterable, Sized],
target_sentences: Union[Iterable, Sized], shuffle: bool=True,
word_frequency_threshold: int=2):
super().__init__(source_sentences, target_sentences, shuffle)
self.word_frequency_threshold = word_frequency_threshold
self.tokenizer_pair = TokenizerPair()
@cached_property
def translation_references(self):
references = defaultdict(list)
for idx, sentence in enumerate(self.source):
split_sentence = text_to_word_sequence(self.target[idx])
references[sentence].append(split_sentence)
return references
@property
def source_max_sentence_length(self) ->int:
return self.max_sentence_length('source')
@property
def target_max_sentence_length(self) ->int:
return self.max_sentence_length('target')
@property
def source_vocab_size(self) ->int:
return self.tokenizer_pair.source.num_words
@property
def target_vocab_size(self) ->int:
return self.tokenizer_pair.target.num_words
def get_vocab_size(self, level: str) ->int:
if not self.tokenizer_pair.is_tokenized:
raise ValueError('Dataset has not been tokenized yet')
return len(self.tokenizer_pair[level].word_index) + 1
def max_sentence_length(self, level: str) ->int:
return max(len(line.split()) for line in self[level])
def tokenize(self) ->None:
if not self.tokenizer_pair.is_tokenized:
self.tokenizer_pair['source'].fit_on_texts(self.source)
self.tokenizer_pair['target'].fit_on_texts(self.target)
self.tokenizer_pair['source'].num_words = len([word for word,
count in self.tokenizer_pair['source'].word_counts.items() if
count > self.word_frequency_threshold - 1])
self.tokenizer_pair['target'].num_words = len([word for word,
count in self.tokenizer_pair['target'].word_counts.items() if
count > self.word_frequency_threshold - 1])
def get_sequences(self, level: str) ->np.ndarray:
if not self.tokenizer_pair.is_tokenized:
self.tokenize()
sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])
return pad_sequences(sentences, maxlen=self.max_sentence_length(
level), padding='post')
def encode_output(self, sequences: np.array) ->np.array:
return to_categorical(sequences, self.target_vocab_size)
def sequence_to_sentence(self, sequence: Iterable) ->str:
target_sentence = [self.tokenizer_pair.target_index_word.get(
word_index, '') for word_index in sequence]
return ' '.join(target_sentence)
def sentence_to_sequence(self, sentence: str) ->np.ndarray:
return pad_sequences(self.tokenizer_pair['source'].
texts_to_sequences([sentence]), self.max_sentence_length(
'source'), padding='post')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseDataset(SourceTargetMixin):
def __init__(self, source: Union[Iterable, Sized], target: Union[
Iterable, Sized], shuffle: bool=True, seed: int=42):
self.source = source
self.target = target
self._validate()
if shuffle:
self.shuffle(seed)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TokenizerPair(SourceTargetMixin):
def __init__(self, tokenizer_class=Tokenizer):
self.source = tokenizer_class()
self.target = tokenizer_class()
@property
def is_tokenized(self) ->bool:
return hasattr(self.source, 'word_index') and hasattr(self.target,
'word_index')
@cached_property
def target_index_word(self):
return {v: k for k, v in self.target.word_index.items()}
class TextDataset(BaseDataset):
    """Parallel (source/target) text corpus with tokenization helpers.

    Wraps a :class:`TokenizerPair` and exposes padded integer sequences,
    one-hot targets and encode/decode utilities for seq2seq training.
    """

    def __init__(self, source_sentences: Union[Iterable, Sized],
        target_sentences: Union[Iterable, Sized], shuffle: bool=True,
        word_frequency_threshold: int=2):
        """Store the sentence pair and prepare an unfitted tokenizer pair.

        :param word_frequency_threshold: minimum occurrences a word needs
            to stay in the capped vocabulary (rarer words are dropped).
        """
        super().__init__(source_sentences, target_sentences, shuffle)
        self.word_frequency_threshold = word_frequency_threshold
        self.tokenizer_pair = TokenizerPair()

    @cached_property
    def translation_references(self):
        """Map each source sentence to a list of tokenized references.

        Duplicate source sentences accumulate all their translations --
        the shape expected by corpus-BLEU style evaluation. Cached.
        """
        references = defaultdict(list)
        for idx, sentence in enumerate(self.source):
            split_sentence = text_to_word_sequence(self.target[idx])
            references[sentence].append(split_sentence)
        return references

    @property
    def source_max_sentence_length(self) ->int:
        """Longest source sentence, in whitespace-separated tokens."""
        return self.max_sentence_length('source')

    @property
    def target_max_sentence_length(self) ->int:
        """Longest target sentence, in whitespace-separated tokens."""
        return self.max_sentence_length('target')

    @property
    def source_vocab_size(self) ->int:
        """Frequency-capped source vocabulary size (tokenizer num_words)."""
        return self.tokenizer_pair.source.num_words

    @property
    def target_vocab_size(self) ->int:
        """Frequency-capped target vocabulary size (tokenizer num_words)."""
        return self.tokenizer_pair.target.num_words

    def get_vocab_size(self, level: str) ->int:
        """Full (uncapped) vocabulary size of one side, +1 for padding.

        NOTE(review): unlike ``source_vocab_size``/``target_vocab_size``
        this ignores the frequency threshold -- confirm which one callers
        actually want.

        :raises ValueError: if :meth:`tokenize` has not been run yet.
        """
        if not self.tokenizer_pair.is_tokenized:
            raise ValueError('Dataset has not been tokenized yet')
        return len(self.tokenizer_pair[level].word_index) + 1

    def max_sentence_length(self, level: str) ->int:
        """Token count of the longest sentence on the given side."""
        return max(len(line.split()) for line in self[level])

    def tokenize(self) ->None:
        """Fit both tokenizers (once) and cap vocabularies by frequency.

        ``count > threshold - 1`` is equivalent to ``count >= threshold``.
        NOTE(review): Keras keeps only words with index < num_words and
        reserves index 0, so this cap keeps one word fewer than counted --
        looks like an off-by-one; confirm intent.
        """
        if not self.tokenizer_pair.is_tokenized:
            self.tokenizer_pair['source'].fit_on_texts(self.source)
            self.tokenizer_pair['target'].fit_on_texts(self.target)
        self.tokenizer_pair['source'].num_words = len([word for word,
            count in self.tokenizer_pair['source'].word_counts.items() if
            count > self.word_frequency_threshold - 1])
        self.tokenizer_pair['target'].num_words = len([word for word,
            count in self.tokenizer_pair['target'].word_counts.items() if
            count > self.word_frequency_threshold - 1])

    def get_sequences(self, level: str) ->np.ndarray:
        """Padded (post) integer sequences for one side; tokenizes lazily."""
        if not self.tokenizer_pair.is_tokenized:
            self.tokenize()
        sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])
        return pad_sequences(sentences, maxlen=self.max_sentence_length(
            level), padding='post')

    def encode_output(self, sequences: np.array) ->np.array:
        """One-hot encode target index sequences for the decoder softmax."""
        return to_categorical(sequences, self.target_vocab_size)

    def sequence_to_sentence(self, sequence: Iterable) ->str:
        """Decode an index sequence; unknown indices decode to ''."""
        target_sentence = [self.tokenizer_pair.target_index_word.get(
            word_index, '') for word_index in sequence]
        return ' '.join(target_sentence)

    def sentence_to_sequence(self, sentence: str) ->np.ndarray:
        """Encode one raw source sentence into a padded index sequence."""
        return pad_sequences(self.tokenizer_pair['source'].
            texts_to_sequences([sentence]), self.max_sentence_length(
            'source'), padding='post')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseDataset(SourceTargetMixin):
    """Aligned source/target container with validation and shuffling."""

    def __init__(self, source: Union[Iterable, Sized], target: Union[
        Iterable, Sized], shuffle: bool=True, seed: int=42):
        """Store both sides, verify equal lengths, optionally shuffle."""
        self.source = source
        self.target = target
        self._validate()
        if shuffle:
            self.shuffle(seed)

    def _validate(self) ->None:
        """Raise TypeError when the two sides differ in length."""
        if len(self.source) != len(self.target):
            raise TypeError(
                'Number of source rows ({}) does not match the number of target rows ({})'
                .format(len(self.source), len(self.target)))

    def shuffle(self, seed: int=42) ->None:
        """Apply one random permutation to both sides in lockstep."""
        np.random.seed(seed)
        order = np.random.permutation(len(self.source))
        self.source, self.target = self.source[order], self.target[order]
class TokenizerPair(SourceTargetMixin):
    """Pair of tokenizers addressable as pair['source'] / pair['target']."""

    def __init__(self, tokenizer_class=Tokenizer):
        # One fresh tokenizer instance per translation side.
        self.source = tokenizer_class()
        self.target = tokenizer_class()

    @property
    def is_tokenized(self) ->bool:
        # Fitted tokenizers expose ``word_index``. NOTE(review): some Keras
        # versions pre-set word_index in __init__, which would make this
        # check always True -- confirm against the installed version.
        return hasattr(self.source, 'word_index') and hasattr(self.target,
            'word_index')

    @cached_property
    def target_index_word(self):
        """Inverse of the target word_index (index -> word), cached."""
        return {v: k for k, v in self.target.word_index.items()}
class TextDataset(BaseDataset):
    """Parallel text corpus: tokenization, padding and decoding helpers."""

    def __init__(self, source_sentences: Union[Iterable, Sized],
        target_sentences: Union[Iterable, Sized], shuffle: bool=True,
        word_frequency_threshold: int=2):
        """Store the sentence pair and prepare an unfitted tokenizer pair.

        :param word_frequency_threshold: minimum occurrences a word needs
            to stay in the capped vocabulary (rarer words are dropped).
        """
        super().__init__(source_sentences, target_sentences, shuffle)
        self.word_frequency_threshold = word_frequency_threshold
        self.tokenizer_pair = TokenizerPair()

    @cached_property
    def translation_references(self):
        """Per-source-sentence list of tokenized reference translations.

        Duplicate source sentences accumulate every translation seen --
        the shape expected by corpus-BLEU style scoring. Cached.
        """
        references = defaultdict(list)
        for idx, sentence in enumerate(self.source):
            split_sentence = text_to_word_sequence(self.target[idx])
            references[sentence].append(split_sentence)
        return references

    @property
    def source_max_sentence_length(self) ->int:
        """Longest source sentence, in whitespace-separated tokens."""
        return self.max_sentence_length('source')

    @property
    def target_max_sentence_length(self) ->int:
        """Longest target sentence, in whitespace-separated tokens."""
        return self.max_sentence_length('target')

    @property
    def source_vocab_size(self) ->int:
        """Frequency-capped source vocabulary size (tokenizer num_words)."""
        return self.tokenizer_pair.source.num_words

    @property
    def target_vocab_size(self) ->int:
        """Frequency-capped target vocabulary size (tokenizer num_words)."""
        return self.tokenizer_pair.target.num_words

    def get_vocab_size(self, level: str) ->int:
        """Full (uncapped) vocabulary size of one side, +1 for padding.

        NOTE(review): inconsistent with ``*_vocab_size`` above, which
        respect the frequency threshold -- confirm which callers want.

        :raises ValueError: if :meth:`tokenize` has not been run yet.
        """
        if not self.tokenizer_pair.is_tokenized:
            raise ValueError('Dataset has not been tokenized yet')
        return len(self.tokenizer_pair[level].word_index) + 1

    def max_sentence_length(self, level: str) ->int:
        """Token count of the longest sentence on the given side."""
        return max(len(line.split()) for line in self[level])

    def tokenize(self) ->None:
        """Fit both tokenizers (once) and cap vocabularies by frequency.

        ``count > threshold - 1`` is equivalent to ``count >= threshold``.
        NOTE(review): Keras keeps only words with index < num_words and
        reserves index 0, so this cap keeps one word fewer than counted --
        looks like an off-by-one; confirm intent.
        """
        if not self.tokenizer_pair.is_tokenized:
            self.tokenizer_pair['source'].fit_on_texts(self.source)
            self.tokenizer_pair['target'].fit_on_texts(self.target)
        self.tokenizer_pair['source'].num_words = len([word for word,
            count in self.tokenizer_pair['source'].word_counts.items() if
            count > self.word_frequency_threshold - 1])
        self.tokenizer_pair['target'].num_words = len([word for word,
            count in self.tokenizer_pair['target'].word_counts.items() if
            count > self.word_frequency_threshold - 1])

    def get_sequences(self, level: str) ->np.ndarray:
        """Padded (post) integer sequences for one side; tokenizes lazily."""
        if not self.tokenizer_pair.is_tokenized:
            self.tokenize()
        sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])
        return pad_sequences(sentences, maxlen=self.max_sentence_length(
            level), padding='post')

    def encode_output(self, sequences: np.array) ->np.array:
        """One-hot encode target index sequences for the decoder softmax."""
        return to_categorical(sequences, self.target_vocab_size)

    def sequence_to_sentence(self, sequence: Iterable) ->str:
        """Decode an index sequence; unknown indices decode to ''."""
        target_sentence = [self.tokenizer_pair.target_index_word.get(
            word_index, '') for word_index in sequence]
        return ' '.join(target_sentence)

    def sentence_to_sequence(self, sentence: str) ->np.ndarray:
        """Encode one raw source sentence into a padded index sequence."""
        return pad_sequences(self.tokenizer_pair['source'].
            texts_to_sequences([sentence]), self.max_sentence_length(
            'source'), padding='post')
<|reserved_special_token_1|>
from collections import defaultdict
from typing import Union, Iterable, Sized
import numpy as np
from cached_property import cached_property
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer, text_to_word_sequence
class SourceTargetMixin:
    """Adds ``obj['source']`` / ``obj['target']`` subscription to a class.

    Any other key raises :class:`TypeError`.
    """

    _ALLOWED_KEYS = ('source', 'target')

    def __getitem__(self, item):
        if item not in self._ALLOWED_KEYS:
            raise TypeError('Subscription is available '
                            'only with "source" and "target" keywords')
        return getattr(self, item)
class BaseDataset(SourceTargetMixin):
    """Aligned source/target rows with validation and lockstep shuffling.

    Subscription (``ds['source']`` / ``ds['target']``) comes from
    :class:`SourceTargetMixin`.
    """

    def __init__(self, source: Union[Iterable, Sized],
                 target: Union[Iterable, Sized],
                 shuffle: bool=True, seed: int=42):
        """Store both sides, check equal lengths, optionally shuffle.

        :param source: source-side rows (sized, indexable).
        :param target: target-side rows aligned with ``source``.
        :param shuffle: permute both sides in lockstep when True.
        :param seed: RNG seed for the permutation.
        :raises TypeError: if the two sides differ in length.
        """
        self.source = source
        self.target = target
        self._validate()
        if shuffle:
            self.shuffle(seed)

    def _validate(self) -> None:
        """Raise when source and target row counts differ."""
        src_len = len(self.source)
        target_len = len(self.target)
        if src_len != target_len:
            # Kept as TypeError (rather than ValueError) so existing
            # callers that catch it keep working.
            raise TypeError('Number of source rows ({}) does not match '
                            'the number of target rows ({})'.format(src_len,
                                                                    target_len))

    def shuffle(self, seed: int=42) -> None:
        """Apply one random permutation to both sides in lockstep."""
        np.random.seed(seed)
        shuffled_indexes = np.random.permutation(len(self.source))
        # Bug fix: the declared interface accepts any sized iterable, but
        # fancy indexing only works on numpy arrays; np.asarray is a no-op
        # for arrays and converts plain sequences, so list inputs no
        # longer crash here.
        self.source = np.asarray(self.source)[shuffled_indexes]
        self.target = np.asarray(self.target)[shuffled_indexes]
class TokenizerPair(SourceTargetMixin):
    """A 'source'/'target' pair of tokenizers, subscriptable by side name."""

    def __init__(self, tokenizer_class=Tokenizer):
        # Build one independent tokenizer per side.
        for side in ('source', 'target'):
            setattr(self, side, tokenizer_class())

    @property
    def is_tokenized(self) -> bool:
        """Both tokenizers have been fitted (expose ``word_index``)."""
        return hasattr(self.source, 'word_index') \
            and hasattr(self.target, 'word_index')

    @cached_property
    def target_index_word(self):
        """Cached inverse target vocabulary mapping index -> word."""
        return dict((index, word)
                    for word, index in self.target.word_index.items())
class TextDataset(BaseDataset):
    """Parallel text corpus with tokenization, padding and decoding helpers.

    Wraps a :class:`TokenizerPair` and exposes padded integer sequences,
    one-hot targets and encode/decode utilities for seq2seq training.
    """

    def __init__(self, source_sentences: Union[Iterable, Sized],
                 target_sentences: Union[Iterable, Sized],
                 shuffle: bool=True, word_frequency_threshold: int=2):
        """Store the sentence pair and prepare an unfitted tokenizer pair.

        :param word_frequency_threshold: minimum number of occurrences a
            word needs to stay in the capped vocabulary (rarer words are
            treated as out-of-vocabulary).
        """
        super().__init__(source_sentences, target_sentences, shuffle)

        self.word_frequency_threshold = word_frequency_threshold
        self.tokenizer_pair = TokenizerPair()

    @cached_property
    def translation_references(self):
        """Map each source sentence to its tokenized reference translations.

        Duplicate source sentences accumulate all their translations --
        the shape expected by corpus-BLEU style evaluation. Cached.
        """
        references = defaultdict(list)
        for idx, sentence in enumerate(self.source):
            references[sentence].append(text_to_word_sequence(self.target[idx]))
        return references

    @property
    def source_max_sentence_length(self) -> int:
        """Longest source sentence, in whitespace-separated tokens."""
        return self.max_sentence_length('source')

    @property
    def target_max_sentence_length(self) -> int:
        """Longest target sentence, in whitespace-separated tokens."""
        return self.max_sentence_length('target')

    @property
    def source_vocab_size(self) -> int:
        """Capped source vocabulary size (includes reserved index 0)."""
        return self.tokenizer_pair.source.num_words

    @property
    def target_vocab_size(self) -> int:
        """Capped target vocabulary size (includes reserved index 0)."""
        return self.tokenizer_pair.target.num_words

    def get_vocab_size(self, level: str) -> int:
        """Full (uncapped) vocabulary size of one side, +1 for padding.

        NOTE: unlike ``source_vocab_size``/``target_vocab_size`` this
        ignores the frequency threshold; it counts every word seen while
        fitting.

        :raises ValueError: if :meth:`tokenize` has not been run yet.
        """
        if not self.tokenizer_pair.is_tokenized:
            raise ValueError('Dataset has not been tokenized yet')
        return len(self.tokenizer_pair[level].word_index) + 1

    def max_sentence_length(self, level: str) -> int:
        """Token count of the longest sentence on the given side."""
        return max(len(line.split()) for line in self[level])

    def _frequent_word_count(self, level: str) -> int:
        """Number of words on ``level`` meeting the frequency threshold."""
        return sum(1 for count in
                   self.tokenizer_pair[level].word_counts.values()
                   if count >= self.word_frequency_threshold)

    def tokenize(self) -> None:
        """Fit both tokenizers (once) and cap their vocabularies.

        Keras keeps only words with index < ``num_words`` and reserves
        index 0 for padding, so ``num_words`` must be the number of
        frequent words **plus one**. Bug fix: the previous code omitted
        the +1 and silently dropped one frequent word per side. The
        duplicated source/target filter logic now lives in
        :meth:`_frequent_word_count`.
        """
        if not self.tokenizer_pair.is_tokenized:
            self.tokenizer_pair['source'].fit_on_texts(self.source)
            self.tokenizer_pair['target'].fit_on_texts(self.target)

        # Re-applied unconditionally, matching the original behaviour of
        # recomputing the cap on every call.
        for level in ('source', 'target'):
            self.tokenizer_pair[level].num_words = (
                self._frequent_word_count(level) + 1)

    def get_sequences(self, level: str) -> np.ndarray:
        """Padded (post) integer sequences for one side; tokenizes lazily."""
        if not self.tokenizer_pair.is_tokenized:
            self.tokenize()

        sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])

        return pad_sequences(sentences,
                             maxlen=self.max_sentence_length(level),
                             padding='post')

    def encode_output(self, sequences: np.ndarray) -> np.ndarray:
        """One-hot encode target index sequences for the decoder softmax."""
        return to_categorical(sequences, self.target_vocab_size)

    def sequence_to_sentence(self, sequence: Iterable) -> str:
        """Decode an index sequence; unknown indices decode to ''."""
        target_sentence = [self.tokenizer_pair.target_index_word.get(
            word_index, '') for word_index in sequence]
        return ' '.join(target_sentence)

    def sentence_to_sequence(self, sentence: str) -> np.ndarray:
        """Encode one raw source sentence into a padded index sequence."""
        return pad_sequences(
            self.tokenizer_pair['source'].texts_to_sequences([sentence]),
            self.max_sentence_length('source'), padding='post')
|
flexible
|
{
"blob_id": "e5d7cc65041d65f915d4882b4fdad5bebf79a067",
"index": 204,
"step-1": "<mask token>\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return self.max_sentence_length('target')\n <mask token>\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n <mask token>\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), 
padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = [self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-2": "<mask token>\n\n\nclass BaseDataset(SourceTargetMixin):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TokenizerPair(SourceTargetMixin):\n\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) ->bool:\n return hasattr(self.source, 'word_index') and hasattr(self.target,\n 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) ->int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, level: str) ->int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n 
self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = [self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-3": "<mask token>\n\n\nclass BaseDataset(SourceTargetMixin):\n\n def __init__(self, source: Union[Iterable, Sized], target: Union[\n Iterable, Sized], shuffle: bool=True, seed: int=42):\n self.source = source\n self.target = target\n self._validate()\n if shuffle:\n self.shuffle(seed)\n <mask token>\n <mask token>\n\n\nclass TokenizerPair(SourceTargetMixin):\n\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) ->bool:\n return hasattr(self.source, 'word_index') and hasattr(self.target,\n 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) ->int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, 
level: str) ->int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = [self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-4": "<mask token>\n\n\nclass BaseDataset(SourceTargetMixin):\n\n def __init__(self, source: Union[Iterable, Sized], target: Union[\n Iterable, Sized], shuffle: bool=True, seed: int=42):\n self.source = source\n self.target = target\n self._validate()\n if shuffle:\n self.shuffle(seed)\n\n def _validate(self) ->None:\n src_len = len(self.source)\n target_len = len(self.target)\n if src_len != target_len:\n raise TypeError(\n 'Number of source rows ({}) does not match the number of target rows ({})'\n .format(src_len, target_len))\n\n def shuffle(self, seed: int=42) ->None:\n np.random.seed(seed)\n shuffled_indexes = np.random.permutation(len(self.source))\n self.source = self.source[shuffled_indexes]\n self.target = self.target[shuffled_indexes]\n\n\nclass TokenizerPair(SourceTargetMixin):\n\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) ->bool:\n return hasattr(self.source, 'word_index') and hasattr(self.target,\n 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized], shuffle: bool=True,\n word_frequency_threshold: int=2):\n super().__init__(source_sentences, target_sentences, shuffle)\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) ->int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) ->int:\n return 
self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) ->int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) ->int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) ->int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, level: str) ->int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) ->None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n self.tokenizer_pair['source'].num_words = len([word for word,\n count in self.tokenizer_pair['source'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n self.tokenizer_pair['target'].num_words = len([word for word,\n count in self.tokenizer_pair['target'].word_counts.items() if\n count > self.word_frequency_threshold - 1])\n\n def get_sequences(self, level: str) ->np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n return pad_sequences(sentences, maxlen=self.max_sentence_length(\n level), padding='post')\n\n def encode_output(self, sequences: np.array) ->np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) ->str:\n target_sentence = [self.tokenizer_pair.target_index_word.get(\n word_index, '') for word_index in sequence]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) ->np.ndarray:\n return pad_sequences(self.tokenizer_pair['source'].\n texts_to_sequences([sentence]), self.max_sentence_length(\n 'source'), padding='post')\n",
"step-5": "from collections import defaultdict\nfrom typing import Union, Iterable, Sized\n\nimport numpy as np\nfrom cached_property import cached_property\nfrom keras.utils import to_categorical\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\n\n\nclass SourceTargetMixin:\n \"\"\"\n Allows subscription with 'source' and 'target' keywords\n \"\"\"\n def __getitem__(self, item):\n if item in ['source', 'target']:\n return getattr(self, item)\n raise TypeError('Subscription is available '\n 'only with \"source\" and \"target\" keywords')\n\n\nclass BaseDataset(SourceTargetMixin):\n def __init__(self, source: Union[Iterable, Sized],\n target: Union[Iterable, Sized],\n shuffle: bool=True, seed: int=42):\n self.source = source\n self.target = target\n self._validate()\n if shuffle:\n self.shuffle(seed)\n\n def _validate(self) -> None:\n src_len = len(self.source)\n target_len = len(self.target)\n if src_len != target_len:\n raise TypeError('Number of source rows ({}) does not match '\n 'the number of target rows ({})'.format(src_len,\n target_len))\n\n def shuffle(self, seed: int=42) -> None:\n np.random.seed(seed)\n shuffled_indexes = np.random.permutation(len(self.source))\n self.source = self.source[shuffled_indexes]\n self.target = self.target[shuffled_indexes]\n\n\nclass TokenizerPair(SourceTargetMixin):\n def __init__(self, tokenizer_class=Tokenizer):\n self.source = tokenizer_class()\n self.target = tokenizer_class()\n\n @property\n def is_tokenized(self) -> bool:\n return hasattr(self.source, 'word_index') \\\n and hasattr(self.target, 'word_index')\n\n @cached_property\n def target_index_word(self):\n return {v: k for k, v in self.target.word_index.items()}\n\n\nclass TextDataset(BaseDataset):\n def __init__(self, source_sentences: Union[Iterable, Sized],\n target_sentences: Union[Iterable, Sized],\n shuffle: bool=True, word_frequency_threshold: int=2):\n 
super().__init__(source_sentences, target_sentences, shuffle)\n\n self.word_frequency_threshold = word_frequency_threshold\n self.tokenizer_pair = TokenizerPair()\n\n @cached_property\n def translation_references(self):\n references = defaultdict(list)\n for idx, sentence in enumerate(self.source):\n split_sentence = text_to_word_sequence(self.target[idx])\n references[sentence].append(split_sentence)\n return references\n\n @property\n def source_max_sentence_length(self) -> int:\n return self.max_sentence_length('source')\n\n @property\n def target_max_sentence_length(self) -> int:\n return self.max_sentence_length('target')\n\n @property\n def source_vocab_size(self) -> int:\n return self.tokenizer_pair.source.num_words\n\n @property\n def target_vocab_size(self) -> int:\n return self.tokenizer_pair.target.num_words\n\n def get_vocab_size(self, level: str) -> int:\n if not self.tokenizer_pair.is_tokenized:\n raise ValueError('Dataset has not been tokenized yet')\n return len(self.tokenizer_pair[level].word_index) + 1\n\n def max_sentence_length(self, level: str) -> int:\n return max(len(line.split()) for line in self[level])\n\n def tokenize(self) -> None:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenizer_pair['source'].fit_on_texts(self.source)\n self.tokenizer_pair['target'].fit_on_texts(self.target)\n\n # limit number of words returned from tokenizer\n # according to frequency threshold\n self.tokenizer_pair['source'].num_words = len(\n [word for word, count\n in self.tokenizer_pair['source'].word_counts.items()\n if count > self.word_frequency_threshold - 1]\n )\n\n self.tokenizer_pair['target'].num_words = len(\n [word for word, count\n in self.tokenizer_pair['target'].word_counts.items()\n if count > self.word_frequency_threshold - 1]\n )\n\n def get_sequences(self, level: str) -> np.ndarray:\n if not self.tokenizer_pair.is_tokenized:\n self.tokenize()\n\n sentences = self.tokenizer_pair[level].texts_to_sequences(self[level])\n\n return 
pad_sequences(\n sentences, maxlen=self.max_sentence_length(level), padding='post'\n )\n\n def encode_output(self, sequences: np.array) -> np.array:\n return to_categorical(sequences, self.target_vocab_size)\n\n def sequence_to_sentence(self, sequence: Iterable) -> str:\n target_sentence = [\n self.tokenizer_pair.target_index_word.get(word_index, '')\n for word_index in sequence\n ]\n return ' '.join(target_sentence)\n\n def sentence_to_sequence(self, sentence: str) -> np.ndarray:\n return pad_sequences(\n self.tokenizer_pair['source'].texts_to_sequences([sentence]),\n self.max_sentence_length('source'), padding='post'\n )\n",
"step-ids": [
12,
19,
20,
22,
27
]
}
|
[
12,
19,
20,
22,
27
] |
<|reserved_special_token_0|>
def mkdir_tree(source):
    """Create the clf_meta output directory tree for one feature source.

    Layout: ``../data/clf_meta/<source>/<subdir>/<dataset>``; the
    'increment' and 'valarch' datasets get a second level of folders and
    'valarch' additionally a third. The 'RDEL' source also receives
    'features' and 'vectorizers' subdirs. Existing directories are left
    untouched; every creation is printed.

    :param source: source name; ``None`` falls back to 'default'.
    """
    if source is None:
        source = 'default'
    base_dirs = ['../data/clf_meta/%s/' % source]
    print('base_dirsssssss', base_dirs)
    for base_dir in base_dirs:
        # Create the per-source root (assumes ../data/clf_meta exists).
        if not os.path.exists(base_dir):
            print('mkdir', base_dir)
            os.mkdir(base_dir)
        # 'RDEL' stores extra artifacts (features and vectorizers).
        if source == 'RDEL':
            subdirs = ['models', 'preds', 'features', 'vectorizers']
        else:
            subdirs = ['models', 'preds']
        datasets = ['default', 'events', 'increment', 'nela',
            'fakenewscorpus', 'forecast', 'events_v2', 'valarch']
        datasets2 = ['default', 'nela', 'fakenewscorpus']
        datasets3 = ['bydomains', 'byforecast', 'basic']
        for d in subdirs:
            sub_dir = os.path.join(base_dir, d)
            if not os.path.exists(sub_dir):
                print('mkdir', sub_dir)
                os.mkdir(sub_dir)
            for dataset in datasets:
                dataset_path = os.path.join(sub_dir, dataset)
                if not os.path.exists(dataset_path):
                    print('mkdir', dataset_path)
                    os.mkdir(dataset_path)
                # Only these two datasets have a second folder level.
                if dataset == 'increment' or dataset == 'valarch':
                    for ds2 in datasets2:
                        ds2_path = os.path.join(dataset_path, ds2)
                        if not os.path.exists(ds2_path):
                            print('mkdir', ds2_path)
                            os.mkdir(ds2_path)
                        # 'valarch' gets a third level per architecture.
                        if dataset == 'valarch':
                            for ds3 in datasets3:
                                ds3_path = os.path.join(ds2_path, ds3)
                                if not os.path.exists(ds3_path):
                                    print('mkdir', ds3_path)
                                    os.mkdir(ds3_path)
    print('finished making directory tree')
    return
def gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=
    '2016-12-30', n=10):
    """Pick ``n`` random business days (with replacement) in a date range.

    :param dataset: unused; kept for call-site compatibility -- TODO
        confirm before removing.
    :param start_date: inclusive start of the candidate range.
    :param end_date: inclusive end of the candidate range.
    :param n: number of dates to draw.
    :return: list of ``pd.Timestamp`` in draw order (printed sorted).
    """
    rand_dates = []
    for i in range(n):
        dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))
        rand_dates.append(dt)
    print(sorted(rand_dates))
    return rand_dates
<|reserved_special_token_0|>
def gen_precision_recall(y_true, y_pred):
    """Precision and recall of the positive class (label 1).

    Returns ``(nan, nan)`` when only a single class occurs, since the
    positive-class column then does not exist.
    NOTE(review): sklearn ignores ``pos_label`` when ``average=None``;
    indexing ``[1]`` assumes labels {0, 1} are both present -- confirm
    callers.
    """
    percision, recall, f1s, _ = precision_recall_fscore_support(y_true,
        y_pred, average=None, pos_label=1)
    if len(f1s) == 1:
        return np.nan, np.nan
    return percision[1], recall[1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mkdir_tree(source):
    """Create the clf_meta output directory tree for one feature source.

    Behaviour-preserving restyle: same directories, same prints, in the
    same order. ``None`` maps to the 'default' source; 'RDEL' receives
    extra 'features' and 'vectorizers' subdirs.
    """
    if source is None:
        source = 'default'
    base_dirs = ['../data/clf_meta/%s/' % source]
    print('base_dirsssssss', base_dirs)

    level_one = (['models', 'preds', 'features', 'vectorizers']
                 if source == 'RDEL' else ['models', 'preds'])
    level_two = ['default', 'events', 'increment', 'nela',
                 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']
    level_three = ['default', 'nela', 'fakenewscorpus']
    level_four = ['bydomains', 'byforecast', 'basic']

    def _ensure(path):
        # Mirrors the original: announce and create only when missing.
        if not os.path.exists(path):
            print('mkdir', path)
            os.mkdir(path)

    for base_dir in base_dirs:
        _ensure(base_dir)
        for sub in level_one:
            sub_dir = os.path.join(base_dir, sub)
            _ensure(sub_dir)
            for dataset in level_two:
                dataset_path = os.path.join(sub_dir, dataset)
                _ensure(dataset_path)
                if dataset in ('increment', 'valarch'):
                    for ds2 in level_three:
                        ds2_path = os.path.join(dataset_path, ds2)
                        _ensure(ds2_path)
                        if dataset == 'valarch':
                            for ds3 in level_four:
                                _ensure(os.path.join(ds2_path, ds3))
    print('finished making directory tree')
    return
def gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=
    '2016-12-30', n=10):
    """Draw ``n`` random business days (with replacement) from the range.

    ``dataset`` is unused but kept for signature compatibility.
    Returns the draws in order; also prints them sorted.
    """
    rand_dates = [pd.to_datetime(rnd.choice(pd.bdate_range(start_date,
        end_date))) for _ in range(n)]
    print(sorted(rand_dates))
    return rand_dates
<|reserved_special_token_0|>
def gen_precision_recall(y_true, y_pred):
    """Return (precision, recall) of the positive class, or NaNs when
    only one class is present in the data."""
    scores = precision_recall_fscore_support(y_true, y_pred, average=None,
        pos_label=1)
    precision_per_class, recall_per_class, f1_per_class = scores[:3]
    if len(f1_per_class) == 1:
        return np.nan, np.nan
    return precision_per_class[1], recall_per_class[1]
def gen_fnr_fpr(y_true, y_pred):
    """Return (TNR, TPR, accuracy) from a binary confusion matrix.

    NOTE(review): despite the name, the values are the true-negative
    rate (specificity), true-positive rate (recall) and accuracy -- not
    FNR/FPR. Confirm call sites before renaming.
    Returns three NaNs when the confusion matrix is not 2x2 (e.g.
    single-class input makes the 4-way unpack fail).
    """
    try:
        tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
        return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (
            tp + fp + fn + tn)
    except Exception as e:
        return np.nan, np.nan, np.nan
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mkdir_tree(source, base_root='../data/clf_meta'):
    """Create the classifier-metadata directory tree for one source.

    Tree layout (per source):
        <base_root>/<source>/<subdir>/<dataset>
        .../increment|valarch/<dataset2>
        .../valarch/<dataset2>/<dataset3>
    The 'RDEL' source additionally gets 'features' and 'vectorizers'
    subdirs. Existing directories are left untouched.

    :param source: source name; ``None`` falls back to 'default'.
    :param base_root: root directory for the tree. New optional parameter
        (default keeps the original hard-coded location); also makes the
        function testable.
    """
    if source is None:
        source = 'default'
    base_dirs = [os.path.join(base_root, source)]
    print('base dirs', base_dirs)
    if source == 'RDEL':
        subdirs = ['models', 'preds', 'features', 'vectorizers']
    else:
        subdirs = ['models', 'preds']
    datasets = ['default', 'events', 'increment', 'nela',
                'fakenewscorpus', 'forecast', 'events_v2', 'valarch']
    datasets2 = ['default', 'nela', 'fakenewscorpus']
    datasets3 = ['bydomains', 'byforecast', 'basic']
    # Collect every leaf path first; makedirs(exist_ok=True) then creates
    # all missing intermediate directories in one call per leaf (the old
    # per-level os.mkdir crashed when base_root's parent was missing).
    leaf_paths = []
    for base_dir in base_dirs:
        for d in subdirs:
            for dataset in datasets:
                dataset_path = os.path.join(base_dir, d, dataset)
                leaf_paths.append(dataset_path)
                if dataset in ('increment', 'valarch'):
                    for ds2 in datasets2:
                        ds2_path = os.path.join(dataset_path, ds2)
                        leaf_paths.append(ds2_path)
                        if dataset == 'valarch':
                            leaf_paths.extend(os.path.join(ds2_path, ds3)
                                              for ds3 in datasets3)
    for path in leaf_paths:
        if not os.path.isdir(path):
            print('mkdir', path)
        os.makedirs(path, exist_ok=True)
    print('finished making directory tree')
    return
def gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=
'2016-12-30', n=10):
rand_dates = []
for i in range(n):
dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))
rand_dates.append(dt)
print(sorted(rand_dates))
return rand_dates
def evaluate_clf_preformance(y_true, y_pred, y_pred_prob):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
auc = roc_auc_score(y_true, y_pred_prob)
accu = accuracy_score(y_true, y_pred)
f1_macro = f1_score(y_true, y_pred, average='macro', pos_label=1)
f1_micro = f1_score(y_true, y_pred, average='micro', pos_label=1)
f1_weighted = f1_score(y_true, y_pred, average='weighted', pos_label=1)
f1_pos = f1_score(y_true, y_pred, pos_label=1)
f1s = f1_score(y_true, y_pred, average=None)
f1_real = None
for f in f1s:
if f != f1_pos:
f1_real = f
print(
'auc, accuracy, f1_micro, f1_macro, f1_weighted, f1_fake, f1_real are',
auc, accu, f1_micro, f1_macro, f1_weighted, f1_pos, f1_real)
return {'auc_score': auc, 'accuracy_score': accu, 'f1_micro': f1_micro,
'f1_macro': f1_macro, 'f1_weighted': f1_weighted, 'f1_fake': f1_pos,
'f1_real': f1_real, 'tn': tn, 'fp': fp, 'fn': fn, 'tp': tp}
<|reserved_special_token_0|>
def gen_precision_recall(y_true, y_pred):
percision, recall, f1s, _ = precision_recall_fscore_support(y_true,
y_pred, average=None, pos_label=1)
if len(f1s) == 1:
return np.nan, np.nan
return percision[1], recall[1]
def gen_fnr_fpr(y_true, y_pred):
try:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (
tp + fp + fn + tn)
except Exception as e:
return np.nan, np.nan, np.nan
<|reserved_special_token_1|>
import random as rnd
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score
import os
def mkdir_tree(source):
if source is None:
source = 'default'
base_dirs = ['../data/clf_meta/%s/' % source]
print('base_dirsssssss', base_dirs)
for base_dir in base_dirs:
if not os.path.exists(base_dir):
print('mkdir', base_dir)
os.mkdir(base_dir)
if source == 'RDEL':
subdirs = ['models', 'preds', 'features', 'vectorizers']
else:
subdirs = ['models', 'preds']
datasets = ['default', 'events', 'increment', 'nela',
'fakenewscorpus', 'forecast', 'events_v2', 'valarch']
datasets2 = ['default', 'nela', 'fakenewscorpus']
datasets3 = ['bydomains', 'byforecast', 'basic']
for d in subdirs:
sub_dir = os.path.join(base_dir, d)
if not os.path.exists(sub_dir):
print('mkdir', sub_dir)
os.mkdir(sub_dir)
for dataset in datasets:
dataset_path = os.path.join(sub_dir, dataset)
if not os.path.exists(dataset_path):
print('mkdir', dataset_path)
os.mkdir(dataset_path)
if dataset == 'increment' or dataset == 'valarch':
for ds2 in datasets2:
ds2_path = os.path.join(dataset_path, ds2)
if not os.path.exists(ds2_path):
print('mkdir', ds2_path)
os.mkdir(ds2_path)
if dataset == 'valarch':
for ds3 in datasets3:
ds3_path = os.path.join(ds2_path, ds3)
if not os.path.exists(ds3_path):
print('mkdir', ds3_path)
os.mkdir(ds3_path)
print('finished making directory tree')
return
def gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=
'2016-12-30', n=10):
rand_dates = []
for i in range(n):
dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))
rand_dates.append(dt)
print(sorted(rand_dates))
return rand_dates
def evaluate_clf_preformance(y_true, y_pred, y_pred_prob):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
auc = roc_auc_score(y_true, y_pred_prob)
accu = accuracy_score(y_true, y_pred)
f1_macro = f1_score(y_true, y_pred, average='macro', pos_label=1)
f1_micro = f1_score(y_true, y_pred, average='micro', pos_label=1)
f1_weighted = f1_score(y_true, y_pred, average='weighted', pos_label=1)
f1_pos = f1_score(y_true, y_pred, pos_label=1)
f1s = f1_score(y_true, y_pred, average=None)
f1_real = None
for f in f1s:
if f != f1_pos:
f1_real = f
print(
'auc, accuracy, f1_micro, f1_macro, f1_weighted, f1_fake, f1_real are',
auc, accu, f1_micro, f1_macro, f1_weighted, f1_pos, f1_real)
return {'auc_score': auc, 'accuracy_score': accu, 'f1_micro': f1_micro,
'f1_macro': f1_macro, 'f1_weighted': f1_weighted, 'f1_fake': f1_pos,
'f1_real': f1_real, 'tn': tn, 'fp': fp, 'fn': fn, 'tp': tp}
import numpy as np
def gen_precision_recall(y_true, y_pred):
percision, recall, f1s, _ = precision_recall_fscore_support(y_true,
y_pred, average=None, pos_label=1)
if len(f1s) == 1:
return np.nan, np.nan
return percision[1], recall[1]
def gen_fnr_fpr(y_true, y_pred):
try:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (
tp + fp + fn + tn)
except Exception as e:
return np.nan, np.nan, np.nan
<|reserved_special_token_1|>
import random as rnd
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support, roc_auc_score
import os
def mkdir_tree(source):
if source is None:
source = 'default'
base_dirs = ['../data/clf_meta/%s/'%source]
# hostname = socket.gethostname()
# print('hostname is', hostname)
# if 'arc-ts.umich.edu' in hostname:
# base_dirs.append('/scratch/cbudak_root/cbudak/lbozarth/fakenews/data/clf_meta/%s'%source)
print('base_dirsssssss', base_dirs)
for base_dir in base_dirs:
if not os.path.exists(base_dir):
print('mkdir', base_dir)
os.mkdir(base_dir)
if source == 'RDEL':
subdirs = ['models', 'preds', 'features', 'vectorizers']
else:
subdirs = ['models', 'preds']
datasets = ['default', 'events', 'increment', 'nela', 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']
datasets2 = ['default', 'nela', 'fakenewscorpus']
datasets3 = ['bydomains', 'byforecast', 'basic']
for d in subdirs:
sub_dir = os.path.join(base_dir, d)
if not os.path.exists(sub_dir):
print('mkdir', sub_dir)
os.mkdir(sub_dir)
for dataset in datasets:
dataset_path = os.path.join(sub_dir, dataset)
if not os.path.exists(dataset_path):
print('mkdir', dataset_path)
os.mkdir(dataset_path)
if dataset == 'increment' or dataset=='valarch':
for ds2 in datasets2:
ds2_path = os.path.join(dataset_path, ds2)
if not os.path.exists(ds2_path):
print('mkdir', ds2_path)
os.mkdir(ds2_path)
if dataset=='valarch':
for ds3 in datasets3:
ds3_path = os.path.join(ds2_path, ds3)
if not os.path.exists(ds3_path):
print('mkdir', ds3_path)
os.mkdir(ds3_path)
print('finished making directory tree')
return
def gen_rand_dates(dataset="default", start_date='2016-06-15', end_date='2016-12-30', n=10):
rand_dates = []
for i in range(n):
dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))
rand_dates.append(dt)
print(sorted(rand_dates))
return rand_dates
def evaluate_clf_preformance(y_true, y_pred, y_pred_prob):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
auc = roc_auc_score(y_true, y_pred_prob)
accu = accuracy_score(y_true, y_pred)
f1_macro = f1_score(y_true, y_pred, average='macro', pos_label=1)
f1_micro = f1_score(y_true, y_pred, average='micro', pos_label=1)
f1_weighted = f1_score(y_true, y_pred, average='weighted', pos_label=1)
f1_pos = f1_score(y_true, y_pred, pos_label=1)
f1s = f1_score(y_true, y_pred, average=None)
f1_real = None
for f in f1s:
if f!=f1_pos:
f1_real = f
print('auc, accuracy, f1_micro, f1_macro, f1_weighted, f1_fake, f1_real are', auc, accu, f1_micro, f1_macro, f1_weighted, f1_pos, f1_real)
return {'auc_score':auc, 'accuracy_score':accu, 'f1_micro':f1_micro, 'f1_macro':f1_macro, 'f1_weighted':f1_weighted,
'f1_fake':f1_pos, 'f1_real':f1_real, 'tn':tn, 'fp':fp, 'fn':fn, 'tp':tp}
import numpy as np
def gen_precision_recall(y_true, y_pred):
percision, recall, f1s, _ = precision_recall_fscore_support(y_true, y_pred, average=None, pos_label=1)
if len(f1s) == 1:
return np.nan, np.nan # [1][1]; too few values
return percision[1], recall[1] #for fake only
def gen_fnr_fpr(y_true, y_pred):
try:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
# # Sensitivity, hit rate, recall, or true positive rate
# TPR = TP / (TP + FN)
# # Specificity or true negative rate
# TNR = TN / (TN + FP)
# # Precision or positive predictive value
# PPV = TP / (TP + FP)
# # Negative predictive value
# NPV = TN / (TN + FN)
# # Fall out or false positive rate
# FPR = FP / (FP + TN)
# # False negative rate
# FNR = FN / (TP + FN)
# # False discovery rate
# FDR = FP / (TP + FP)
return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (tp + fp + fn + tn)
except Exception as e:
return np.nan, np.nan, np.nan # [1][1]; too few values
|
flexible
|
{
"blob_id": "11ca13aca699b1e0744243645b3dbcbb0dacdb7e",
"index": 9588,
"step-1": "<mask token>\n\n\ndef mkdir_tree(source):\n if source is None:\n source = 'default'\n base_dirs = ['../data/clf_meta/%s/' % source]\n print('base_dirsssssss', base_dirs)\n for base_dir in base_dirs:\n if not os.path.exists(base_dir):\n print('mkdir', base_dir)\n os.mkdir(base_dir)\n if source == 'RDEL':\n subdirs = ['models', 'preds', 'features', 'vectorizers']\n else:\n subdirs = ['models', 'preds']\n datasets = ['default', 'events', 'increment', 'nela',\n 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']\n datasets2 = ['default', 'nela', 'fakenewscorpus']\n datasets3 = ['bydomains', 'byforecast', 'basic']\n for d in subdirs:\n sub_dir = os.path.join(base_dir, d)\n if not os.path.exists(sub_dir):\n print('mkdir', sub_dir)\n os.mkdir(sub_dir)\n for dataset in datasets:\n dataset_path = os.path.join(sub_dir, dataset)\n if not os.path.exists(dataset_path):\n print('mkdir', dataset_path)\n os.mkdir(dataset_path)\n if dataset == 'increment' or dataset == 'valarch':\n for ds2 in datasets2:\n ds2_path = os.path.join(dataset_path, ds2)\n if not os.path.exists(ds2_path):\n print('mkdir', ds2_path)\n os.mkdir(ds2_path)\n if dataset == 'valarch':\n for ds3 in datasets3:\n ds3_path = os.path.join(ds2_path, ds3)\n if not os.path.exists(ds3_path):\n print('mkdir', ds3_path)\n os.mkdir(ds3_path)\n print('finished making directory tree')\n return\n\n\ndef gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=\n '2016-12-30', n=10):\n rand_dates = []\n for i in range(n):\n dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))\n rand_dates.append(dt)\n print(sorted(rand_dates))\n return rand_dates\n\n\n<mask token>\n\n\ndef gen_precision_recall(y_true, y_pred):\n percision, recall, f1s, _ = precision_recall_fscore_support(y_true,\n y_pred, average=None, pos_label=1)\n if len(f1s) == 1:\n return np.nan, np.nan\n return percision[1], recall[1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mkdir_tree(source):\n if source is None:\n source = 'default'\n base_dirs = ['../data/clf_meta/%s/' % source]\n print('base_dirsssssss', base_dirs)\n for base_dir in base_dirs:\n if not os.path.exists(base_dir):\n print('mkdir', base_dir)\n os.mkdir(base_dir)\n if source == 'RDEL':\n subdirs = ['models', 'preds', 'features', 'vectorizers']\n else:\n subdirs = ['models', 'preds']\n datasets = ['default', 'events', 'increment', 'nela',\n 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']\n datasets2 = ['default', 'nela', 'fakenewscorpus']\n datasets3 = ['bydomains', 'byforecast', 'basic']\n for d in subdirs:\n sub_dir = os.path.join(base_dir, d)\n if not os.path.exists(sub_dir):\n print('mkdir', sub_dir)\n os.mkdir(sub_dir)\n for dataset in datasets:\n dataset_path = os.path.join(sub_dir, dataset)\n if not os.path.exists(dataset_path):\n print('mkdir', dataset_path)\n os.mkdir(dataset_path)\n if dataset == 'increment' or dataset == 'valarch':\n for ds2 in datasets2:\n ds2_path = os.path.join(dataset_path, ds2)\n if not os.path.exists(ds2_path):\n print('mkdir', ds2_path)\n os.mkdir(ds2_path)\n if dataset == 'valarch':\n for ds3 in datasets3:\n ds3_path = os.path.join(ds2_path, ds3)\n if not os.path.exists(ds3_path):\n print('mkdir', ds3_path)\n os.mkdir(ds3_path)\n print('finished making directory tree')\n return\n\n\ndef gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=\n '2016-12-30', n=10):\n rand_dates = []\n for i in range(n):\n dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))\n rand_dates.append(dt)\n print(sorted(rand_dates))\n return rand_dates\n\n\n<mask token>\n\n\ndef gen_precision_recall(y_true, y_pred):\n percision, recall, f1s, _ = precision_recall_fscore_support(y_true,\n y_pred, average=None, pos_label=1)\n if len(f1s) == 1:\n return np.nan, np.nan\n return percision[1], recall[1]\n\n\ndef gen_fnr_fpr(y_true, y_pred):\n try:\n tn, fp, fn, tp = confusion_matrix(y_true, 
y_pred).ravel()\n return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (\n tp + fp + fn + tn)\n except Exception as e:\n return np.nan, np.nan, np.nan\n",
"step-3": "<mask token>\n\n\ndef mkdir_tree(source):\n if source is None:\n source = 'default'\n base_dirs = ['../data/clf_meta/%s/' % source]\n print('base_dirsssssss', base_dirs)\n for base_dir in base_dirs:\n if not os.path.exists(base_dir):\n print('mkdir', base_dir)\n os.mkdir(base_dir)\n if source == 'RDEL':\n subdirs = ['models', 'preds', 'features', 'vectorizers']\n else:\n subdirs = ['models', 'preds']\n datasets = ['default', 'events', 'increment', 'nela',\n 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']\n datasets2 = ['default', 'nela', 'fakenewscorpus']\n datasets3 = ['bydomains', 'byforecast', 'basic']\n for d in subdirs:\n sub_dir = os.path.join(base_dir, d)\n if not os.path.exists(sub_dir):\n print('mkdir', sub_dir)\n os.mkdir(sub_dir)\n for dataset in datasets:\n dataset_path = os.path.join(sub_dir, dataset)\n if not os.path.exists(dataset_path):\n print('mkdir', dataset_path)\n os.mkdir(dataset_path)\n if dataset == 'increment' or dataset == 'valarch':\n for ds2 in datasets2:\n ds2_path = os.path.join(dataset_path, ds2)\n if not os.path.exists(ds2_path):\n print('mkdir', ds2_path)\n os.mkdir(ds2_path)\n if dataset == 'valarch':\n for ds3 in datasets3:\n ds3_path = os.path.join(ds2_path, ds3)\n if not os.path.exists(ds3_path):\n print('mkdir', ds3_path)\n os.mkdir(ds3_path)\n print('finished making directory tree')\n return\n\n\ndef gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=\n '2016-12-30', n=10):\n rand_dates = []\n for i in range(n):\n dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))\n rand_dates.append(dt)\n print(sorted(rand_dates))\n return rand_dates\n\n\ndef evaluate_clf_preformance(y_true, y_pred, y_pred_prob):\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n auc = roc_auc_score(y_true, y_pred_prob)\n accu = accuracy_score(y_true, y_pred)\n f1_macro = f1_score(y_true, y_pred, average='macro', pos_label=1)\n f1_micro = f1_score(y_true, y_pred, average='micro', pos_label=1)\n 
f1_weighted = f1_score(y_true, y_pred, average='weighted', pos_label=1)\n f1_pos = f1_score(y_true, y_pred, pos_label=1)\n f1s = f1_score(y_true, y_pred, average=None)\n f1_real = None\n for f in f1s:\n if f != f1_pos:\n f1_real = f\n print(\n 'auc, accuracy, f1_micro, f1_macro, f1_weighted, f1_fake, f1_real are',\n auc, accu, f1_micro, f1_macro, f1_weighted, f1_pos, f1_real)\n return {'auc_score': auc, 'accuracy_score': accu, 'f1_micro': f1_micro,\n 'f1_macro': f1_macro, 'f1_weighted': f1_weighted, 'f1_fake': f1_pos,\n 'f1_real': f1_real, 'tn': tn, 'fp': fp, 'fn': fn, 'tp': tp}\n\n\n<mask token>\n\n\ndef gen_precision_recall(y_true, y_pred):\n percision, recall, f1s, _ = precision_recall_fscore_support(y_true,\n y_pred, average=None, pos_label=1)\n if len(f1s) == 1:\n return np.nan, np.nan\n return percision[1], recall[1]\n\n\ndef gen_fnr_fpr(y_true, y_pred):\n try:\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (\n tp + fp + fn + tn)\n except Exception as e:\n return np.nan, np.nan, np.nan\n",
"step-4": "import random as rnd\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_fscore_support, roc_auc_score\nimport os\n\n\ndef mkdir_tree(source):\n if source is None:\n source = 'default'\n base_dirs = ['../data/clf_meta/%s/' % source]\n print('base_dirsssssss', base_dirs)\n for base_dir in base_dirs:\n if not os.path.exists(base_dir):\n print('mkdir', base_dir)\n os.mkdir(base_dir)\n if source == 'RDEL':\n subdirs = ['models', 'preds', 'features', 'vectorizers']\n else:\n subdirs = ['models', 'preds']\n datasets = ['default', 'events', 'increment', 'nela',\n 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']\n datasets2 = ['default', 'nela', 'fakenewscorpus']\n datasets3 = ['bydomains', 'byforecast', 'basic']\n for d in subdirs:\n sub_dir = os.path.join(base_dir, d)\n if not os.path.exists(sub_dir):\n print('mkdir', sub_dir)\n os.mkdir(sub_dir)\n for dataset in datasets:\n dataset_path = os.path.join(sub_dir, dataset)\n if not os.path.exists(dataset_path):\n print('mkdir', dataset_path)\n os.mkdir(dataset_path)\n if dataset == 'increment' or dataset == 'valarch':\n for ds2 in datasets2:\n ds2_path = os.path.join(dataset_path, ds2)\n if not os.path.exists(ds2_path):\n print('mkdir', ds2_path)\n os.mkdir(ds2_path)\n if dataset == 'valarch':\n for ds3 in datasets3:\n ds3_path = os.path.join(ds2_path, ds3)\n if not os.path.exists(ds3_path):\n print('mkdir', ds3_path)\n os.mkdir(ds3_path)\n print('finished making directory tree')\n return\n\n\ndef gen_rand_dates(dataset='default', start_date='2016-06-15', end_date=\n '2016-12-30', n=10):\n rand_dates = []\n for i in range(n):\n dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))\n rand_dates.append(dt)\n print(sorted(rand_dates))\n return rand_dates\n\n\ndef evaluate_clf_preformance(y_true, y_pred, 
y_pred_prob):\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n auc = roc_auc_score(y_true, y_pred_prob)\n accu = accuracy_score(y_true, y_pred)\n f1_macro = f1_score(y_true, y_pred, average='macro', pos_label=1)\n f1_micro = f1_score(y_true, y_pred, average='micro', pos_label=1)\n f1_weighted = f1_score(y_true, y_pred, average='weighted', pos_label=1)\n f1_pos = f1_score(y_true, y_pred, pos_label=1)\n f1s = f1_score(y_true, y_pred, average=None)\n f1_real = None\n for f in f1s:\n if f != f1_pos:\n f1_real = f\n print(\n 'auc, accuracy, f1_micro, f1_macro, f1_weighted, f1_fake, f1_real are',\n auc, accu, f1_micro, f1_macro, f1_weighted, f1_pos, f1_real)\n return {'auc_score': auc, 'accuracy_score': accu, 'f1_micro': f1_micro,\n 'f1_macro': f1_macro, 'f1_weighted': f1_weighted, 'f1_fake': f1_pos,\n 'f1_real': f1_real, 'tn': tn, 'fp': fp, 'fn': fn, 'tp': tp}\n\n\nimport numpy as np\n\n\ndef gen_precision_recall(y_true, y_pred):\n percision, recall, f1s, _ = precision_recall_fscore_support(y_true,\n y_pred, average=None, pos_label=1)\n if len(f1s) == 1:\n return np.nan, np.nan\n return percision[1], recall[1]\n\n\ndef gen_fnr_fpr(y_true, y_pred):\n try:\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (\n tp + fp + fn + tn)\n except Exception as e:\n return np.nan, np.nan, np.nan\n",
"step-5": "import random as rnd\n\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_fscore_support, roc_auc_score\nimport os\n\ndef mkdir_tree(source):\n if source is None:\n source = 'default'\n base_dirs = ['../data/clf_meta/%s/'%source]\n\n # hostname = socket.gethostname()\n # print('hostname is', hostname)\n # if 'arc-ts.umich.edu' in hostname:\n # base_dirs.append('/scratch/cbudak_root/cbudak/lbozarth/fakenews/data/clf_meta/%s'%source)\n print('base_dirsssssss', base_dirs)\n for base_dir in base_dirs:\n if not os.path.exists(base_dir):\n print('mkdir', base_dir)\n os.mkdir(base_dir)\n\n if source == 'RDEL':\n subdirs = ['models', 'preds', 'features', 'vectorizers']\n else:\n subdirs = ['models', 'preds']\n\n datasets = ['default', 'events', 'increment', 'nela', 'fakenewscorpus', 'forecast', 'events_v2', 'valarch']\n datasets2 = ['default', 'nela', 'fakenewscorpus']\n datasets3 = ['bydomains', 'byforecast', 'basic']\n for d in subdirs:\n sub_dir = os.path.join(base_dir, d)\n if not os.path.exists(sub_dir):\n print('mkdir', sub_dir)\n os.mkdir(sub_dir)\n for dataset in datasets:\n dataset_path = os.path.join(sub_dir, dataset)\n if not os.path.exists(dataset_path):\n print('mkdir', dataset_path)\n os.mkdir(dataset_path)\n if dataset == 'increment' or dataset=='valarch':\n for ds2 in datasets2:\n ds2_path = os.path.join(dataset_path, ds2)\n if not os.path.exists(ds2_path):\n print('mkdir', ds2_path)\n os.mkdir(ds2_path)\n if dataset=='valarch':\n for ds3 in datasets3:\n ds3_path = os.path.join(ds2_path, ds3)\n if not os.path.exists(ds3_path):\n print('mkdir', ds3_path)\n os.mkdir(ds3_path)\n\n print('finished making directory tree')\n return\n\ndef gen_rand_dates(dataset=\"default\", start_date='2016-06-15', end_date='2016-12-30', n=10):\n rand_dates = []\n for i in range(n):\n 
dt = pd.to_datetime(rnd.choice(pd.bdate_range(start_date, end_date)))\n rand_dates.append(dt)\n print(sorted(rand_dates))\n return rand_dates\n\ndef evaluate_clf_preformance(y_true, y_pred, y_pred_prob):\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n auc = roc_auc_score(y_true, y_pred_prob)\n accu = accuracy_score(y_true, y_pred)\n f1_macro = f1_score(y_true, y_pred, average='macro', pos_label=1)\n f1_micro = f1_score(y_true, y_pred, average='micro', pos_label=1)\n f1_weighted = f1_score(y_true, y_pred, average='weighted', pos_label=1)\n f1_pos = f1_score(y_true, y_pred, pos_label=1)\n f1s = f1_score(y_true, y_pred, average=None)\n f1_real = None\n for f in f1s:\n if f!=f1_pos:\n f1_real = f\n print('auc, accuracy, f1_micro, f1_macro, f1_weighted, f1_fake, f1_real are', auc, accu, f1_micro, f1_macro, f1_weighted, f1_pos, f1_real)\n return {'auc_score':auc, 'accuracy_score':accu, 'f1_micro':f1_micro, 'f1_macro':f1_macro, 'f1_weighted':f1_weighted,\n 'f1_fake':f1_pos, 'f1_real':f1_real, 'tn':tn, 'fp':fp, 'fn':fn, 'tp':tp}\n\nimport numpy as np\ndef gen_precision_recall(y_true, y_pred):\n percision, recall, f1s, _ = precision_recall_fscore_support(y_true, y_pred, average=None, pos_label=1)\n if len(f1s) == 1:\n return np.nan, np.nan # [1][1]; too few values\n return percision[1], recall[1] #for fake only\n\ndef gen_fnr_fpr(y_true, y_pred):\n try:\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n # # Sensitivity, hit rate, recall, or true positive rate\n # TPR = TP / (TP + FN)\n # # Specificity or true negative rate\n # TNR = TN / (TN + FP)\n # # Precision or positive predictive value\n # PPV = TP / (TP + FP)\n # # Negative predictive value\n # NPV = TN / (TN + FN)\n # # Fall out or false positive rate\n # FPR = FP / (FP + TN)\n # # False negative rate\n # FNR = FN / (TP + FN)\n # # False discovery rate\n # FDR = FP / (TP + FP)\n return 1.0 * tn / (fp + tn), 1.0 * tp / (fn + tp), 1.0 * (tp + tn) / (tp + fp + fn + tn)\n except Exception as 
e:\n return np.nan, np.nan, np.nan # [1][1]; too few values",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#coding=utf-8
import pandas as pd
# 学生成绩表
df_grade = pd.read_excel("学生成绩表.xlsx")
df_grade.head()
# 学生信息表
df_sinfo = pd.read_excel("学生信息表.xlsx")
df_sinfo.head()
# 只筛选第二个表的少量的列
df_sinfo = df_sinfo[["学号", "姓名", "性别"]]
df_sinfo.head()
# join
df_merge = pd.merge(left=df_grade, right=df_sinfo, left_on="学号", right_on="学号")
df_merge.head()
# 将columns变成python的列表形式
new_columns = df_merge.columns.to_list()
# 按逆序insert,会将"姓名"/"性别"放到"学号"的后面
for name in ["姓名", "性别"][::-1]:
new_columns.remove(name)
new_columns.insert(new_columns.index("学号")+1, name)
df_merge = df_merge.reindex(columns=new_columns)
df_merge.head()
df_merge.to_excel("合并后的数据表.xlsx", index=False)
|
normal
|
{
"blob_id": "f6c48731b2a4e0a6f1f93034ee9d11121c2d0427",
"index": 6810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf_grade.head()\n<mask token>\ndf_sinfo.head()\n<mask token>\ndf_sinfo.head()\n<mask token>\ndf_merge.head()\n<mask token>\nfor name in ['姓名', '性别'][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index('学号') + 1, name)\n<mask token>\ndf_merge.head()\ndf_merge.to_excel('合并后的数据表.xlsx', index=False)\n",
"step-3": "<mask token>\ndf_grade = pd.read_excel('学生成绩表.xlsx')\ndf_grade.head()\ndf_sinfo = pd.read_excel('学生信息表.xlsx')\ndf_sinfo.head()\ndf_sinfo = df_sinfo[['学号', '姓名', '性别']]\ndf_sinfo.head()\ndf_merge = pd.merge(left=df_grade, right=df_sinfo, left_on='学号', right_on='学号')\ndf_merge.head()\nnew_columns = df_merge.columns.to_list()\nfor name in ['姓名', '性别'][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index('学号') + 1, name)\ndf_merge = df_merge.reindex(columns=new_columns)\ndf_merge.head()\ndf_merge.to_excel('合并后的数据表.xlsx', index=False)\n",
"step-4": "import pandas as pd\ndf_grade = pd.read_excel('学生成绩表.xlsx')\ndf_grade.head()\ndf_sinfo = pd.read_excel('学生信息表.xlsx')\ndf_sinfo.head()\ndf_sinfo = df_sinfo[['学号', '姓名', '性别']]\ndf_sinfo.head()\ndf_merge = pd.merge(left=df_grade, right=df_sinfo, left_on='学号', right_on='学号')\ndf_merge.head()\nnew_columns = df_merge.columns.to_list()\nfor name in ['姓名', '性别'][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index('学号') + 1, name)\ndf_merge = df_merge.reindex(columns=new_columns)\ndf_merge.head()\ndf_merge.to_excel('合并后的数据表.xlsx', index=False)\n",
"step-5": "#coding=utf-8\nimport pandas as pd\n\n# 学生成绩表\ndf_grade = pd.read_excel(\"学生成绩表.xlsx\") \ndf_grade.head()\n\n# 学生信息表\ndf_sinfo = pd.read_excel(\"学生信息表.xlsx\") \ndf_sinfo.head()\n\n# 只筛选第二个表的少量的列\ndf_sinfo = df_sinfo[[\"学号\", \"姓名\", \"性别\"]]\ndf_sinfo.head()\n\n# join\ndf_merge = pd.merge(left=df_grade, right=df_sinfo, left_on=\"学号\", right_on=\"学号\")\ndf_merge.head()\n\n# 将columns变成python的列表形式\nnew_columns = df_merge.columns.to_list()\n\n# 按逆序insert,会将\"姓名\"/\"性别\"放到\"学号\"的后面\nfor name in [\"姓名\", \"性别\"][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index(\"学号\")+1, name)\n\n\ndf_merge = df_merge.reindex(columns=new_columns)\ndf_merge.head()\n\ndf_merge.to_excel(\"合并后的数据表.xlsx\", index=False)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright 2014 Charles Noneman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite for running the test modules"""
from __future__ import print_function
import importlib
import pkgutil
import unittest
import test
def run():
"""Runs all of the tests"""
subsuite_list = []
for _, modname, _ in pkgutil.iter_modules(test.__path__):
if modname.startswith("test_"):
module = importlib.import_module('test.' + modname)
subsuite = unittest.TestLoader().loadTestsFromModule(module)
subsuite_list.append(subsuite)
suite = unittest.TestSuite(subsuite_list)
print("Testing:\n")
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
run()
|
normal
|
{
"blob_id": "9a7908212bf13565109cd4d9ab6de65909bc6910",
"index": 3606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run():\n \"\"\"Runs all of the tests\"\"\"\n subsuite_list = []\n for _, modname, _ in pkgutil.iter_modules(test.__path__):\n if modname.startswith('test_'):\n module = importlib.import_module('test.' + modname)\n subsuite = unittest.TestLoader().loadTestsFromModule(module)\n subsuite_list.append(subsuite)\n suite = unittest.TestSuite(subsuite_list)\n print('Testing:\\n')\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run():\n \"\"\"Runs all of the tests\"\"\"\n subsuite_list = []\n for _, modname, _ in pkgutil.iter_modules(test.__path__):\n if modname.startswith('test_'):\n module = importlib.import_module('test.' + modname)\n subsuite = unittest.TestLoader().loadTestsFromModule(module)\n subsuite_list.append(subsuite)\n suite = unittest.TestSuite(subsuite_list)\n print('Testing:\\n')\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport importlib\nimport pkgutil\nimport unittest\nimport test\n\n\ndef run():\n \"\"\"Runs all of the tests\"\"\"\n subsuite_list = []\n for _, modname, _ in pkgutil.iter_modules(test.__path__):\n if modname.startswith('test_'):\n module = importlib.import_module('test.' + modname)\n subsuite = unittest.TestLoader().loadTestsFromModule(module)\n subsuite_list.append(subsuite)\n suite = unittest.TestSuite(subsuite_list)\n print('Testing:\\n')\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "# Copyright 2014 Charles Noneman\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test suite for running the test modules\"\"\"\n\nfrom __future__ import print_function\n\nimport importlib\nimport pkgutil\nimport unittest\nimport test\n\ndef run():\n\t\"\"\"Runs all of the tests\"\"\"\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)\n\nif __name__ == '__main__':\n\trun()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
<|reserved_special_token_1|>
from django.contrib.auth import get_user_model
from rest_framework import generics
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from status.api.serializers import StatusInlineUserSerializer
from status.api.views import StatusAPIView
from status.models import Status
from .serializers import UserDetailSerializer
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = 'id',
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get('username')
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({'detail': 'Not allowed here'})
<|reserved_special_token_1|>
from django.contrib.auth import get_user_model
from rest_framework import generics
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from status.api.serializers import StatusInlineUserSerializer
from status.api.views import StatusAPIView
from status.models import Status
from .serializers import UserDetailSerializer
User = get_user_model()
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER
class UserDetailAPIView(generics.RetrieveAPIView):
queryset = User.objects.filter(is_active=True)
serializer_class = UserDetailSerializer
lookup_field = 'username'
class UserStatusAPIView(StatusAPIView):
serializer_class = StatusInlineUserSerializer
search_fields = ('id',)
def get_queryset(self, *args, **kwargs):
username = self.kwargs.get("username")
if username is None:
return Status.objects.none()
return Status.objects.filter(user__username=username)
def post(self, request, *args, **kwargs):
return Response({"detail": "Not allowed here"})
|
flexible
|
{
"blob_id": "472a79767f5dc7dc3cd03d89999d322b3885dcbf",
"index": 1220,
"step-1": "<mask token>\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-2": "<mask token>\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-3": "<mask token>\nUser = get_user_model()\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-4": "from django.contrib.auth import get_user_model\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework_jwt.settings import api_settings\nfrom status.api.serializers import StatusInlineUserSerializer\nfrom status.api.views import StatusAPIView\nfrom status.models import Status\nfrom .serializers import UserDetailSerializer\nUser = get_user_model()\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n search_fields = 'id',\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get('username')\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({'detail': 'Not allowed here'})\n",
"step-5": "from django.contrib.auth import get_user_model\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework_jwt.settings import api_settings\n\nfrom status.api.serializers import StatusInlineUserSerializer\nfrom status.api.views import StatusAPIView\nfrom status.models import Status\n\nfrom .serializers import UserDetailSerializer\n\nUser = get_user_model()\n\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\njwt_response_payload_handler = api_settings.JWT_RESPONSE_PAYLOAD_HANDLER\n\n\nclass UserDetailAPIView(generics.RetrieveAPIView):\n queryset = User.objects.filter(is_active=True)\n serializer_class = UserDetailSerializer\n lookup_field = 'username'\n\n\nclass UserStatusAPIView(StatusAPIView):\n serializer_class = StatusInlineUserSerializer\n\n search_fields = ('id',)\n\n def get_queryset(self, *args, **kwargs):\n username = self.kwargs.get(\"username\")\n if username is None:\n return Status.objects.none()\n return Status.objects.filter(user__username=username)\n\n def post(self, request, *args, **kwargs):\n return Response({\"detail\": \"Not allowed here\"})\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# Make an array of dictionaries. Each dictionary should have keys:
#
# lat: the latitude
# lon: the longitude
# name: the waypoint name
#
# Make up three entries of various values.
waypoints = [
{ 'lat': 106.72888 },
{ 'lon': 0.69622 },
{ 'name': 'Kepulauan Riau' }
]
# Write a loop that prints out all the field values for all the waypoints
for dict in waypoints:
print(dict)
|
normal
|
{
"blob_id": "5eee3953193e0fc9f44b81059ce66997c22bc8f1",
"index": 6960,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor dict in waypoints:\n print(dict)\n",
"step-3": "waypoints = [{'lat': 106.72888}, {'lon': 0.69622}, {'name': 'Kepulauan Riau'}]\nfor dict in waypoints:\n print(dict)\n",
"step-4": "# Make an array of dictionaries. Each dictionary should have keys:\n#\n# lat: the latitude\n# lon: the longitude\n# name: the waypoint name\n#\n# Make up three entries of various values.\n\nwaypoints = [\n { 'lat': 106.72888 },\n { 'lon': 0.69622 },\n { 'name': 'Kepulauan Riau' }\n]\n\n# Write a loop that prints out all the field values for all the waypoints\nfor dict in waypoints:\n print(dict)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def learn_distributions(file_lists_by_category):
"""
Estimate the parameters p_d, and q_d from the training set
Input
-----
file_lists_by_category: A two-element list. The first element is a list of
spam files, and the second element is a list of ham files.
Output
------
probabilities_by_category: A two-element tuple. The first element is a dict
whose keys are words, and whose values are the smoothed estimates of p_d;
the second element is a dict whose keys are words, and whose values are the
smoothed estimates of q_d
"""
spam_dict = util.get_word_freq(file_lists_by_category[0])
ham_dict = util.get_word_freq(file_lists_by_category[1])
spam_length = sum(spam_dict.values())
ham_length = sum(ham_dict.values())
dict_D = util.Counter()
for key in spam_dict:
dict_D[key] += spam_dict[key]
for key in ham_dict:
dict_D[key] += ham_dict[key]
D = len(dict_D)
spam_distribution = {}
ham_distribution = {}
for i in dict_D:
spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)
for i in dict_D:
ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)
probabilities_by_category = spam_distribution, ham_distribution
return probabilities_by_category
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def learn_distributions(file_lists_by_category):
"""
Estimate the parameters p_d, and q_d from the training set
Input
-----
file_lists_by_category: A two-element list. The first element is a list of
spam files, and the second element is a list of ham files.
Output
------
probabilities_by_category: A two-element tuple. The first element is a dict
whose keys are words, and whose values are the smoothed estimates of p_d;
the second element is a dict whose keys are words, and whose values are the
smoothed estimates of q_d
"""
spam_dict = util.get_word_freq(file_lists_by_category[0])
ham_dict = util.get_word_freq(file_lists_by_category[1])
spam_length = sum(spam_dict.values())
ham_length = sum(ham_dict.values())
dict_D = util.Counter()
for key in spam_dict:
dict_D[key] += spam_dict[key]
for key in ham_dict:
dict_D[key] += ham_dict[key]
D = len(dict_D)
spam_distribution = {}
ham_distribution = {}
for i in dict_D:
spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)
for i in dict_D:
ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)
probabilities_by_category = spam_distribution, ham_distribution
return probabilities_by_category
def classify_new_email(filename, probabilities_by_category, prior_by_category):
"""
Use Naive Bayes classification to classify the email in the given file.
Inputs
------
filename: name of the file to be classified
probabilities_by_category: output of function learn_distributions
prior_by_category: A two-element list as [\\pi, 1-\\pi], where \\pi is the
parameter in the prior class distribution
Output
------
classify_result: A two-element tuple. The first element is a string whose value
is either 'spam' or 'ham' depending on the classification result, and the
second element is a two-element list as [log p(y=1|x), log p(y=0|x)],
representing the log posterior probabilities
"""
spam_distribution = 0
ham_distribution = 0
word_frequency = util.get_word_freq([filename])
for w in word_frequency:
if w in probabilities_by_category[0]:
spam_distribution += word_frequency[w] * np.log(
probabilities_by_category[0][w])
if w in probabilities_by_category[1]:
ham_distribution += word_frequency[w] * np.log(
probabilities_by_category[1][w])
spam_distribution += np.log(prior_by_category[0])
ham_distribution += np.log(prior_by_category[1])
predict = ''
if spam_distribution > ham_distribution:
predict = 'spam'
else:
predict = 'ham'
word_distribution = [spam_distribution, ham_distribution]
classify_result = predict, word_distribution
return classify_result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def learn_distributions(file_lists_by_category):
"""
Estimate the parameters p_d, and q_d from the training set
Input
-----
file_lists_by_category: A two-element list. The first element is a list of
spam files, and the second element is a list of ham files.
Output
------
probabilities_by_category: A two-element tuple. The first element is a dict
whose keys are words, and whose values are the smoothed estimates of p_d;
the second element is a dict whose keys are words, and whose values are the
smoothed estimates of q_d
"""
spam_dict = util.get_word_freq(file_lists_by_category[0])
ham_dict = util.get_word_freq(file_lists_by_category[1])
spam_length = sum(spam_dict.values())
ham_length = sum(ham_dict.values())
dict_D = util.Counter()
for key in spam_dict:
dict_D[key] += spam_dict[key]
for key in ham_dict:
dict_D[key] += ham_dict[key]
D = len(dict_D)
spam_distribution = {}
ham_distribution = {}
for i in dict_D:
spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)
for i in dict_D:
ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)
probabilities_by_category = spam_distribution, ham_distribution
return probabilities_by_category
def classify_new_email(filename, probabilities_by_category, prior_by_category):
"""
Use Naive Bayes classification to classify the email in the given file.
Inputs
------
filename: name of the file to be classified
probabilities_by_category: output of function learn_distributions
prior_by_category: A two-element list as [\\pi, 1-\\pi], where \\pi is the
parameter in the prior class distribution
Output
------
classify_result: A two-element tuple. The first element is a string whose value
is either 'spam' or 'ham' depending on the classification result, and the
second element is a two-element list as [log p(y=1|x), log p(y=0|x)],
representing the log posterior probabilities
"""
spam_distribution = 0
ham_distribution = 0
word_frequency = util.get_word_freq([filename])
for w in word_frequency:
if w in probabilities_by_category[0]:
spam_distribution += word_frequency[w] * np.log(
probabilities_by_category[0][w])
if w in probabilities_by_category[1]:
ham_distribution += word_frequency[w] * np.log(
probabilities_by_category[1][w])
spam_distribution += np.log(prior_by_category[0])
ham_distribution += np.log(prior_by_category[1])
predict = ''
if spam_distribution > ham_distribution:
predict = 'spam'
else:
predict = 'ham'
word_distribution = [spam_distribution, ham_distribution]
classify_result = predict, word_distribution
return classify_result
if __name__ == '__main__':
spam_folder = 'data/spam'
ham_folder = 'data/ham'
test_folder = 'data/testing'
file_lists = []
for folder in (spam_folder, ham_folder):
file_lists.append(util.get_files_in_folder(folder))
probabilities_by_category = learn_distributions(file_lists)
priors_by_category = [0.5, 0.5]
performance_measures = np.zeros([2, 2])
for filename in util.get_files_in_folder(test_folder):
label, log_posterior = classify_new_email(filename,
probabilities_by_category, priors_by_category)
base = os.path.basename(filename)
true_index = 'ham' in base
guessed_index = label == 'ham'
performance_measures[int(true_index), int(guessed_index)] += 1
template = (
'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'
)
correct = np.diag(performance_measures)
totals = np.sum(performance_measures, 1)
print(template % (correct[0], totals[0], correct[1], totals[1]))
print('----type 1 and 2 here-----')
offset = [-100.0, -10.0, -1.0, 1.0, 10.0]
type1 = []
type2 = []
for offset_value in offset:
performance_measures = np.zeros([2, 2])
for filename in util.get_files_in_folder(test_folder):
label, log_posterior = classify_new_email(filename,
probabilities_by_category, priors_by_category)
if log_posterior[0] + offset_value > log_posterior[1]:
label = 'spam'
else:
label = 'ham'
base = os.path.basename(filename)
true_index = 'ham' in base
guessed_index = label == 'ham'
performance_measures[int(true_index), int(guessed_index)] += 1
type1.append(performance_measures[0][1])
type2.append(performance_measures[1][0])
template = (
'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'
)
correct = np.diag(performance_measures)
totals = np.sum(performance_measures, 1)
print(template % (correct[0], totals[0], correct[1], totals[1]))
plt.title('Type1 vs Type2 Error')
for i in range(0, len(type1)):
plt.scatter(type1[i], type2[i])
plt.xlabel('type1')
plt.ylabel('type2')
plt.legend(offset, loc='best')
plt.show()
<|reserved_special_token_1|>
import os.path
import numpy as np
import matplotlib.pyplot as plt
import util
import collections
def learn_distributions(file_lists_by_category):
"""
Estimate the parameters p_d, and q_d from the training set
Input
-----
file_lists_by_category: A two-element list. The first element is a list of
spam files, and the second element is a list of ham files.
Output
------
probabilities_by_category: A two-element tuple. The first element is a dict
whose keys are words, and whose values are the smoothed estimates of p_d;
the second element is a dict whose keys are words, and whose values are the
smoothed estimates of q_d
"""
spam_dict = util.get_word_freq(file_lists_by_category[0])
ham_dict = util.get_word_freq(file_lists_by_category[1])
spam_length = sum(spam_dict.values())
ham_length = sum(ham_dict.values())
dict_D = util.Counter()
for key in spam_dict:
dict_D[key] += spam_dict[key]
for key in ham_dict:
dict_D[key] += ham_dict[key]
D = len(dict_D)
spam_distribution = {}
ham_distribution = {}
for i in dict_D:
spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)
for i in dict_D:
ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)
probabilities_by_category = spam_distribution, ham_distribution
return probabilities_by_category
def classify_new_email(filename, probabilities_by_category, prior_by_category):
"""
Use Naive Bayes classification to classify the email in the given file.
Inputs
------
filename: name of the file to be classified
probabilities_by_category: output of function learn_distributions
prior_by_category: A two-element list as [\\pi, 1-\\pi], where \\pi is the
parameter in the prior class distribution
Output
------
classify_result: A two-element tuple. The first element is a string whose value
is either 'spam' or 'ham' depending on the classification result, and the
second element is a two-element list as [log p(y=1|x), log p(y=0|x)],
representing the log posterior probabilities
"""
spam_distribution = 0
ham_distribution = 0
word_frequency = util.get_word_freq([filename])
for w in word_frequency:
if w in probabilities_by_category[0]:
spam_distribution += word_frequency[w] * np.log(
probabilities_by_category[0][w])
if w in probabilities_by_category[1]:
ham_distribution += word_frequency[w] * np.log(
probabilities_by_category[1][w])
spam_distribution += np.log(prior_by_category[0])
ham_distribution += np.log(prior_by_category[1])
predict = ''
if spam_distribution > ham_distribution:
predict = 'spam'
else:
predict = 'ham'
word_distribution = [spam_distribution, ham_distribution]
classify_result = predict, word_distribution
return classify_result
if __name__ == '__main__':
spam_folder = 'data/spam'
ham_folder = 'data/ham'
test_folder = 'data/testing'
file_lists = []
for folder in (spam_folder, ham_folder):
file_lists.append(util.get_files_in_folder(folder))
probabilities_by_category = learn_distributions(file_lists)
priors_by_category = [0.5, 0.5]
performance_measures = np.zeros([2, 2])
for filename in util.get_files_in_folder(test_folder):
label, log_posterior = classify_new_email(filename,
probabilities_by_category, priors_by_category)
base = os.path.basename(filename)
true_index = 'ham' in base
guessed_index = label == 'ham'
performance_measures[int(true_index), int(guessed_index)] += 1
template = (
'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'
)
correct = np.diag(performance_measures)
totals = np.sum(performance_measures, 1)
print(template % (correct[0], totals[0], correct[1], totals[1]))
print('----type 1 and 2 here-----')
offset = [-100.0, -10.0, -1.0, 1.0, 10.0]
type1 = []
type2 = []
for offset_value in offset:
performance_measures = np.zeros([2, 2])
for filename in util.get_files_in_folder(test_folder):
label, log_posterior = classify_new_email(filename,
probabilities_by_category, priors_by_category)
if log_posterior[0] + offset_value > log_posterior[1]:
label = 'spam'
else:
label = 'ham'
base = os.path.basename(filename)
true_index = 'ham' in base
guessed_index = label == 'ham'
performance_measures[int(true_index), int(guessed_index)] += 1
type1.append(performance_measures[0][1])
type2.append(performance_measures[1][0])
template = (
'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'
)
correct = np.diag(performance_measures)
totals = np.sum(performance_measures, 1)
print(template % (correct[0], totals[0], correct[1], totals[1]))
plt.title('Type1 vs Type2 Error')
for i in range(0, len(type1)):
plt.scatter(type1[i], type2[i])
plt.xlabel('type1')
plt.ylabel('type2')
plt.legend(offset, loc='best')
plt.show()
<|reserved_special_token_1|>
import os.path
import numpy as np
import matplotlib.pyplot as plt
import util
import collections
def learn_distributions(file_lists_by_category):
"""
Estimate the parameters p_d, and q_d from the training set
Input
-----
file_lists_by_category: A two-element list. The first element is a list of
spam files, and the second element is a list of ham files.
Output
------
probabilities_by_category: A two-element tuple. The first element is a dict
whose keys are words, and whose values are the smoothed estimates of p_d;
the second element is a dict whose keys are words, and whose values are the
smoothed estimates of q_d
"""
### TODO: Write your code here
#get word frequncies in each email category
#key:word, value: number of occurences in this email loader
spam_dict = util.get_word_freq(file_lists_by_category[0])
ham_dict = util.get_word_freq(file_lists_by_category[1])
#get total length of each email loader
spam_length = sum(spam_dict.values())
ham_length = sum(ham_dict.values())
#get the length of the dictionary: D
dict_D = util.Counter()
for key in spam_dict:
dict_D[key] += spam_dict[key]
for key in ham_dict:
dict_D[key] += ham_dict[key]
D = len(dict_D)
spam_distribution = {}
ham_distribution = {}
#get the distributions of two email loaders
for i in dict_D:
spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)
for i in dict_D:
ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)
#create the required tuple
probabilities_by_category = (spam_distribution, ham_distribution)
return probabilities_by_category
def classify_new_email(filename,probabilities_by_category,prior_by_category):
"""
Use Naive Bayes classification to classify the email in the given file.
Inputs
------
filename: name of the file to be classified
probabilities_by_category: output of function learn_distributions
prior_by_category: A two-element list as [\pi, 1-\pi], where \pi is the
parameter in the prior class distribution
Output
------
classify_result: A two-element tuple. The first element is a string whose value
is either 'spam' or 'ham' depending on the classification result, and the
second element is a two-element list as [log p(y=1|x), log p(y=0|x)],
representing the log posterior probabilities
"""
### TODO: Write your code here
spam_distribution = 0
ham_distribution = 0
word_frequency = util.get_word_freq([filename])
for w in word_frequency:
if w in probabilities_by_category[0]:
spam_distribution += word_frequency[w] * np.log(probabilities_by_category[0][w])
if w in probabilities_by_category[1]:
ham_distribution += word_frequency[w] * np.log(probabilities_by_category[1][w])
spam_distribution += np.log(prior_by_category[0])
ham_distribution += np.log(prior_by_category[1])
predict = ""
if(spam_distribution > ham_distribution):
predict = "spam"
else:
predict = "ham"
word_distribution = [spam_distribution, ham_distribution]
classify_result = (predict, word_distribution)
return classify_result
if __name__ == '__main__':
# folder for training and testing
spam_folder = "data/spam"
ham_folder = "data/ham"
test_folder = "data/testing"
# generate the file lists for training
file_lists = []
for folder in (spam_folder, ham_folder):
file_lists.append(util.get_files_in_folder(folder))
# Learn the distributions
probabilities_by_category = learn_distributions(file_lists)
# prior class distribution
priors_by_category = [0.5, 0.5]
# Store the classification results
performance_measures = np.zeros([2,2])
# explanation of performance_measures:
# columns and rows are indexed by 0 = 'spam' and 1 = 'ham'
# rows correspond to true label, columns correspond to guessed label
# to be more clear, performance_measures = [[p1 p2]
# [p3 p4]]
# p1 = Number of emails whose true label is 'spam' and classified as 'spam'
# p2 = Number of emails whose true label is 'spam' and classified as 'ham'
# p3 = Number of emails whose true label is 'ham' and classified as 'spam'
# p4 = Number of emails whose true label is 'ham' and classified as 'ham'
# Classify emails from testing set and measure the performance
for filename in (util.get_files_in_folder(test_folder)):
# Classify
label,log_posterior = classify_new_email(filename,
probabilities_by_category,
priors_by_category)
# Measure performance (the filename indicates the true label)
base = os.path.basename(filename)
true_index = ('ham' in base)
guessed_index = (label == 'ham')
performance_measures[int(true_index), int(guessed_index)] += 1
template="You correctly classified %d out of %d spam emails, and %d out of %d ham emails."
# Correct counts are on the diagonal
correct = np.diag(performance_measures)
# totals are obtained by summing across guessed labels
totals = np.sum(performance_measures, 1)
print(template % (correct[0],totals[0],correct[1],totals[1]))
### TODO: Write your code here to modify the decision rule such that
### Type 1 and Type 2 errors can be traded off, plot the trade-off curve
print("----type 1 and 2 here-----")
offset = [-1E2, -1E1, -1E0, 1E0, 1E1]
type1 = []
type2 = []
for offset_value in offset:
performance_measures = np.zeros([2, 2])
for filename in (util.get_files_in_folder(test_folder)):
# Classify
label, log_posterior = classify_new_email(filename,
probabilities_by_category,
priors_by_category)
#add offset
if(log_posterior[0] + offset_value > log_posterior[1]):
label = "spam"
else:
label = "ham"
# Measure performance (the filename indicates the true label)
base = os.path.basename(filename)
true_index = ('ham' in base)
guessed_index = (label == 'ham')
performance_measures[int(true_index), int(guessed_index)] += 1
type1.append(performance_measures[0][1])
type2.append(performance_measures[1][0])
template = "You correctly classified %d out of %d spam emails, and %d out of %d ham emails."
# Correct counts are on the diagonal
correct = np.diag(performance_measures)
# totals are obtained by summing across guessed labels
totals = np.sum(performance_measures, 1)
print(template % (correct[0], totals[0], correct[1], totals[1]))
plt.title("Type1 vs Type2 Error")
for i in range(0, len(type1)):
plt.scatter(type1[i], type2[i])
plt.xlabel("type1")
plt.ylabel("type2")
plt.legend(offset, loc='best')
plt.show()
|
flexible
|
{
"blob_id": "7ed84706ace2cbf523021887df1e13d113f9ce4c",
"index": 4172,
"step-1": "<mask token>\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\ndef classify_new_email(filename, probabilities_by_category, prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\\\pi, 1-\\\\pi], where \\\\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. 
The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(\n probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(\n probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n predict = ''\n if spam_distribution > ham_distribution:\n predict = 'spam'\n else:\n predict = 'ham'\n word_distribution = [spam_distribution, ham_distribution]\n classify_result = predict, word_distribution\n return classify_result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\ndef classify_new_email(filename, probabilities_by_category, prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\\\pi, 1-\\\\pi], where \\\\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. 
The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(\n probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(\n probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n predict = ''\n if spam_distribution > ham_distribution:\n predict = 'spam'\n else:\n predict = 'ham'\n word_distribution = [spam_distribution, ham_distribution]\n classify_result = predict, word_distribution\n return classify_result\n\n\nif __name__ == '__main__':\n spam_folder = 'data/spam'\n ham_folder = 'data/ham'\n test_folder = 'data/testing'\n file_lists = []\n for folder in (spam_folder, ham_folder):\n file_lists.append(util.get_files_in_folder(folder))\n probabilities_by_category = learn_distributions(file_lists)\n priors_by_category = [0.5, 0.5]\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n print('----type 1 and 2 here-----')\n offset = [-100.0, -10.0, -1.0, 1.0, 10.0]\n type1 = 
[]\n type2 = []\n for offset_value in offset:\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n if log_posterior[0] + offset_value > log_posterior[1]:\n label = 'spam'\n else:\n label = 'ham'\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n type1.append(performance_measures[0][1])\n type2.append(performance_measures[1][0])\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n plt.title('Type1 vs Type2 Error')\n for i in range(0, len(type1)):\n plt.scatter(type1[i], type2[i])\n plt.xlabel('type1')\n plt.ylabel('type2')\n plt.legend(offset, loc='best')\n plt.show()\n",
"step-4": "import os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport util\nimport collections\n\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n spam_distribution = {}\n ham_distribution = {}\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n probabilities_by_category = spam_distribution, ham_distribution\n return probabilities_by_category\n\n\ndef classify_new_email(filename, probabilities_by_category, prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\\\pi, 1-\\\\pi], where \\\\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. 
The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(\n probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(\n probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n predict = ''\n if spam_distribution > ham_distribution:\n predict = 'spam'\n else:\n predict = 'ham'\n word_distribution = [spam_distribution, ham_distribution]\n classify_result = predict, word_distribution\n return classify_result\n\n\nif __name__ == '__main__':\n spam_folder = 'data/spam'\n ham_folder = 'data/ham'\n test_folder = 'data/testing'\n file_lists = []\n for folder in (spam_folder, ham_folder):\n file_lists.append(util.get_files_in_folder(folder))\n probabilities_by_category = learn_distributions(file_lists)\n priors_by_category = [0.5, 0.5]\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n print('----type 1 and 2 here-----')\n offset = [-100.0, -10.0, -1.0, 1.0, 10.0]\n type1 = 
[]\n type2 = []\n for offset_value in offset:\n performance_measures = np.zeros([2, 2])\n for filename in util.get_files_in_folder(test_folder):\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category, priors_by_category)\n if log_posterior[0] + offset_value > log_posterior[1]:\n label = 'spam'\n else:\n label = 'ham'\n base = os.path.basename(filename)\n true_index = 'ham' in base\n guessed_index = label == 'ham'\n performance_measures[int(true_index), int(guessed_index)] += 1\n type1.append(performance_measures[0][1])\n type2.append(performance_measures[1][0])\n template = (\n 'You correctly classified %d out of %d spam emails, and %d out of %d ham emails.'\n )\n correct = np.diag(performance_measures)\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n plt.title('Type1 vs Type2 Error')\n for i in range(0, len(type1)):\n plt.scatter(type1[i], type2[i])\n plt.xlabel('type1')\n plt.ylabel('type2')\n plt.legend(offset, loc='best')\n plt.show()\n",
"step-5": "import os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport util\nimport collections\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, and q_d from the training set\n\n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of\n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict\n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the\n smoothed estimates of q_d\n \"\"\"\n ### TODO: Write your code here\n\n #get word frequncies in each email category\n #key:word, value: number of occurences in this email loader\n spam_dict = util.get_word_freq(file_lists_by_category[0])\n ham_dict = util.get_word_freq(file_lists_by_category[1])\n\n #get total length of each email loader\n spam_length = sum(spam_dict.values())\n ham_length = sum(ham_dict.values())\n\n #get the length of the dictionary: D\n dict_D = util.Counter()\n for key in spam_dict:\n dict_D[key] += spam_dict[key]\n for key in ham_dict:\n dict_D[key] += ham_dict[key]\n D = len(dict_D)\n\n spam_distribution = {}\n ham_distribution = {}\n #get the distributions of two email loaders\n for i in dict_D:\n spam_distribution[i] = (spam_dict[i] + 1) / (D + spam_length)\n\n for i in dict_D:\n ham_distribution[i] = (ham_dict[i] + 1) / (D + ham_length)\n #create the required tuple\n probabilities_by_category = (spam_distribution, ham_distribution)\n return probabilities_by_category\n\n\ndef classify_new_email(filename,probabilities_by_category,prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as 
[\\pi, 1-\\pi], where \\pi is the\n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the\n second element is a two-element list as [log p(y=1|x), log p(y=0|x)],\n representing the log posterior probabilities\n \"\"\"\n ### TODO: Write your code here\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n\n predict = \"\"\n if(spam_distribution > ham_distribution):\n predict = \"spam\"\n else:\n predict = \"ham\"\n\n word_distribution = [spam_distribution, ham_distribution]\n\n classify_result = (predict, word_distribution)\n\n return classify_result\n\nif __name__ == '__main__':\n\n # folder for training and testing\n spam_folder = \"data/spam\"\n ham_folder = \"data/ham\"\n test_folder = \"data/testing\"\n\n # generate the file lists for training\n file_lists = []\n for folder in (spam_folder, ham_folder):\n file_lists.append(util.get_files_in_folder(folder))\n\n\n # Learn the distributions\n probabilities_by_category = learn_distributions(file_lists)\n\n # prior class distribution\n priors_by_category = [0.5, 0.5]\n\n # Store the classification results\n performance_measures = np.zeros([2,2])\n # explanation of performance_measures:\n # columns and rows are indexed by 0 = 'spam' and 1 = 'ham'\n # rows correspond to true label, columns correspond to guessed label\n # to be more clear, performance_measures = [[p1 p2]\n # [p3 p4]]\n # p1 = Number of emails whose true label is 'spam' and 
classified as 'spam'\n # p2 = Number of emails whose true label is 'spam' and classified as 'ham'\n # p3 = Number of emails whose true label is 'ham' and classified as 'spam'\n # p4 = Number of emails whose true label is 'ham' and classified as 'ham'\n\n # Classify emails from testing set and measure the performance\n for filename in (util.get_files_in_folder(test_folder)):\n # Classify\n label,log_posterior = classify_new_email(filename,\n probabilities_by_category,\n priors_by_category)\n\n # Measure performance (the filename indicates the true label)\n base = os.path.basename(filename)\n true_index = ('ham' in base)\n guessed_index = (label == 'ham')\n performance_measures[int(true_index), int(guessed_index)] += 1\n\n template=\"You correctly classified %d out of %d spam emails, and %d out of %d ham emails.\"\n # Correct counts are on the diagonal\n correct = np.diag(performance_measures)\n # totals are obtained by summing across guessed labels\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0],totals[0],correct[1],totals[1]))\n\n\n ### TODO: Write your code here to modify the decision rule such that\n ### Type 1 and Type 2 errors can be traded off, plot the trade-off curve\n print(\"----type 1 and 2 here-----\")\n offset = [-1E2, -1E1, -1E0, 1E0, 1E1]\n type1 = []\n type2 = []\n for offset_value in offset:\n performance_measures = np.zeros([2, 2])\n for filename in (util.get_files_in_folder(test_folder)):\n # Classify\n label, log_posterior = classify_new_email(filename,\n probabilities_by_category,\n priors_by_category)\n\n #add offset\n if(log_posterior[0] + offset_value > log_posterior[1]):\n label = \"spam\"\n else:\n label = \"ham\"\n\n # Measure performance (the filename indicates the true label)\n base = os.path.basename(filename)\n true_index = ('ham' in base)\n guessed_index = (label == 'ham')\n performance_measures[int(true_index), int(guessed_index)] += 1\n\n type1.append(performance_measures[0][1])\n 
type2.append(performance_measures[1][0])\n\n template = \"You correctly classified %d out of %d spam emails, and %d out of %d ham emails.\"\n # Correct counts are on the diagonal\n correct = np.diag(performance_measures)\n # totals are obtained by summing across guessed labels\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0], totals[0], correct[1], totals[1]))\n plt.title(\"Type1 vs Type2 Error\")\n for i in range(0, len(type1)):\n plt.scatter(type1[i], type2[i])\n\n plt.xlabel(\"type1\")\n plt.ylabel(\"type2\")\n plt.legend(offset, loc='best')\n plt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from setuptools import setup
setup(name = "dragonfab",
version = "1.3.0",
description = "Fabric support",
author = "Joel Pitt",
author_email = "joel@joelpitt.com",
url = "https://github.com/ferrouswheel/dragonfab",
install_requires = ['fabric', 'pip>=1.4', 'wheel'],
packages = ['dragonfab'],
)
|
normal
|
{
"blob_id": "61135a10adefd6ba8ffd63e997fa91ce9c78de06",
"index": 6444,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='dragonfab', version='1.3.0', description='Fabric support',\n author='Joel Pitt', author_email='joel@joelpitt.com', url=\n 'https://github.com/ferrouswheel/dragonfab', install_requires=['fabric',\n 'pip>=1.4', 'wheel'], packages=['dragonfab'])\n",
"step-3": "from setuptools import setup\nsetup(name='dragonfab', version='1.3.0', description='Fabric support',\n author='Joel Pitt', author_email='joel@joelpitt.com', url=\n 'https://github.com/ferrouswheel/dragonfab', install_requires=['fabric',\n 'pip>=1.4', 'wheel'], packages=['dragonfab'])\n",
"step-4": "from setuptools import setup\n\nsetup(name = \"dragonfab\",\n version = \"1.3.0\",\n description = \"Fabric support\",\n author = \"Joel Pitt\",\n author_email = \"joel@joelpitt.com\",\n url = \"https://github.com/ferrouswheel/dragonfab\",\n install_requires = ['fabric', 'pip>=1.4', 'wheel'],\n packages = ['dragonfab'],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
Can you print numbers from 1 to 100 without using any loop.
'''
# Use Recursion
|
flexible
|
{
"blob_id": "cc703690151acd17430b5a9715e71a694fdeca10",
"index": 2116,
"step-1": "<mask token>\n",
"step-2": "'''\nCan you print numbers from 1 to 100 without using any loop.\n'''\n\n# Use Recursion",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../')
<|reserved_special_token_0|>
if __name__ == '__main__':
fn = 'input.txt'
with open(fn) as f:
program = Program([int(i) for i in f.readline().split(',')])
program.run()
result = program.instructions
<|reserved_special_token_1|>
import sys
sys.path.append('../')
from IntcodeComputer.intcode import Program
if __name__ == '__main__':
fn = 'input.txt'
with open(fn) as f:
program = Program([int(i) for i in f.readline().split(',')])
program.run()
result = program.instructions
|
flexible
|
{
"blob_id": "a54c8ab63c1e0f50d254d6c97ca3f167db7142e9",
"index": 4956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../')\n<mask token>\nif __name__ == '__main__':\n fn = 'input.txt'\n with open(fn) as f:\n program = Program([int(i) for i in f.readline().split(',')])\n program.run()\n result = program.instructions\n",
"step-3": "import sys\nsys.path.append('../')\nfrom IntcodeComputer.intcode import Program\nif __name__ == '__main__':\n fn = 'input.txt'\n with open(fn) as f:\n program = Program([int(i) for i in f.readline().split(',')])\n program.run()\n result = program.instructions\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class ObjectValidationErrors(Exception):
def __init__(self, errors):
self.errors = errors
def _get_directory():
p = os.path.dirname(__file__)
p = os.path.join(p, os.pardir, os.pardir, 'schema')
p = os.path.abspath(p)
return p
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ObjectValidationErrors(Exception):
def __init__(self, errors):
self.errors = errors
def _get_directory():
p = os.path.dirname(__file__)
p = os.path.join(p, os.pardir, os.pardir, 'schema')
p = os.path.abspath(p)
return p
def _get_schema(name):
""" Load, if necessary, the schema for the specific name
and return it """
global SCHEMA
loaded_schema = SCHEMA.get(name)
if not loaded_schema:
filename = '{}/{}.json'.format(_get_directory(), name)
if os.path.exists(filename):
SCHEMA[name] = json.load(open(filename, 'r'))
return SCHEMA.get(name)
def validation_check(object_type, data):
from jsonschema import Draft4Validator
schema = _get_schema(object_type)
if not schema:
raise Exception()
new_validators = v.load_validators()
custom_validator = validators.extend(Draft4Validator, validators=
new_validators)
validator = custom_validator(schema)
errors = sorted(validator.iter_errors(data), key=lambda e: e.path)
errors = [v.message for v in errors]
return errors
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SCHEMA = {'publisher': None, 'dataset': None}
class ObjectValidationErrors(Exception):
def __init__(self, errors):
self.errors = errors
def _get_directory():
p = os.path.dirname(__file__)
p = os.path.join(p, os.pardir, os.pardir, 'schema')
p = os.path.abspath(p)
return p
def _get_schema(name):
""" Load, if necessary, the schema for the specific name
and return it """
global SCHEMA
loaded_schema = SCHEMA.get(name)
if not loaded_schema:
filename = '{}/{}.json'.format(_get_directory(), name)
if os.path.exists(filename):
SCHEMA[name] = json.load(open(filename, 'r'))
return SCHEMA.get(name)
def validation_check(object_type, data):
from jsonschema import Draft4Validator
schema = _get_schema(object_type)
if not schema:
raise Exception()
new_validators = v.load_validators()
custom_validator = validators.extend(Draft4Validator, validators=
new_validators)
validator = custom_validator(schema)
errors = sorted(validator.iter_errors(data), key=lambda e: e.path)
errors = [v.message for v in errors]
return errors
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import json
import pubtool.lib.validators as v
from jsonschema import validate, validators
from jsonschema.exceptions import ValidationError
SCHEMA = {'publisher': None, 'dataset': None}
class ObjectValidationErrors(Exception):
def __init__(self, errors):
self.errors = errors
def _get_directory():
p = os.path.dirname(__file__)
p = os.path.join(p, os.pardir, os.pardir, 'schema')
p = os.path.abspath(p)
return p
def _get_schema(name):
""" Load, if necessary, the schema for the specific name
and return it """
global SCHEMA
loaded_schema = SCHEMA.get(name)
if not loaded_schema:
filename = '{}/{}.json'.format(_get_directory(), name)
if os.path.exists(filename):
SCHEMA[name] = json.load(open(filename, 'r'))
return SCHEMA.get(name)
def validation_check(object_type, data):
from jsonschema import Draft4Validator
schema = _get_schema(object_type)
if not schema:
raise Exception()
new_validators = v.load_validators()
custom_validator = validators.extend(Draft4Validator, validators=
new_validators)
validator = custom_validator(schema)
errors = sorted(validator.iter_errors(data), key=lambda e: e.path)
errors = [v.message for v in errors]
return errors
<|reserved_special_token_1|>
"""
Schema management for various object types (publisher, dataset etc). Loads
the jsonschema and allows callers to validate a dictionary against them.
"""
import os
import json
import pubtool.lib.validators as v
from jsonschema import validate, validators
from jsonschema.exceptions import ValidationError
SCHEMA = {
"publisher": None,
"dataset": None
}
class ObjectValidationErrors(Exception):
def __init__(self, errors):
self.errors = errors
def _get_directory():
p = os.path.dirname(__file__)
p = os.path.join(p, os.pardir, os.pardir, "schema")
p = os.path.abspath(p)
return p
def _get_schema(name):
""" Load, if necessary, the schema for the specific name
and return it """
global SCHEMA
loaded_schema = SCHEMA.get(name)
if not loaded_schema:
filename = "{}/{}.json".format(_get_directory(), name)
if os.path.exists(filename):
SCHEMA[name] = json.load(open(filename, 'r'))
return SCHEMA.get(name)
def validation_check(object_type, data):
from jsonschema import Draft4Validator
schema = _get_schema(object_type)
if not schema:
# raise ValidationError, not Exception
raise Exception()
new_validators = v.load_validators()
custom_validator = validators.extend(
Draft4Validator,
validators=new_validators
)
validator = custom_validator(schema)
errors = sorted(validator.iter_errors(data), key=lambda e: e.path)
errors = [v.message for v in errors]
return errors
|
flexible
|
{
"blob_id": "c4f39f9212fbe0f591543d143cb8f1721c1f8e1e",
"index": 7056,
"step-1": "<mask token>\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = '{}/{}.json'.format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n return SCHEMA.get(name)\n\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n schema = _get_schema(object_type)\n if not schema:\n raise Exception()\n new_validators = v.load_validators()\n custom_validator = validators.extend(Draft4Validator, validators=\n new_validators)\n validator = custom_validator(schema)\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n return errors\n",
"step-3": "<mask token>\nSCHEMA = {'publisher': None, 'dataset': None}\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = '{}/{}.json'.format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n return SCHEMA.get(name)\n\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n schema = _get_schema(object_type)\n if not schema:\n raise Exception()\n new_validators = v.load_validators()\n custom_validator = validators.extend(Draft4Validator, validators=\n new_validators)\n validator = custom_validator(schema)\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n return errors\n",
"step-4": "<mask token>\nimport os\nimport json\nimport pubtool.lib.validators as v\nfrom jsonschema import validate, validators\nfrom jsonschema.exceptions import ValidationError\nSCHEMA = {'publisher': None, 'dataset': None}\n\n\nclass ObjectValidationErrors(Exception):\n\n def __init__(self, errors):\n self.errors = errors\n\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, 'schema')\n p = os.path.abspath(p)\n return p\n\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = '{}/{}.json'.format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n return SCHEMA.get(name)\n\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n schema = _get_schema(object_type)\n if not schema:\n raise Exception()\n new_validators = v.load_validators()\n custom_validator = validators.extend(Draft4Validator, validators=\n new_validators)\n validator = custom_validator(schema)\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n return errors\n",
"step-5": "\"\"\"\nSchema management for various object types (publisher, dataset etc). Loads\nthe jsonschema and allows callers to validate a dictionary against them.\n\"\"\"\nimport os\nimport json\n\nimport pubtool.lib.validators as v\n\nfrom jsonschema import validate, validators\nfrom jsonschema.exceptions import ValidationError\n\nSCHEMA = {\n \"publisher\": None,\n \"dataset\": None\n}\n\nclass ObjectValidationErrors(Exception):\n def __init__(self, errors):\n self.errors = errors\n\ndef _get_directory():\n p = os.path.dirname(__file__)\n p = os.path.join(p, os.pardir, os.pardir, \"schema\")\n p = os.path.abspath(p)\n return p\n\ndef _get_schema(name):\n \"\"\" Load, if necessary, the schema for the specific name\n and return it \"\"\"\n global SCHEMA\n\n loaded_schema = SCHEMA.get(name)\n if not loaded_schema:\n filename = \"{}/{}.json\".format(_get_directory(), name)\n if os.path.exists(filename):\n SCHEMA[name] = json.load(open(filename, 'r'))\n\n return SCHEMA.get(name)\n\ndef validation_check(object_type, data):\n from jsonschema import Draft4Validator\n\n schema = _get_schema(object_type)\n if not schema:\n # raise ValidationError, not Exception\n raise Exception()\n\n new_validators = v.load_validators()\n\n custom_validator = validators.extend(\n Draft4Validator,\n validators=new_validators\n )\n validator = custom_validator(schema)\n\n errors = sorted(validator.iter_errors(data), key=lambda e: e.path)\n errors = [v.message for v in errors]\n\n return errors",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
__author__ = 'Freek'
__build__ = 'versie 1.0'
from iNStagram.file_io.fileio import lees_stationgegevens
from iNStagram.api_requests.app_requests import request_instagram
from tkinter import *
startscherm = Tk()
startscherm.title('Foto of video in de buurt!')
startscherm.minsize(width=790, height=600, )
startscherm.configure(bg='yellow')
infolabel = Label(startscherm, fg='blue', text='Voer een station in')
infolabel.place(x=0, y=0)
e = Entry(master=startscherm, fg='black')
e.place(x=93, y=480)
T = Text(startscherm, height=25, width=120, bg='yellow', fg='blue')
T.pack()
def weergeef_instagram_links():
"""
Geeft de bijbehorende station dict uit de lijst van alle stations (in de NS API)
:param stationnaam: geef ofwel kort, middel als lange stationnaam om de bijbehorende station te identificeren
:type stationnaam: str
:return: station dict met namen en locatie
:rtype: dict
"""
stationnaam = e.get()
stations = lees_stationgegevens()
for station in stations:
if stationnaam in station["namen"].values():
print("Station gevonden")
lat, lon = station["locatie"]
lat = float(lat)
lon = float(lon)
instagram_data_dict = request_instagram(lat, lon)
for data in instagram_data_dict:
print(data) # eerst kijken
import datetime
regeltekst = "%-30s %s %s %s"%(data["plaatsnaam"],datetime.datetime.fromtimestamp(data["tijd"]),data["link"],data["type"])
T.insert(END, regeltekst + '\n')
else:
print("Geen station gevonden")
# return "GEEEN STAZION"
b = Button(master=startscherm, text="Zoek naar media", width=20, height=3, bg='blue', fg='white',
command=weergeef_instagram_links)
b.place(x=93, y=500)
startscherm.mainloop()
|
normal
|
{
"blob_id": "2804d49fc9f0e40859de1e8eb4f04a849639b1d4",
"index": 8277,
"step-1": "<mask token>\n\n\ndef weergeef_instagram_links():\n \"\"\"\n Geeft de bijbehorende station dict uit de lijst van alle stations (in de NS API)\n :param stationnaam: geef ofwel kort, middel als lange stationnaam om de bijbehorende station te identificeren\n :type stationnaam: str\n :return: station dict met namen en locatie\n :rtype: dict\n \"\"\"\n stationnaam = e.get()\n stations = lees_stationgegevens()\n for station in stations:\n if stationnaam in station['namen'].values():\n print('Station gevonden')\n lat, lon = station['locatie']\n lat = float(lat)\n lon = float(lon)\n instagram_data_dict = request_instagram(lat, lon)\n for data in instagram_data_dict:\n print(data)\n import datetime\n regeltekst = '%-30s %s %s %s' % (data['plaatsnaam'],\n datetime.datetime.fromtimestamp(data['tijd']), data[\n 'link'], data['type'])\n T.insert(END, regeltekst + '\\n')\n else:\n print('Geen station gevonden')\n\n\n<mask token>\n",
"step-2": "<mask token>\nstartscherm.title('Foto of video in de buurt!')\nstartscherm.minsize(width=790, height=600)\nstartscherm.configure(bg='yellow')\n<mask token>\ninfolabel.place(x=0, y=0)\n<mask token>\ne.place(x=93, y=480)\n<mask token>\nT.pack()\n\n\ndef weergeef_instagram_links():\n \"\"\"\n Geeft de bijbehorende station dict uit de lijst van alle stations (in de NS API)\n :param stationnaam: geef ofwel kort, middel als lange stationnaam om de bijbehorende station te identificeren\n :type stationnaam: str\n :return: station dict met namen en locatie\n :rtype: dict\n \"\"\"\n stationnaam = e.get()\n stations = lees_stationgegevens()\n for station in stations:\n if stationnaam in station['namen'].values():\n print('Station gevonden')\n lat, lon = station['locatie']\n lat = float(lat)\n lon = float(lon)\n instagram_data_dict = request_instagram(lat, lon)\n for data in instagram_data_dict:\n print(data)\n import datetime\n regeltekst = '%-30s %s %s %s' % (data['plaatsnaam'],\n datetime.datetime.fromtimestamp(data['tijd']), data[\n 'link'], data['type'])\n T.insert(END, regeltekst + '\\n')\n else:\n print('Geen station gevonden')\n\n\n<mask token>\nb.place(x=93, y=500)\nstartscherm.mainloop()\n",
"step-3": "__author__ = 'Freek'\n__build__ = 'versie 1.0'\n<mask token>\nstartscherm = Tk()\nstartscherm.title('Foto of video in de buurt!')\nstartscherm.minsize(width=790, height=600)\nstartscherm.configure(bg='yellow')\ninfolabel = Label(startscherm, fg='blue', text='Voer een station in')\ninfolabel.place(x=0, y=0)\ne = Entry(master=startscherm, fg='black')\ne.place(x=93, y=480)\nT = Text(startscherm, height=25, width=120, bg='yellow', fg='blue')\nT.pack()\n\n\ndef weergeef_instagram_links():\n \"\"\"\n Geeft de bijbehorende station dict uit de lijst van alle stations (in de NS API)\n :param stationnaam: geef ofwel kort, middel als lange stationnaam om de bijbehorende station te identificeren\n :type stationnaam: str\n :return: station dict met namen en locatie\n :rtype: dict\n \"\"\"\n stationnaam = e.get()\n stations = lees_stationgegevens()\n for station in stations:\n if stationnaam in station['namen'].values():\n print('Station gevonden')\n lat, lon = station['locatie']\n lat = float(lat)\n lon = float(lon)\n instagram_data_dict = request_instagram(lat, lon)\n for data in instagram_data_dict:\n print(data)\n import datetime\n regeltekst = '%-30s %s %s %s' % (data['plaatsnaam'],\n datetime.datetime.fromtimestamp(data['tijd']), data[\n 'link'], data['type'])\n T.insert(END, regeltekst + '\\n')\n else:\n print('Geen station gevonden')\n\n\nb = Button(master=startscherm, text='Zoek naar media', width=20, height=3,\n bg='blue', fg='white', command=weergeef_instagram_links)\nb.place(x=93, y=500)\nstartscherm.mainloop()\n",
"step-4": "__author__ = 'Freek'\n__build__ = 'versie 1.0'\nfrom iNStagram.file_io.fileio import lees_stationgegevens\nfrom iNStagram.api_requests.app_requests import request_instagram\nfrom tkinter import *\nstartscherm = Tk()\nstartscherm.title('Foto of video in de buurt!')\nstartscherm.minsize(width=790, height=600)\nstartscherm.configure(bg='yellow')\ninfolabel = Label(startscherm, fg='blue', text='Voer een station in')\ninfolabel.place(x=0, y=0)\ne = Entry(master=startscherm, fg='black')\ne.place(x=93, y=480)\nT = Text(startscherm, height=25, width=120, bg='yellow', fg='blue')\nT.pack()\n\n\ndef weergeef_instagram_links():\n \"\"\"\n Geeft de bijbehorende station dict uit de lijst van alle stations (in de NS API)\n :param stationnaam: geef ofwel kort, middel als lange stationnaam om de bijbehorende station te identificeren\n :type stationnaam: str\n :return: station dict met namen en locatie\n :rtype: dict\n \"\"\"\n stationnaam = e.get()\n stations = lees_stationgegevens()\n for station in stations:\n if stationnaam in station['namen'].values():\n print('Station gevonden')\n lat, lon = station['locatie']\n lat = float(lat)\n lon = float(lon)\n instagram_data_dict = request_instagram(lat, lon)\n for data in instagram_data_dict:\n print(data)\n import datetime\n regeltekst = '%-30s %s %s %s' % (data['plaatsnaam'],\n datetime.datetime.fromtimestamp(data['tijd']), data[\n 'link'], data['type'])\n T.insert(END, regeltekst + '\\n')\n else:\n print('Geen station gevonden')\n\n\nb = Button(master=startscherm, text='Zoek naar media', width=20, height=3,\n bg='blue', fg='white', command=weergeef_instagram_links)\nb.place(x=93, y=500)\nstartscherm.mainloop()\n",
"step-5": "__author__ = 'Freek'\n__build__ = 'versie 1.0'\n\nfrom iNStagram.file_io.fileio import lees_stationgegevens\nfrom iNStagram.api_requests.app_requests import request_instagram\n\nfrom tkinter import *\n\nstartscherm = Tk()\nstartscherm.title('Foto of video in de buurt!')\nstartscherm.minsize(width=790, height=600, )\nstartscherm.configure(bg='yellow')\ninfolabel = Label(startscherm, fg='blue', text='Voer een station in')\ninfolabel.place(x=0, y=0)\n\ne = Entry(master=startscherm, fg='black')\ne.place(x=93, y=480)\n\nT = Text(startscherm, height=25, width=120, bg='yellow', fg='blue')\nT.pack()\n\ndef weergeef_instagram_links():\n \"\"\"\n Geeft de bijbehorende station dict uit de lijst van alle stations (in de NS API)\n :param stationnaam: geef ofwel kort, middel als lange stationnaam om de bijbehorende station te identificeren\n :type stationnaam: str\n :return: station dict met namen en locatie\n :rtype: dict\n \"\"\"\n stationnaam = e.get()\n stations = lees_stationgegevens()\n for station in stations:\n if stationnaam in station[\"namen\"].values():\n print(\"Station gevonden\")\n lat, lon = station[\"locatie\"]\n lat = float(lat)\n lon = float(lon)\n instagram_data_dict = request_instagram(lat, lon)\n for data in instagram_data_dict:\n print(data) # eerst kijken\n import datetime\n regeltekst = \"%-30s %s %s %s\"%(data[\"plaatsnaam\"],datetime.datetime.fromtimestamp(data[\"tijd\"]),data[\"link\"],data[\"type\"])\n T.insert(END, regeltekst + '\\n')\n\n\n else:\n print(\"Geen station gevonden\")\n # return \"GEEEN STAZION\"\n\n\nb = Button(master=startscherm, text=\"Zoek naar media\", width=20, height=3, bg='blue', fg='white',\n command=weergeef_instagram_links)\nb.place(x=93, y=500)\n\nstartscherm.mainloop()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys, os
sys.path.append(os.path.abspath('../models'))
from GANSynth import flags as lib_flags
from GANSynth import generate_util as gu
from GANSynth import model as lib_model
from GANSynth import util
from GANSynth import train_util
import tensorflow as tf
import numpy as np
import json
from models.GenerativeModel import GenerativeModel
class GANSynthWrapper(GenerativeModel):
def __init__(self, ckpt_path, data_size, use_approx=True):
super(GANSynthWrapper, self).__init__(use_approx=use_approx)
self.latent_size = 256
self.data_size = data_size
self.data_dim = 1
self.expected_distance = 22.6
self.gen_data_size = 64000
self.model = lib_model.Model.load_from_path(ckpt_path)
self.batch_size = self.model.batch_size
self.build()
self.sess = tf.Session()
exp_vars = tf.global_variables()
exp_vars = [var for var in exp_vars if 'ExponentialMovingAverage' or 'global_step' in var.name]
init_op = tf.initialize_variables(exp_vars)
self.sess.run(init_op)
def build(self):
self.random_idx = tf.placeholder(tf.int32, shape=(None, 1), name='random_idx')
self.target_data = tf.placeholder(tf.float32, shape=(None, self.data_size), name='target_data')
with tf.name_scope('Gradient'):
slices = tf.gather(self.model.fake_waves_ph[..., 0], self.random_idx[..., 0], axis=1)
# slices = tf.gather(tf.reshape(self.model.fake_data_ph[..., 0], [-1, 128 * 1024]), self.random_idx[..., 0], axis=1)
self.gradient = self.jacobian(slices, self.model.noises_ph, parallel_iterations=1)
def calc_model_gradient(self, latent_vector):
if self.use_approx:
jacobian = self.calc_model_gradient_FDM(latent_vector, delta=5e-5)
return jacobian
else:
idx = np.arange(self.data_size).reshape(-1, 1)
extend_z = np.zeros((self.batch_size, self.latent_size))
min_size = np.minimum(latent_vector.shape[0], 8)
extend_z[:min_size] = latent_vector[:min_size]
pitches = []
for i in range(extend_z.shape[0]):
pitches.append(50)
pitches = np.array(pitches)
labels = self.model._pitches_to_labels(pitches)
gradient = self.sess.run(self.gradient, feed_dict={self.model.labels_ph: labels, self.model.noises_ph: extend_z, self.random_idx: idx})[0]
return gradient
def calc_model_gradient_FDM(self, latent_vector, delta=1e-4):
sample_latents = np.repeat(latent_vector.reshape(1, -1), repeats=self.latent_size + 1, axis=0)
sample_latents[1:] += np.identity(self.latent_size) * delta
sample_datas = self.decode(sample_latents)
jacobian = (sample_datas[1:] - sample_datas[0]).T / delta
idx = np.random.choice(self.data_size, 1024, replace=False)
return jacobian[idx]
def generate_data(self, n=1, z=None):
if z is None:
z = np.random.normal(size=[n, self.latent_size])
pitches = []
for i in range(z.shape[0]):
pitches.append(50)
pitches = np.array(pitches)
waves = self.model.generate_samples_from_z(z, pitches, max_audio_length=self.data_size)
return waves
def decode(self, latent_vector):
pitches = []
for i in range(latent_vector.shape[0]):
pitches.append(50)
pitches = np.array(pitches)
# print(latent_vector.shape)
waves = self.model.generate_samples_from_z(latent_vector, pitches, max_audio_length=self.data_size)
return waves
def get_random_latent(self):
return np.random.normal(0, 1, self.latent_size)
|
normal
|
{
"blob_id": "f13a2820fe1766354109d1163c7e6fe887cd6f34",
"index": 7051,
"step-1": "<mask token>\n\n\nclass GANSynthWrapper(GenerativeModel):\n\n def __init__(self, ckpt_path, data_size, use_approx=True):\n super(GANSynthWrapper, self).__init__(use_approx=use_approx)\n self.latent_size = 256\n self.data_size = data_size\n self.data_dim = 1\n self.expected_distance = 22.6\n self.gen_data_size = 64000\n self.model = lib_model.Model.load_from_path(ckpt_path)\n self.batch_size = self.model.batch_size\n self.build()\n self.sess = tf.Session()\n exp_vars = tf.global_variables()\n exp_vars = [var for var in exp_vars if 'ExponentialMovingAverage' or\n 'global_step' in var.name]\n init_op = tf.initialize_variables(exp_vars)\n self.sess.run(init_op)\n <mask token>\n\n def calc_model_gradient(self, latent_vector):\n if self.use_approx:\n jacobian = self.calc_model_gradient_FDM(latent_vector, delta=5e-05)\n return jacobian\n else:\n idx = np.arange(self.data_size).reshape(-1, 1)\n extend_z = np.zeros((self.batch_size, self.latent_size))\n min_size = np.minimum(latent_vector.shape[0], 8)\n extend_z[:min_size] = latent_vector[:min_size]\n pitches = []\n for i in range(extend_z.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n labels = self.model._pitches_to_labels(pitches)\n gradient = self.sess.run(self.gradient, feed_dict={self.model.\n labels_ph: labels, self.model.noises_ph: extend_z, self.\n random_idx: idx})[0]\n return gradient\n\n def calc_model_gradient_FDM(self, latent_vector, delta=0.0001):\n sample_latents = np.repeat(latent_vector.reshape(1, -1), repeats=\n self.latent_size + 1, axis=0)\n sample_latents[1:] += np.identity(self.latent_size) * delta\n sample_datas = self.decode(sample_latents)\n jacobian = (sample_datas[1:] - sample_datas[0]).T / delta\n idx = np.random.choice(self.data_size, 1024, replace=False)\n return jacobian[idx]\n\n def generate_data(self, n=1, z=None):\n if z is None:\n z = np.random.normal(size=[n, self.latent_size])\n pitches = []\n for i in range(z.shape[0]):\n pitches.append(50)\n pitches = 
np.array(pitches)\n waves = self.model.generate_samples_from_z(z, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def decode(self, latent_vector):\n pitches = []\n for i in range(latent_vector.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n waves = self.model.generate_samples_from_z(latent_vector, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def get_random_latent(self):\n return np.random.normal(0, 1, self.latent_size)\n",
"step-2": "<mask token>\n\n\nclass GANSynthWrapper(GenerativeModel):\n\n def __init__(self, ckpt_path, data_size, use_approx=True):\n super(GANSynthWrapper, self).__init__(use_approx=use_approx)\n self.latent_size = 256\n self.data_size = data_size\n self.data_dim = 1\n self.expected_distance = 22.6\n self.gen_data_size = 64000\n self.model = lib_model.Model.load_from_path(ckpt_path)\n self.batch_size = self.model.batch_size\n self.build()\n self.sess = tf.Session()\n exp_vars = tf.global_variables()\n exp_vars = [var for var in exp_vars if 'ExponentialMovingAverage' or\n 'global_step' in var.name]\n init_op = tf.initialize_variables(exp_vars)\n self.sess.run(init_op)\n\n def build(self):\n self.random_idx = tf.placeholder(tf.int32, shape=(None, 1), name=\n 'random_idx')\n self.target_data = tf.placeholder(tf.float32, shape=(None, self.\n data_size), name='target_data')\n with tf.name_scope('Gradient'):\n slices = tf.gather(self.model.fake_waves_ph[..., 0], self.\n random_idx[..., 0], axis=1)\n self.gradient = self.jacobian(slices, self.model.noises_ph,\n parallel_iterations=1)\n\n def calc_model_gradient(self, latent_vector):\n if self.use_approx:\n jacobian = self.calc_model_gradient_FDM(latent_vector, delta=5e-05)\n return jacobian\n else:\n idx = np.arange(self.data_size).reshape(-1, 1)\n extend_z = np.zeros((self.batch_size, self.latent_size))\n min_size = np.minimum(latent_vector.shape[0], 8)\n extend_z[:min_size] = latent_vector[:min_size]\n pitches = []\n for i in range(extend_z.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n labels = self.model._pitches_to_labels(pitches)\n gradient = self.sess.run(self.gradient, feed_dict={self.model.\n labels_ph: labels, self.model.noises_ph: extend_z, self.\n random_idx: idx})[0]\n return gradient\n\n def calc_model_gradient_FDM(self, latent_vector, delta=0.0001):\n sample_latents = np.repeat(latent_vector.reshape(1, -1), repeats=\n self.latent_size + 1, axis=0)\n sample_latents[1:] += 
np.identity(self.latent_size) * delta\n sample_datas = self.decode(sample_latents)\n jacobian = (sample_datas[1:] - sample_datas[0]).T / delta\n idx = np.random.choice(self.data_size, 1024, replace=False)\n return jacobian[idx]\n\n def generate_data(self, n=1, z=None):\n if z is None:\n z = np.random.normal(size=[n, self.latent_size])\n pitches = []\n for i in range(z.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n waves = self.model.generate_samples_from_z(z, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def decode(self, latent_vector):\n pitches = []\n for i in range(latent_vector.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n waves = self.model.generate_samples_from_z(latent_vector, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def get_random_latent(self):\n return np.random.normal(0, 1, self.latent_size)\n",
"step-3": "<mask token>\nsys.path.append(os.path.abspath('../models'))\n<mask token>\n\n\nclass GANSynthWrapper(GenerativeModel):\n\n def __init__(self, ckpt_path, data_size, use_approx=True):\n super(GANSynthWrapper, self).__init__(use_approx=use_approx)\n self.latent_size = 256\n self.data_size = data_size\n self.data_dim = 1\n self.expected_distance = 22.6\n self.gen_data_size = 64000\n self.model = lib_model.Model.load_from_path(ckpt_path)\n self.batch_size = self.model.batch_size\n self.build()\n self.sess = tf.Session()\n exp_vars = tf.global_variables()\n exp_vars = [var for var in exp_vars if 'ExponentialMovingAverage' or\n 'global_step' in var.name]\n init_op = tf.initialize_variables(exp_vars)\n self.sess.run(init_op)\n\n def build(self):\n self.random_idx = tf.placeholder(tf.int32, shape=(None, 1), name=\n 'random_idx')\n self.target_data = tf.placeholder(tf.float32, shape=(None, self.\n data_size), name='target_data')\n with tf.name_scope('Gradient'):\n slices = tf.gather(self.model.fake_waves_ph[..., 0], self.\n random_idx[..., 0], axis=1)\n self.gradient = self.jacobian(slices, self.model.noises_ph,\n parallel_iterations=1)\n\n def calc_model_gradient(self, latent_vector):\n if self.use_approx:\n jacobian = self.calc_model_gradient_FDM(latent_vector, delta=5e-05)\n return jacobian\n else:\n idx = np.arange(self.data_size).reshape(-1, 1)\n extend_z = np.zeros((self.batch_size, self.latent_size))\n min_size = np.minimum(latent_vector.shape[0], 8)\n extend_z[:min_size] = latent_vector[:min_size]\n pitches = []\n for i in range(extend_z.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n labels = self.model._pitches_to_labels(pitches)\n gradient = self.sess.run(self.gradient, feed_dict={self.model.\n labels_ph: labels, self.model.noises_ph: extend_z, self.\n random_idx: idx})[0]\n return gradient\n\n def calc_model_gradient_FDM(self, latent_vector, delta=0.0001):\n sample_latents = np.repeat(latent_vector.reshape(1, -1), repeats=\n 
self.latent_size + 1, axis=0)\n sample_latents[1:] += np.identity(self.latent_size) * delta\n sample_datas = self.decode(sample_latents)\n jacobian = (sample_datas[1:] - sample_datas[0]).T / delta\n idx = np.random.choice(self.data_size, 1024, replace=False)\n return jacobian[idx]\n\n def generate_data(self, n=1, z=None):\n if z is None:\n z = np.random.normal(size=[n, self.latent_size])\n pitches = []\n for i in range(z.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n waves = self.model.generate_samples_from_z(z, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def decode(self, latent_vector):\n pitches = []\n for i in range(latent_vector.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n waves = self.model.generate_samples_from_z(latent_vector, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def get_random_latent(self):\n return np.random.normal(0, 1, self.latent_size)\n",
"step-4": "import sys, os\nsys.path.append(os.path.abspath('../models'))\nfrom GANSynth import flags as lib_flags\nfrom GANSynth import generate_util as gu\nfrom GANSynth import model as lib_model\nfrom GANSynth import util\nfrom GANSynth import train_util\nimport tensorflow as tf\nimport numpy as np\nimport json\nfrom models.GenerativeModel import GenerativeModel\n\n\nclass GANSynthWrapper(GenerativeModel):\n\n def __init__(self, ckpt_path, data_size, use_approx=True):\n super(GANSynthWrapper, self).__init__(use_approx=use_approx)\n self.latent_size = 256\n self.data_size = data_size\n self.data_dim = 1\n self.expected_distance = 22.6\n self.gen_data_size = 64000\n self.model = lib_model.Model.load_from_path(ckpt_path)\n self.batch_size = self.model.batch_size\n self.build()\n self.sess = tf.Session()\n exp_vars = tf.global_variables()\n exp_vars = [var for var in exp_vars if 'ExponentialMovingAverage' or\n 'global_step' in var.name]\n init_op = tf.initialize_variables(exp_vars)\n self.sess.run(init_op)\n\n def build(self):\n self.random_idx = tf.placeholder(tf.int32, shape=(None, 1), name=\n 'random_idx')\n self.target_data = tf.placeholder(tf.float32, shape=(None, self.\n data_size), name='target_data')\n with tf.name_scope('Gradient'):\n slices = tf.gather(self.model.fake_waves_ph[..., 0], self.\n random_idx[..., 0], axis=1)\n self.gradient = self.jacobian(slices, self.model.noises_ph,\n parallel_iterations=1)\n\n def calc_model_gradient(self, latent_vector):\n if self.use_approx:\n jacobian = self.calc_model_gradient_FDM(latent_vector, delta=5e-05)\n return jacobian\n else:\n idx = np.arange(self.data_size).reshape(-1, 1)\n extend_z = np.zeros((self.batch_size, self.latent_size))\n min_size = np.minimum(latent_vector.shape[0], 8)\n extend_z[:min_size] = latent_vector[:min_size]\n pitches = []\n for i in range(extend_z.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n labels = self.model._pitches_to_labels(pitches)\n gradient = 
self.sess.run(self.gradient, feed_dict={self.model.\n labels_ph: labels, self.model.noises_ph: extend_z, self.\n random_idx: idx})[0]\n return gradient\n\n def calc_model_gradient_FDM(self, latent_vector, delta=0.0001):\n sample_latents = np.repeat(latent_vector.reshape(1, -1), repeats=\n self.latent_size + 1, axis=0)\n sample_latents[1:] += np.identity(self.latent_size) * delta\n sample_datas = self.decode(sample_latents)\n jacobian = (sample_datas[1:] - sample_datas[0]).T / delta\n idx = np.random.choice(self.data_size, 1024, replace=False)\n return jacobian[idx]\n\n def generate_data(self, n=1, z=None):\n if z is None:\n z = np.random.normal(size=[n, self.latent_size])\n pitches = []\n for i in range(z.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n waves = self.model.generate_samples_from_z(z, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def decode(self, latent_vector):\n pitches = []\n for i in range(latent_vector.shape[0]):\n pitches.append(50)\n pitches = np.array(pitches)\n waves = self.model.generate_samples_from_z(latent_vector, pitches,\n max_audio_length=self.data_size)\n return waves\n\n def get_random_latent(self):\n return np.random.normal(0, 1, self.latent_size)\n",
"step-5": "import sys, os\r\nsys.path.append(os.path.abspath('../models'))\r\n\r\nfrom GANSynth import flags as lib_flags\r\nfrom GANSynth import generate_util as gu\r\nfrom GANSynth import model as lib_model\r\nfrom GANSynth import util\r\nfrom GANSynth import train_util\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport json\r\nfrom models.GenerativeModel import GenerativeModel\r\n\r\nclass GANSynthWrapper(GenerativeModel):\r\n def __init__(self, ckpt_path, data_size, use_approx=True):\r\n super(GANSynthWrapper, self).__init__(use_approx=use_approx)\r\n\r\n self.latent_size = 256\r\n self.data_size = data_size\r\n self.data_dim = 1\r\n self.expected_distance = 22.6\r\n\r\n self.gen_data_size = 64000\r\n self.model = lib_model.Model.load_from_path(ckpt_path)\r\n self.batch_size = self.model.batch_size\r\n\r\n self.build()\r\n self.sess = tf.Session()\r\n\r\n exp_vars = tf.global_variables()\r\n exp_vars = [var for var in exp_vars if 'ExponentialMovingAverage' or 'global_step' in var.name]\r\n init_op = tf.initialize_variables(exp_vars)\r\n self.sess.run(init_op)\r\n\r\n def build(self):\r\n self.random_idx = tf.placeholder(tf.int32, shape=(None, 1), name='random_idx')\r\n self.target_data = tf.placeholder(tf.float32, shape=(None, self.data_size), name='target_data')\r\n\r\n with tf.name_scope('Gradient'):\r\n slices = tf.gather(self.model.fake_waves_ph[..., 0], self.random_idx[..., 0], axis=1)\r\n # slices = tf.gather(tf.reshape(self.model.fake_data_ph[..., 0], [-1, 128 * 1024]), self.random_idx[..., 0], axis=1)\r\n self.gradient = self.jacobian(slices, self.model.noises_ph, parallel_iterations=1)\r\n\r\n def calc_model_gradient(self, latent_vector):\r\n if self.use_approx:\r\n jacobian = self.calc_model_gradient_FDM(latent_vector, delta=5e-5)\r\n return jacobian\r\n else:\r\n idx = np.arange(self.data_size).reshape(-1, 1)\r\n extend_z = np.zeros((self.batch_size, self.latent_size))\r\n min_size = np.minimum(latent_vector.shape[0], 8)\r\n 
extend_z[:min_size] = latent_vector[:min_size]\r\n\r\n pitches = []\r\n for i in range(extend_z.shape[0]):\r\n pitches.append(50)\r\n pitches = np.array(pitches)\r\n labels = self.model._pitches_to_labels(pitches)\r\n\r\n gradient = self.sess.run(self.gradient, feed_dict={self.model.labels_ph: labels, self.model.noises_ph: extend_z, self.random_idx: idx})[0]\r\n return gradient\r\n\r\n def calc_model_gradient_FDM(self, latent_vector, delta=1e-4):\r\n sample_latents = np.repeat(latent_vector.reshape(1, -1), repeats=self.latent_size + 1, axis=0)\r\n sample_latents[1:] += np.identity(self.latent_size) * delta\r\n\r\n sample_datas = self.decode(sample_latents)\r\n\r\n jacobian = (sample_datas[1:] - sample_datas[0]).T / delta\r\n idx = np.random.choice(self.data_size, 1024, replace=False)\r\n return jacobian[idx]\r\n\r\n def generate_data(self, n=1, z=None):\r\n if z is None:\r\n z = np.random.normal(size=[n, self.latent_size])\r\n pitches = []\r\n for i in range(z.shape[0]):\r\n pitches.append(50)\r\n pitches = np.array(pitches)\r\n waves = self.model.generate_samples_from_z(z, pitches, max_audio_length=self.data_size)\r\n\r\n return waves\r\n\r\n def decode(self, latent_vector):\r\n pitches = []\r\n for i in range(latent_vector.shape[0]):\r\n pitches.append(50)\r\n pitches = np.array(pitches)\r\n # print(latent_vector.shape)\r\n waves = self.model.generate_samples_from_z(latent_vector, pitches, max_audio_length=self.data_size)\r\n\r\n return waves\r\n\r\n def get_random_latent(self):\r\n return np.random.normal(0, 1, self.latent_size)",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
class BebopNmpcControl:
<|reserved_special_token_0|>
def set_bebop_odom(self, odom_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First odometry received!')
self.odom_received_time_ = rospy.Time.now()
px = odom_msg.pose.pose.position.x
py = odom_msg.pose.pose.position.y
pz = odom_msg.pose.pose.position.z
vx = odom_msg.twist.twist.linear.x
vy = odom_msg.twist.twist.linear.y
vz = odom_msg.twist.twist.linear.z
rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.
orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.
pose.orientation.z, odom_msg.pose.pose.orientation.w])
self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0
], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_pose(self, pose_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First pose received!')
self.odom_received_time_ = rospy.Time.now()
px = pose_msg.pose.position.x
py = pose_msg.pose.position.y
pz = pose_msg.pose.position.z
rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.
orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.
orientation.z, pose_msg.pose.orientation.w])
self.bebop_state_current_[0:3] = np.array([px, py, pz])
self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_twist(self, twist_msg):
vx = twist_msg.twist.linear.x
vy = twist_msg.twist.linear.y
vz = twist_msg.twist.linear.z
self.bebop_state_current_[3:6] = np.array([vx, vy, vz])
def set_bebop_pose_goal(self, pose_goal_msg):
if self.received_first_goal_ is False:
self.received_first_goal_ = True
rospy.loginfo('First pose goal received!')
px_goal = pose_goal_msg.pose.position.x
py_goal = pose_goal_msg.pose.position.y
pz_goal = pose_goal_msg.pose.position.z
rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.
pose.orientation.x, pose_goal_msg.pose.orientation.y,
pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]
)
self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,
rpy_goal[2]])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def initialize_nlp_solver(self):
u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.
mpc_u_plan_[:, -1:]), axis=1)
x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.
mpc_x_plan_[:, -1:]), axis=1)
s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.
mpc_s_plan_[:, -1:]), axis=1)
self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)
).reshape(-1)
<|reserved_special_token_0|>
def run_nlp_solver(self):
if self.mpc_feasible_ is True:
self.initialize_nlp_solver()
else:
self.reset_nlp_solver()
self.set_nlp_params()
time_before_solver = rospy.get_rostime()
nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.
mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.
nlp_lbg_, ubg=self.nlp_ubg_)
if self.nlp_solver_complied_.stats()['success'] is False:
self.mpc_feasible_ = False
self.mpc_success_ = False
rospy.logwarn('MPC infeasible!')
else:
self.mpc_feasible_ = True
self.mpc_success_ = True
solver_time = (rospy.get_rostime() - time_before_solver).to_sec(
) * 1000.0
solver_iter = self.nlp_solver_complied_.stats()['iter_count']
rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',
solver_iter, solver_time)
traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self
.mpc_ns_, self.mpc_N_))
self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])
self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +
self.mpc_nx_, :])
self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])
self.mpc_u_now_ = self.mpc_u_plan_[:, 0]
def calculate_bebop_cmd_vel(self):
time_now = rospy.Time.now()
if (time_now - self.odom_received_time_).to_sec(
) > self.odom_time_out_:
rospy.logwarn('Odometry time out! Will try to make the MAV hover.')
self.bebop_pose_goal_ = np.concatenate((self.
bebop_state_current_[0:3], self.bebop_state_current_[8:9]))
else:
self.run_nlp_solver()
if self.mpc_success_ is True:
roll_cmd = self.mpc_u_now_[0]
pitch_cmd = self.mpc_u_now_[1]
vz_cmd = self.mpc_u_now_[2]
else:
rospy.logwarn('MPC failure! Default commands sent.')
roll_cmd = 0.0
pitch_cmd = 0.0
vz_cmd = 0.0
yaw_now = self.bebop_state_current_[8]
yaw_ref = self.bebop_pose_goal_[3]
yaw_error = yaw_ref - yaw_now
while np.abs(yaw_error) > np.pi:
if yaw_error > 0.0:
yaw_error = yaw_error - 2.0 * np.pi
else:
yaw_error = yaw_error + 2.0 * np.pi
yawrate_cmd = self.K_yaw_ * yaw_error
yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.
yawrate_max_)
self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,
yawrate_cmd])
def pub_bebop_cmd_vel(self):
try:
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_
cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_
cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_
cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_
self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)
except:
rospy.logwarn('Bebop cmd_vel command not published!')
def pub_mpc_traj_plan_vis(self):
try:
marker_msg = Marker()
marker_msg.header.frame_id = 'map'
marker_msg.header.stamp = rospy.Time.now()
marker_msg.type = 8
marker_msg.action = 0
marker_msg.scale.x = 0.2
marker_msg.scale.y = 0.2
marker_msg.scale.z = 0.2
marker_msg.color.r = 1.0
marker_msg.color.g = 0.0
marker_msg.color.b = 0.0
marker_msg.color.a = 1.0
marker_msg.pose.position.x = 0.0
marker_msg.pose.position.y = 0.0
marker_msg.pose.position.z = 0.0
marker_msg.pose.orientation.x = 0
marker_msg.pose.orientation.y = 0
marker_msg.pose.orientation.z = 0
marker_msg.pose.orientation.w = 1.0
mpc_traj_plan_points = []
for iStage in range(0, self.mpc_N_):
point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_
[1, iStage], self.mpc_x_plan_[2, iStage])
mpc_traj_plan_points.append(point)
marker_msg.points = mpc_traj_plan_points
self.mpc_traj_plan_vis_pub_.publish(marker_msg)
except:
rospy.logwarn('MPC trajectory plan not published!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BebopNmpcControl:
<|reserved_special_token_0|>
def set_bebop_odom(self, odom_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First odometry received!')
self.odom_received_time_ = rospy.Time.now()
px = odom_msg.pose.pose.position.x
py = odom_msg.pose.pose.position.y
pz = odom_msg.pose.pose.position.z
vx = odom_msg.twist.twist.linear.x
vy = odom_msg.twist.twist.linear.y
vz = odom_msg.twist.twist.linear.z
rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.
orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.
pose.orientation.z, odom_msg.pose.pose.orientation.w])
self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0
], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_pose(self, pose_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First pose received!')
self.odom_received_time_ = rospy.Time.now()
px = pose_msg.pose.position.x
py = pose_msg.pose.position.y
pz = pose_msg.pose.position.z
rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.
orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.
orientation.z, pose_msg.pose.orientation.w])
self.bebop_state_current_[0:3] = np.array([px, py, pz])
self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_twist(self, twist_msg):
vx = twist_msg.twist.linear.x
vy = twist_msg.twist.linear.y
vz = twist_msg.twist.linear.z
self.bebop_state_current_[3:6] = np.array([vx, vy, vz])
def set_bebop_pose_goal(self, pose_goal_msg):
if self.received_first_goal_ is False:
self.received_first_goal_ = True
rospy.loginfo('First pose goal received!')
px_goal = pose_goal_msg.pose.position.x
py_goal = pose_goal_msg.pose.position.y
pz_goal = pose_goal_msg.pose.position.z
rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.
pose.orientation.x, pose_goal_msg.pose.orientation.y,
pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]
)
self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,
rpy_goal[2]])
def obs_motion_prediction(self):
for iStage in range(0, self.mpc_N_):
self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3
] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_
<|reserved_special_token_0|>
def initialize_nlp_solver(self):
u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.
mpc_u_plan_[:, -1:]), axis=1)
x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.
mpc_x_plan_[:, -1:]), axis=1)
s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.
mpc_s_plan_[:, -1:]), axis=1)
self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)
).reshape(-1)
def set_nlp_params(self):
parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))
for iStage in range(0, self.mpc_N_):
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_pose_start, iStage] = np.array([self.
bebop_state_current_[0], self.bebop_state_current_[1], self
.bebop_state_current_[2], self.bebop_state_current_[8]])
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_size, iStage] = self.bebop_size_
parameters_all_stage[self.mpc_form_param_.param_index_obs_info,
iStage] = np.concatenate((self.obs_state_prediction_[0:3,
iStage], self.obs_size_))
if iStage == self.mpc_N_ - 1:
parameters_all_stage[self.mpc_form_param_.
param_index_mpc_weights, iStage] = np.hstack((self.
mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.
mpc_weights_coll_, self.mpc_weights_slack_))
else:
parameters_all_stage[self.mpc_form_param_.
param_index_mpc_weights, iStage] = np.hstack((0.05 *
self.mpc_weights_wp_, self.mpc_weights_input_, self.
mpc_weights_coll_, self.mpc_weights_slack_))
self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.
mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))
def run_nlp_solver(self):
if self.mpc_feasible_ is True:
self.initialize_nlp_solver()
else:
self.reset_nlp_solver()
self.set_nlp_params()
time_before_solver = rospy.get_rostime()
nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.
mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.
nlp_lbg_, ubg=self.nlp_ubg_)
if self.nlp_solver_complied_.stats()['success'] is False:
self.mpc_feasible_ = False
self.mpc_success_ = False
rospy.logwarn('MPC infeasible!')
else:
self.mpc_feasible_ = True
self.mpc_success_ = True
solver_time = (rospy.get_rostime() - time_before_solver).to_sec(
) * 1000.0
solver_iter = self.nlp_solver_complied_.stats()['iter_count']
rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',
solver_iter, solver_time)
traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self
.mpc_ns_, self.mpc_N_))
self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])
self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +
self.mpc_nx_, :])
self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])
self.mpc_u_now_ = self.mpc_u_plan_[:, 0]
def calculate_bebop_cmd_vel(self):
time_now = rospy.Time.now()
if (time_now - self.odom_received_time_).to_sec(
) > self.odom_time_out_:
rospy.logwarn('Odometry time out! Will try to make the MAV hover.')
self.bebop_pose_goal_ = np.concatenate((self.
bebop_state_current_[0:3], self.bebop_state_current_[8:9]))
else:
self.run_nlp_solver()
if self.mpc_success_ is True:
roll_cmd = self.mpc_u_now_[0]
pitch_cmd = self.mpc_u_now_[1]
vz_cmd = self.mpc_u_now_[2]
else:
rospy.logwarn('MPC failure! Default commands sent.')
roll_cmd = 0.0
pitch_cmd = 0.0
vz_cmd = 0.0
yaw_now = self.bebop_state_current_[8]
yaw_ref = self.bebop_pose_goal_[3]
yaw_error = yaw_ref - yaw_now
while np.abs(yaw_error) > np.pi:
if yaw_error > 0.0:
yaw_error = yaw_error - 2.0 * np.pi
else:
yaw_error = yaw_error + 2.0 * np.pi
yawrate_cmd = self.K_yaw_ * yaw_error
yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.
yawrate_max_)
self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,
yawrate_cmd])
def pub_bebop_cmd_vel(self):
try:
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_
cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_
cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_
cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_
self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)
except:
rospy.logwarn('Bebop cmd_vel command not published!')
def pub_mpc_traj_plan_vis(self):
try:
marker_msg = Marker()
marker_msg.header.frame_id = 'map'
marker_msg.header.stamp = rospy.Time.now()
marker_msg.type = 8
marker_msg.action = 0
marker_msg.scale.x = 0.2
marker_msg.scale.y = 0.2
marker_msg.scale.z = 0.2
marker_msg.color.r = 1.0
marker_msg.color.g = 0.0
marker_msg.color.b = 0.0
marker_msg.color.a = 1.0
marker_msg.pose.position.x = 0.0
marker_msg.pose.position.y = 0.0
marker_msg.pose.position.z = 0.0
marker_msg.pose.orientation.x = 0
marker_msg.pose.orientation.y = 0
marker_msg.pose.orientation.z = 0
marker_msg.pose.orientation.w = 1.0
mpc_traj_plan_points = []
for iStage in range(0, self.mpc_N_):
point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_
[1, iStage], self.mpc_x_plan_[2, iStage])
mpc_traj_plan_points.append(point)
marker_msg.points = mpc_traj_plan_points
self.mpc_traj_plan_vis_pub_.publish(marker_msg)
except:
rospy.logwarn('MPC trajectory plan not published!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BebopNmpcControl:
<|reserved_special_token_0|>
def set_bebop_odom(self, odom_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First odometry received!')
self.odom_received_time_ = rospy.Time.now()
px = odom_msg.pose.pose.position.x
py = odom_msg.pose.pose.position.y
pz = odom_msg.pose.pose.position.z
vx = odom_msg.twist.twist.linear.x
vy = odom_msg.twist.twist.linear.y
vz = odom_msg.twist.twist.linear.z
rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.
orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.
pose.orientation.z, odom_msg.pose.pose.orientation.w])
self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0
], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_pose(self, pose_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First pose received!')
self.odom_received_time_ = rospy.Time.now()
px = pose_msg.pose.position.x
py = pose_msg.pose.position.y
pz = pose_msg.pose.position.z
rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.
orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.
orientation.z, pose_msg.pose.orientation.w])
self.bebop_state_current_[0:3] = np.array([px, py, pz])
self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_twist(self, twist_msg):
vx = twist_msg.twist.linear.x
vy = twist_msg.twist.linear.y
vz = twist_msg.twist.linear.z
self.bebop_state_current_[3:6] = np.array([vx, vy, vz])
def set_bebop_pose_goal(self, pose_goal_msg):
if self.received_first_goal_ is False:
self.received_first_goal_ = True
rospy.loginfo('First pose goal received!')
px_goal = pose_goal_msg.pose.position.x
py_goal = pose_goal_msg.pose.position.y
pz_goal = pose_goal_msg.pose.position.z
rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.
pose.orientation.x, pose_goal_msg.pose.orientation.y,
pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]
)
self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,
rpy_goal[2]])
def obs_motion_prediction(self):
for iStage in range(0, self.mpc_N_):
self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3
] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_
def reset_nlp_solver(self):
u_reset = np.zeros(self.mpc_nu_)
x_reset = np.zeros(self.mpc_nx_)
s_reset = np.zeros(self.mpc_ns_)
x_reset[0:3] = self.bebop_state_current_[0:3]
x_reset[6:8] = self.bebop_state_current_[6:8]
nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(
-1)
self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(
-1)
def initialize_nlp_solver(self):
u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.
mpc_u_plan_[:, -1:]), axis=1)
x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.
mpc_x_plan_[:, -1:]), axis=1)
s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.
mpc_s_plan_[:, -1:]), axis=1)
self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)
).reshape(-1)
def set_nlp_params(self):
parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))
for iStage in range(0, self.mpc_N_):
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_pose_start, iStage] = np.array([self.
bebop_state_current_[0], self.bebop_state_current_[1], self
.bebop_state_current_[2], self.bebop_state_current_[8]])
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_size, iStage] = self.bebop_size_
parameters_all_stage[self.mpc_form_param_.param_index_obs_info,
iStage] = np.concatenate((self.obs_state_prediction_[0:3,
iStage], self.obs_size_))
if iStage == self.mpc_N_ - 1:
parameters_all_stage[self.mpc_form_param_.
param_index_mpc_weights, iStage] = np.hstack((self.
mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.
mpc_weights_coll_, self.mpc_weights_slack_))
else:
parameters_all_stage[self.mpc_form_param_.
param_index_mpc_weights, iStage] = np.hstack((0.05 *
self.mpc_weights_wp_, self.mpc_weights_input_, self.
mpc_weights_coll_, self.mpc_weights_slack_))
self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.
mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))
def run_nlp_solver(self):
if self.mpc_feasible_ is True:
self.initialize_nlp_solver()
else:
self.reset_nlp_solver()
self.set_nlp_params()
time_before_solver = rospy.get_rostime()
nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.
mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.
nlp_lbg_, ubg=self.nlp_ubg_)
if self.nlp_solver_complied_.stats()['success'] is False:
self.mpc_feasible_ = False
self.mpc_success_ = False
rospy.logwarn('MPC infeasible!')
else:
self.mpc_feasible_ = True
self.mpc_success_ = True
solver_time = (rospy.get_rostime() - time_before_solver).to_sec(
) * 1000.0
solver_iter = self.nlp_solver_complied_.stats()['iter_count']
rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',
solver_iter, solver_time)
traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self
.mpc_ns_, self.mpc_N_))
self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])
self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +
self.mpc_nx_, :])
self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])
self.mpc_u_now_ = self.mpc_u_plan_[:, 0]
def calculate_bebop_cmd_vel(self):
time_now = rospy.Time.now()
if (time_now - self.odom_received_time_).to_sec(
) > self.odom_time_out_:
rospy.logwarn('Odometry time out! Will try to make the MAV hover.')
self.bebop_pose_goal_ = np.concatenate((self.
bebop_state_current_[0:3], self.bebop_state_current_[8:9]))
else:
self.run_nlp_solver()
if self.mpc_success_ is True:
roll_cmd = self.mpc_u_now_[0]
pitch_cmd = self.mpc_u_now_[1]
vz_cmd = self.mpc_u_now_[2]
else:
rospy.logwarn('MPC failure! Default commands sent.')
roll_cmd = 0.0
pitch_cmd = 0.0
vz_cmd = 0.0
yaw_now = self.bebop_state_current_[8]
yaw_ref = self.bebop_pose_goal_[3]
yaw_error = yaw_ref - yaw_now
while np.abs(yaw_error) > np.pi:
if yaw_error > 0.0:
yaw_error = yaw_error - 2.0 * np.pi
else:
yaw_error = yaw_error + 2.0 * np.pi
yawrate_cmd = self.K_yaw_ * yaw_error
yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.
yawrate_max_)
self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,
yawrate_cmd])
def pub_bebop_cmd_vel(self):
try:
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_
cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_
cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_
cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_
self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)
except:
rospy.logwarn('Bebop cmd_vel command not published!')
def pub_mpc_traj_plan_vis(self):
try:
marker_msg = Marker()
marker_msg.header.frame_id = 'map'
marker_msg.header.stamp = rospy.Time.now()
marker_msg.type = 8
marker_msg.action = 0
marker_msg.scale.x = 0.2
marker_msg.scale.y = 0.2
marker_msg.scale.z = 0.2
marker_msg.color.r = 1.0
marker_msg.color.g = 0.0
marker_msg.color.b = 0.0
marker_msg.color.a = 1.0
marker_msg.pose.position.x = 0.0
marker_msg.pose.position.y = 0.0
marker_msg.pose.position.z = 0.0
marker_msg.pose.orientation.x = 0
marker_msg.pose.orientation.y = 0
marker_msg.pose.orientation.z = 0
marker_msg.pose.orientation.w = 1.0
mpc_traj_plan_points = []
for iStage in range(0, self.mpc_N_):
point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_
[1, iStage], self.mpc_x_plan_[2, iStage])
mpc_traj_plan_points.append(point)
marker_msg.points = mpc_traj_plan_points
self.mpc_traj_plan_vis_pub_.publish(marker_msg)
except:
rospy.logwarn('MPC trajectory plan not published!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BebopNmpcControl:
def __init__(self, mpc_form_param):
self.mpc_form_param_ = mpc_form_param
self.roll_max_ = self.mpc_form_param_.roll_max
self.pitch_max_ = self.mpc_form_param_.pitch_max
self.vz_max_ = self.mpc_form_param_.vz_max
self.yawrate_max_ = self.mpc_form_param_.yawrate_max
self.K_yaw_ = self.mpc_form_param_.K_yaw
self.bebop_size_ = self.mpc_form_param_.bebop_size
self.bebop_state_current_ = np.zeros(9)
self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0])
self.nobs_ = self.mpc_form_param_.nobs
self.obs_size_ = self.mpc_form_param_.obs_size
self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0])
self.obs_state_prediction_ = np.tile(np.array(self.
obs_state_current_), (self.mpc_form_param_.N, 1)).T
self.mpc_dt_ = self.mpc_form_param_.dt
self.mpc_N_ = self.mpc_form_param_.N
self.mpc_Tf_ = self.mpc_form_param_.Tf
self.mpc_nx_ = self.mpc_form_param_.nx
self.mpc_nu_ = self.mpc_form_param_.nu
self.mpc_ns_ = self.mpc_form_param_.ns
self.mpc_np_ = self.mpc_form_param_.nparam
self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp
self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input
self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll
self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack
self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_, self.
mpc_N_)).reshape(-1)
self.mpc_nlp_param_ = self.mpc_nx_ + self.mpc_np_ * self.mpc_N_
self.mpc_x_plan_ = np.zeros((self.mpc_nx_, self.mpc_N_))
self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_))
self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_))
self.mpc_u_now_ = np.zeros(self.mpc_nu_)
self.mpc_feasible_ = False
self.mpc_success_ = False
recompile = False
[self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.
nlp_lbg_, self.nlp_ubg_] = bebop_nmpc_casadi_solver(self.
mpc_form_param_, recompile)
self.odom_sub_ = rospy.Subscriber('/bebop/odom', Odometry, self.
set_bebop_odom)
self.received_first_odom_ = False
self.odom_received_time_ = rospy.Time.now()
self.odom_time_out_ = 0.2
self.pose_sub_ = rospy.Subscriber('/bebop/pose', PoseStamped, self.
set_bebop_pose)
self.twist_sub_ = rospy.Subscriber('/bebop/twist', TwistStamped,
self.set_bebop_twist)
self.pose_goal_sub_ = rospy.Subscriber('/bebop/pose_goal',
PoseStamped, self.set_bebop_pose_goal)
self.received_first_goal_ = False
self.bebop_cmd_vel_ = np.array(4)
self.bebop_cmd_vel_pub_ = rospy.Publisher('/bebop/auto_cmd_vel',
Twist, queue_size=1)
self.mpc_traj_plan_vis_pub_ = rospy.Publisher(
'/bebop/mpc/trajectory_plan_vis', Marker, queue_size=1)
def set_bebop_odom(self, odom_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First odometry received!')
self.odom_received_time_ = rospy.Time.now()
px = odom_msg.pose.pose.position.x
py = odom_msg.pose.pose.position.y
pz = odom_msg.pose.pose.position.z
vx = odom_msg.twist.twist.linear.x
vy = odom_msg.twist.twist.linear.y
vz = odom_msg.twist.twist.linear.z
rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.
orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.
pose.orientation.z, odom_msg.pose.pose.orientation.w])
self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0
], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_pose(self, pose_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First pose received!')
self.odom_received_time_ = rospy.Time.now()
px = pose_msg.pose.position.x
py = pose_msg.pose.position.y
pz = pose_msg.pose.position.z
rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.
orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.
orientation.z, pose_msg.pose.orientation.w])
self.bebop_state_current_[0:3] = np.array([px, py, pz])
self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])
if self.received_first_goal_ is False:
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_twist(self, twist_msg):
vx = twist_msg.twist.linear.x
vy = twist_msg.twist.linear.y
vz = twist_msg.twist.linear.z
self.bebop_state_current_[3:6] = np.array([vx, vy, vz])
def set_bebop_pose_goal(self, pose_goal_msg):
if self.received_first_goal_ is False:
self.received_first_goal_ = True
rospy.loginfo('First pose goal received!')
px_goal = pose_goal_msg.pose.position.x
py_goal = pose_goal_msg.pose.position.y
pz_goal = pose_goal_msg.pose.position.z
rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.
pose.orientation.x, pose_goal_msg.pose.orientation.y,
pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]
)
self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,
rpy_goal[2]])
def obs_motion_prediction(self):
for iStage in range(0, self.mpc_N_):
self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3
] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_
def reset_nlp_solver(self):
u_reset = np.zeros(self.mpc_nu_)
x_reset = np.zeros(self.mpc_nx_)
s_reset = np.zeros(self.mpc_ns_)
x_reset[0:3] = self.bebop_state_current_[0:3]
x_reset[6:8] = self.bebop_state_current_[6:8]
nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(
-1)
self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(
-1)
def initialize_nlp_solver(self):
u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.
mpc_u_plan_[:, -1:]), axis=1)
x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.
mpc_x_plan_[:, -1:]), axis=1)
s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.
mpc_s_plan_[:, -1:]), axis=1)
self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)
).reshape(-1)
def set_nlp_params(self):
parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))
for iStage in range(0, self.mpc_N_):
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_pose_start, iStage] = np.array([self.
bebop_state_current_[0], self.bebop_state_current_[1], self
.bebop_state_current_[2], self.bebop_state_current_[8]])
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_
parameters_all_stage[self.mpc_form_param_.
param_index_bebop_size, iStage] = self.bebop_size_
parameters_all_stage[self.mpc_form_param_.param_index_obs_info,
iStage] = np.concatenate((self.obs_state_prediction_[0:3,
iStage], self.obs_size_))
if iStage == self.mpc_N_ - 1:
parameters_all_stage[self.mpc_form_param_.
param_index_mpc_weights, iStage] = np.hstack((self.
mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.
mpc_weights_coll_, self.mpc_weights_slack_))
else:
parameters_all_stage[self.mpc_form_param_.
param_index_mpc_weights, iStage] = np.hstack((0.05 *
self.mpc_weights_wp_, self.mpc_weights_input_, self.
mpc_weights_coll_, self.mpc_weights_slack_))
self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.
mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))
def run_nlp_solver(self):
if self.mpc_feasible_ is True:
self.initialize_nlp_solver()
else:
self.reset_nlp_solver()
self.set_nlp_params()
time_before_solver = rospy.get_rostime()
nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.
mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.
nlp_lbg_, ubg=self.nlp_ubg_)
if self.nlp_solver_complied_.stats()['success'] is False:
self.mpc_feasible_ = False
self.mpc_success_ = False
rospy.logwarn('MPC infeasible!')
else:
self.mpc_feasible_ = True
self.mpc_success_ = True
solver_time = (rospy.get_rostime() - time_before_solver).to_sec(
) * 1000.0
solver_iter = self.nlp_solver_complied_.stats()['iter_count']
rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',
solver_iter, solver_time)
traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self
.mpc_ns_, self.mpc_N_))
self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])
self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +
self.mpc_nx_, :])
self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])
self.mpc_u_now_ = self.mpc_u_plan_[:, 0]
def calculate_bebop_cmd_vel(self):
time_now = rospy.Time.now()
if (time_now - self.odom_received_time_).to_sec(
) > self.odom_time_out_:
rospy.logwarn('Odometry time out! Will try to make the MAV hover.')
self.bebop_pose_goal_ = np.concatenate((self.
bebop_state_current_[0:3], self.bebop_state_current_[8:9]))
else:
self.run_nlp_solver()
if self.mpc_success_ is True:
roll_cmd = self.mpc_u_now_[0]
pitch_cmd = self.mpc_u_now_[1]
vz_cmd = self.mpc_u_now_[2]
else:
rospy.logwarn('MPC failure! Default commands sent.')
roll_cmd = 0.0
pitch_cmd = 0.0
vz_cmd = 0.0
yaw_now = self.bebop_state_current_[8]
yaw_ref = self.bebop_pose_goal_[3]
yaw_error = yaw_ref - yaw_now
while np.abs(yaw_error) > np.pi:
if yaw_error > 0.0:
yaw_error = yaw_error - 2.0 * np.pi
else:
yaw_error = yaw_error + 2.0 * np.pi
yawrate_cmd = self.K_yaw_ * yaw_error
yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.
yawrate_max_)
self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,
yawrate_cmd])
def pub_bebop_cmd_vel(self):
try:
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_
cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_
cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_
cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_
self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)
except:
rospy.logwarn('Bebop cmd_vel command not published!')
def pub_mpc_traj_plan_vis(self):
try:
marker_msg = Marker()
marker_msg.header.frame_id = 'map'
marker_msg.header.stamp = rospy.Time.now()
marker_msg.type = 8
marker_msg.action = 0
marker_msg.scale.x = 0.2
marker_msg.scale.y = 0.2
marker_msg.scale.z = 0.2
marker_msg.color.r = 1.0
marker_msg.color.g = 0.0
marker_msg.color.b = 0.0
marker_msg.color.a = 1.0
marker_msg.pose.position.x = 0.0
marker_msg.pose.position.y = 0.0
marker_msg.pose.position.z = 0.0
marker_msg.pose.orientation.x = 0
marker_msg.pose.orientation.y = 0
marker_msg.pose.orientation.z = 0
marker_msg.pose.orientation.w = 1.0
mpc_traj_plan_points = []
for iStage in range(0, self.mpc_N_):
point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_
[1, iStage], self.mpc_x_plan_[2, iStage])
mpc_traj_plan_points.append(point)
marker_msg.points = mpc_traj_plan_points
self.mpc_traj_plan_vis_pub_.publish(marker_msg)
except:
rospy.logwarn('MPC trajectory plan not published!')
def bebop_nmpc_control():
    """Node entry point: run the Bebop NMPC control loop at 50 Hz."""
    rospy.loginfo('Starting Bebop NMPC Control...')
    rospy.init_node('bebop_nmpc_control_node', anonymous=False)
    loop_rate = rospy.Rate(50)
    rospy.sleep(1.0)  # give subscribers/publishers time to connect
    controller = BebopNmpcControl(BebopNmpcFormulationParam())
    while not rospy.is_shutdown():
        if not controller.received_first_odom_:
            rospy.logwarn('Waiting for first Odometry!')
        elif not controller.received_first_goal_:
            rospy.logwarn('Waiting for first goal pose!')
        else:
            controller.calculate_bebop_cmd_vel()
            controller.pub_bebop_cmd_vel()
            controller.pub_mpc_traj_plan_vis()
        loop_rate.sleep()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python
import numpy as np
import rospy
import tf
from geometry_msgs.msg import PoseStamped, Twist, TwistStamped, Point
from nav_msgs.msg import Odometry
from visualization_msgs.msg import Marker
from bebop_nmpc_solver import BebopNmpcFormulationParam, bebop_nmpc_casadi_solver
# The frame by default is NWU
class BebopNmpcControl:
def __init__(self, mpc_form_param):
# MPC formulation settings
self.mpc_form_param_ = mpc_form_param
# bebop param
self.roll_max_ = self.mpc_form_param_.roll_max
self.pitch_max_ = self.mpc_form_param_.pitch_max
self.vz_max_ = self.mpc_form_param_.vz_max
self.yawrate_max_ = self.mpc_form_param_.yawrate_max
self.K_yaw_ = self.mpc_form_param_.K_yaw
self.bebop_size_ = self.mpc_form_param_.bebop_size
# state and goal pose, size
self.bebop_state_current_ = np.zeros(9)
self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0])
# collision avoidance obs param
self.nobs_ = self.mpc_form_param_.nobs
self.obs_size_ = self.mpc_form_param_.obs_size
self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0])
self.obs_state_prediction_ = np.tile(np.array(self.obs_state_current_), (self.mpc_form_param_.N, 1)).T
# MPC settings
self.mpc_dt_ = self.mpc_form_param_.dt
self.mpc_N_ = self.mpc_form_param_.N
self.mpc_Tf_ = self.mpc_form_param_.Tf
self.mpc_nx_ = self.mpc_form_param_.nx
self.mpc_nu_ = self.mpc_form_param_.nu
self.mpc_ns_ = self.mpc_form_param_.ns
self.mpc_np_ = self.mpc_form_param_.nparam
self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp
self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input
self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll
self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack
# MPC variables
self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_, self.mpc_N_)).reshape(-1)
self.mpc_nlp_param_ = self.mpc_nx_ + self.mpc_np_ * self.mpc_N_
self.mpc_x_plan_ = np.zeros((self.mpc_nx_, self.mpc_N_))
self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_))
self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_))
self.mpc_u_now_ = np.zeros(self.mpc_nu_)
self.mpc_feasible_ = False
self.mpc_success_ = False
# MPC solver
recompile = False
[self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.nlp_lbg_, self.nlp_ubg_] = \
bebop_nmpc_casadi_solver(self.mpc_form_param_, recompile)
# ROS subscriber
self.odom_sub_ = rospy.Subscriber("/bebop/odom", Odometry, self.set_bebop_odom) # bebop_odom
self.received_first_odom_ = False
self.odom_received_time_ = rospy.Time.now()
self.odom_time_out_ = 0.2
self.pose_sub_ = rospy.Subscriber("/bebop/pose", PoseStamped, self.set_bebop_pose)
self.twist_sub_ = rospy.Subscriber("/bebop/twist", TwistStamped, self.set_bebop_twist)
self.pose_goal_sub_ = rospy.Subscriber("/bebop/pose_goal", PoseStamped, self.set_bebop_pose_goal)
self.received_first_goal_ = False
# ROS publisher
self.bebop_cmd_vel_ = np.array(4)
self.bebop_cmd_vel_pub_ = rospy.Publisher("/bebop/auto_cmd_vel", Twist, queue_size=1)
self.mpc_traj_plan_vis_pub_ = rospy.Publisher("/bebop/mpc/trajectory_plan_vis", Marker, queue_size=1)
def set_bebop_odom(self, odom_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First odometry received!')
# read data
self.odom_received_time_ = rospy.Time.now()
px = odom_msg.pose.pose.position.x
py = odom_msg.pose.pose.position.y
pz = odom_msg.pose.pose.position.z
vx = odom_msg.twist.twist.linear.x
vy = odom_msg.twist.twist.linear.y
vz = odom_msg.twist.twist.linear.z
rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.orientation.x,
odom_msg.pose.pose.orientation.y,
odom_msg.pose.pose.orientation.z,
odom_msg.pose.pose.orientation.w])
self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0], rpy[1], rpy[2]])
if self.received_first_goal_ is False: # if not received any goal pose
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_pose(self, pose_msg):
if self.received_first_odom_ is False:
self.received_first_odom_ = True
rospy.loginfo('First pose received!')
self.odom_received_time_ = rospy.Time.now()
px = pose_msg.pose.position.x
py = pose_msg.pose.position.y
pz = pose_msg.pose.position.z
rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.orientation.x,
pose_msg.pose.orientation.y,
pose_msg.pose.orientation.z,
pose_msg.pose.orientation.w])
self.bebop_state_current_[0:3] = np.array([px, py, pz])
self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])
if self.received_first_goal_ is False: # if not received any goal pose
self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])
def set_bebop_twist(self, twist_msg):
vx = twist_msg.twist.linear.x
vy = twist_msg.twist.linear.y
vz = twist_msg.twist.linear.z
self.bebop_state_current_[3:6] = np.array([vx, vy, vz])
def set_bebop_pose_goal(self, pose_goal_msg):
if self.received_first_goal_ is False:
self.received_first_goal_ = True
rospy.loginfo('First pose goal received!')
px_goal = pose_goal_msg.pose.position.x
py_goal = pose_goal_msg.pose.position.y
pz_goal = pose_goal_msg.pose.position.z
rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.pose.orientation.x,
pose_goal_msg.pose.orientation.y,
pose_goal_msg.pose.orientation.z,
pose_goal_msg.pose.orientation.w])
self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal, rpy_goal[2]])
def obs_motion_prediction(self):
for iStage in range(0, self.mpc_N_):
self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3] \
+ self.obs_state_current_[3:6] * (iStage+1) * self.mpc_dt_
def reset_nlp_solver(self):
# initialize plan
u_reset = np.zeros(self.mpc_nu_)
x_reset = np.zeros(self.mpc_nx_)
s_reset = np.zeros(self.mpc_ns_)
# x_reset = self.bebop_state_current_[:self.mpc_nx_]
x_reset[0:3] = self.bebop_state_current_[0:3]
x_reset[6:8] = self.bebop_state_current_[6:8]
nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(-1)
self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(-1)
def initialize_nlp_solver(self):
u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.mpc_u_plan_[:, -1:]), axis=1)
x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.mpc_x_plan_[:, -1:]), axis=1)
s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.mpc_s_plan_[:, -1:]), axis=1)
self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)).reshape(-1)
def set_nlp_params(self):
parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_)) # all parameters on each stage
for iStage in range(0, self.mpc_N_):
parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_start, iStage] = \
np.array([self.bebop_state_current_[0], self.bebop_state_current_[1], self.bebop_state_current_[2],
self.bebop_state_current_[8]])
parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_
parameters_all_stage[self.mpc_form_param_.param_index_bebop_size, iStage] = self.bebop_size_
parameters_all_stage[self.mpc_form_param_.param_index_obs_info, iStage] = np.concatenate((
self.obs_state_prediction_[0:3, iStage], self.obs_size_
))
if iStage == self.mpc_N_ - 1: # terminal weights
parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(
(self.mpc_weights_wp_, 0.1 * self.mpc_weights_input_,
self.mpc_weights_coll_, self.mpc_weights_slack_)
)
else:
parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(
(0.05 * self.mpc_weights_wp_, self.mpc_weights_input_,
self.mpc_weights_coll_, self.mpc_weights_slack_)
)
# set parameters
self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.mpc_nx_],
np.transpose(parameters_all_stage).reshape(-1)))
def run_nlp_solver(self):
# initialize solver
if self.mpc_feasible_ is True:
self.initialize_nlp_solver()
else:
self.reset_nlp_solver()
# set solver params
self.set_nlp_params()
# call the solver
time_before_solver = rospy.get_rostime()
nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_,
p=self.mpc_nlp_param_,
lbx=self.nlp_lbx_,
ubx=self.nlp_ubx_,
lbg=self.nlp_lbg_,
ubg=self.nlp_ubg_)
# deal with infeasibility
if self.nlp_solver_complied_.stats()['success'] is False: # if infeasible
self.mpc_feasible_ = False
self.mpc_success_ = False
rospy.logwarn("MPC infeasible!")
else:
self.mpc_feasible_ = True
self.mpc_success_ = True
solver_time = (rospy.get_rostime() - time_before_solver).to_sec() * 1000.0
solver_iter = self.nlp_solver_complied_.stats()['iter_count']
rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.', solver_iter, solver_time)
# obtain solution
traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self.mpc_ns_, self.mpc_N_))
self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])
self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_+self.mpc_nx_, :])
self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_+self.mpc_nx_:, :])
self.mpc_u_now_ = self.mpc_u_plan_[:, 0]
def calculate_bebop_cmd_vel(self):
# if odom received
time_now = rospy.Time.now()
if (time_now - self.odom_received_time_).to_sec() > self.odom_time_out_:
rospy.logwarn('Odometry time out! Will try to make the MAV hover.')
self.bebop_pose_goal_ = np.concatenate((self.bebop_state_current_[0:3], self.bebop_state_current_[8:9]))
else:
# run the nlp solver
self.run_nlp_solver()
# control commands
if self.mpc_success_ is True:
roll_cmd = self.mpc_u_now_[0]
pitch_cmd = self.mpc_u_now_[1]
vz_cmd = self.mpc_u_now_[2]
else:
rospy.logwarn('MPC failure! Default commands sent.')
roll_cmd = 0.0
pitch_cmd = 0.0
vz_cmd = 0.0
# yaw control
yaw_now = self.bebop_state_current_[8]
yaw_ref = self.bebop_pose_goal_[3]
yaw_error = yaw_ref - yaw_now
while np.abs(yaw_error) > np.pi:
if yaw_error > 0.0:
yaw_error = yaw_error - 2.0 * np.pi
else:
yaw_error = yaw_error + 2.0 * np.pi
yawrate_cmd = self.K_yaw_ * yaw_error
yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.yawrate_max_)
# obtained command
self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd, yawrate_cmd])
def pub_bebop_cmd_vel(self):
try:
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_ # pitch to move along x
cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_ # roll to move along y
cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_
cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_
self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)
except:
rospy.logwarn('Bebop cmd_vel command not published!')
def pub_mpc_traj_plan_vis(self):
try:
marker_msg = Marker()
marker_msg.header.frame_id = "map"
marker_msg.header.stamp = rospy.Time.now()
marker_msg.type = 8
marker_msg.action = 0
# set the scale of the marker
marker_msg.scale.x = 0.2
marker_msg.scale.y = 0.2
marker_msg.scale.z = 0.2
# set the color
marker_msg.color.r = 1.0
marker_msg.color.g = 0.0
marker_msg.color.b = 0.0
marker_msg.color.a = 1.0
# Set the pose of the marker
marker_msg.pose.position.x = 0.0
marker_msg.pose.position.y = 0.0
marker_msg.pose.position.z = 0.0
marker_msg.pose.orientation.x = 0
marker_msg.pose.orientation.y = 0
marker_msg.pose.orientation.z = 0
marker_msg.pose.orientation.w = 1.0
# points
mpc_traj_plan_points = []
for iStage in range(0, self.mpc_N_):
point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_[1, iStage], self.mpc_x_plan_[2, iStage])
mpc_traj_plan_points.append(point)
marker_msg.points = mpc_traj_plan_points
self.mpc_traj_plan_vis_pub_.publish(marker_msg)
except:
rospy.logwarn("MPC trajectory plan not published!")
def bebop_nmpc_control():
    """Node entry point: construct the NMPC controller and spin the 50 Hz loop.

    The loop waits until both odometry and a goal pose have been received,
    then on every tick computes and publishes the command plus the trajectory
    visualization.
    """
    # create a node
    rospy.loginfo("Starting Bebop NMPC Control...")
    rospy.init_node("bebop_nmpc_control_node", anonymous=False)
    loop_rate = rospy.Rate(50)
    rospy.sleep(1.0)
    # formulation + controller
    controller = BebopNmpcControl(BebopNmpcFormulationParam())
    while not rospy.is_shutdown():
        if controller.received_first_odom_ is False:
            rospy.logwarn('Waiting for first Odometry!')
        elif controller.received_first_goal_ is False:
            rospy.logwarn('Waiting for first goal pose!')
        else:
            controller.calculate_bebop_cmd_vel()
            controller.pub_bebop_cmd_vel()
            controller.pub_mpc_traj_plan_vis()
        loop_rate.sleep()
# Run the control node when this file is executed directly.
if __name__ == "__main__":
    bebop_nmpc_control()
|
flexible
|
{
"blob_id": "76d0dd2d6b2d580900283f2623f05dd02a70fcd8",
"index": 6825,
"step-1": "<mask token>\n\n\nclass BebopNmpcControl:\n <mask token>\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = 
pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n <mask token>\n <mask token>\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n <mask token>\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def 
calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n 
marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BebopNmpcControl:\n <mask token>\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = 
pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3\n ] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_\n <mask token>\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n\n def set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_start, iStage] = np.array([self.\n bebop_state_current_[0], self.bebop_state_current_[1], self\n .bebop_state_current_[2], self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info,\n iStage] = np.concatenate((self.obs_state_prediction_[0:3,\n iStage], self.obs_size_))\n if iStage == self.mpc_N_ - 1:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((self.\n mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n else:\n parameters_all_stage[self.mpc_form_param_.\n 
param_index_mpc_weights, iStage] = np.hstack((0.05 *\n self.mpc_weights_wp_, self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.\n mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! 
Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BebopNmpcControl:\n <mask token>\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = 
pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3\n ] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_\n\n def reset_nlp_solver(self):\n u_reset = np.zeros(self.mpc_nu_)\n x_reset = np.zeros(self.mpc_nx_)\n s_reset = np.zeros(self.mpc_ns_)\n x_reset[0:3] = self.bebop_state_current_[0:3]\n x_reset[6:8] = self.bebop_state_current_[6:8]\n nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(\n -1)\n self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(\n -1)\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n\n def set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_start, iStage] = np.array([self.\n bebop_state_current_[0], self.bebop_state_current_[1], self\n .bebop_state_current_[2], self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info,\n iStage] = 
np.concatenate((self.obs_state_prediction_[0:3,\n iStage], self.obs_size_))\n if iStage == self.mpc_N_ - 1:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((self.\n mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n else:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((0.05 *\n self.mpc_weights_wp_, self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.\n mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry 
time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in 
range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass BebopNmpcControl:\n\n def __init__(self, mpc_form_param):\n self.mpc_form_param_ = mpc_form_param\n self.roll_max_ = self.mpc_form_param_.roll_max\n self.pitch_max_ = self.mpc_form_param_.pitch_max\n self.vz_max_ = self.mpc_form_param_.vz_max\n self.yawrate_max_ = self.mpc_form_param_.yawrate_max\n self.K_yaw_ = self.mpc_form_param_.K_yaw\n self.bebop_size_ = self.mpc_form_param_.bebop_size\n self.bebop_state_current_ = np.zeros(9)\n self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0])\n self.nobs_ = self.mpc_form_param_.nobs\n self.obs_size_ = self.mpc_form_param_.obs_size\n self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0])\n self.obs_state_prediction_ = np.tile(np.array(self.\n obs_state_current_), (self.mpc_form_param_.N, 1)).T\n self.mpc_dt_ = self.mpc_form_param_.dt\n self.mpc_N_ = self.mpc_form_param_.N\n self.mpc_Tf_ = self.mpc_form_param_.Tf\n self.mpc_nx_ = self.mpc_form_param_.nx\n self.mpc_nu_ = self.mpc_form_param_.nu\n self.mpc_ns_ = self.mpc_form_param_.ns\n self.mpc_np_ = self.mpc_form_param_.nparam\n self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp\n self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input\n self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll\n self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack\n self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_, self.\n mpc_N_)).reshape(-1)\n self.mpc_nlp_param_ = self.mpc_nx_ + self.mpc_np_ * self.mpc_N_\n self.mpc_x_plan_ = np.zeros((self.mpc_nx_, self.mpc_N_))\n self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_))\n self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_))\n self.mpc_u_now_ = np.zeros(self.mpc_nu_)\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n recompile = False\n [self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.\n nlp_lbg_, self.nlp_ubg_] = bebop_nmpc_casadi_solver(self.\n mpc_form_param_, recompile)\n self.odom_sub_ = rospy.Subscriber('/bebop/odom', Odometry, 
self.\n set_bebop_odom)\n self.received_first_odom_ = False\n self.odom_received_time_ = rospy.Time.now()\n self.odom_time_out_ = 0.2\n self.pose_sub_ = rospy.Subscriber('/bebop/pose', PoseStamped, self.\n set_bebop_pose)\n self.twist_sub_ = rospy.Subscriber('/bebop/twist', TwistStamped,\n self.set_bebop_twist)\n self.pose_goal_sub_ = rospy.Subscriber('/bebop/pose_goal',\n PoseStamped, self.set_bebop_pose_goal)\n self.received_first_goal_ = False\n self.bebop_cmd_vel_ = np.array(4)\n self.bebop_cmd_vel_pub_ = rospy.Publisher('/bebop/auto_cmd_vel',\n Twist, queue_size=1)\n self.mpc_traj_plan_vis_pub_ = rospy.Publisher(\n '/bebop/mpc/trajectory_plan_vis', Marker, queue_size=1)\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = 
np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3\n ] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_\n\n def reset_nlp_solver(self):\n u_reset = np.zeros(self.mpc_nu_)\n x_reset = np.zeros(self.mpc_nx_)\n s_reset = np.zeros(self.mpc_ns_)\n x_reset[0:3] = self.bebop_state_current_[0:3]\n x_reset[6:8] = self.bebop_state_current_[6:8]\n nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(\n -1)\n self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(\n -1)\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n\n def 
set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_start, iStage] = np.array([self.\n bebop_state_current_[0], self.bebop_state_current_[1], self\n .bebop_state_current_[2], self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info,\n iStage] = np.concatenate((self.obs_state_prediction_[0:3,\n iStage], self.obs_size_))\n if iStage == self.mpc_N_ - 1:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((self.\n mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n else:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((0.05 *\n self.mpc_weights_wp_, self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.\n mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = 
self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! 
Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\ndef 
bebop_nmpc_control():\n rospy.loginfo('Starting Bebop NMPC Control...')\n rospy.init_node('bebop_nmpc_control_node', anonymous=False)\n hz = 50\n rate = rospy.Rate(hz)\n rospy.sleep(1.0)\n mpc_form_param = BebopNmpcFormulationParam()\n bebop_nmpc = BebopNmpcControl(mpc_form_param)\n while not rospy.is_shutdown():\n if bebop_nmpc.received_first_odom_ is False:\n rospy.logwarn('Waiting for first Odometry!')\n elif bebop_nmpc.received_first_goal_ is False:\n rospy.logwarn('Waiting for first goal pose!')\n else:\n bebop_nmpc.calculate_bebop_cmd_vel()\n bebop_nmpc.pub_bebop_cmd_vel()\n bebop_nmpc.pub_mpc_traj_plan_vis()\n rate.sleep()\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\nimport tf\nfrom geometry_msgs.msg import PoseStamped, Twist, TwistStamped, Point\nfrom nav_msgs.msg import Odometry\nfrom visualization_msgs.msg import Marker\nfrom bebop_nmpc_solver import BebopNmpcFormulationParam, bebop_nmpc_casadi_solver\n\n\n# The frame by default is NWU\n\n\nclass BebopNmpcControl:\n def __init__(self, mpc_form_param):\n # MPC formulation settings\n self.mpc_form_param_ = mpc_form_param\n\n # bebop param\n self.roll_max_ = self.mpc_form_param_.roll_max\n self.pitch_max_ = self.mpc_form_param_.pitch_max\n self.vz_max_ = self.mpc_form_param_.vz_max\n self.yawrate_max_ = self.mpc_form_param_.yawrate_max\n self.K_yaw_ = self.mpc_form_param_.K_yaw\n self.bebop_size_ = self.mpc_form_param_.bebop_size\n\n # state and goal pose, size\n self.bebop_state_current_ = np.zeros(9)\n self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0])\n\n # collision avoidance obs param\n self.nobs_ = self.mpc_form_param_.nobs\n self.obs_size_ = self.mpc_form_param_.obs_size\n self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0])\n self.obs_state_prediction_ = np.tile(np.array(self.obs_state_current_), (self.mpc_form_param_.N, 1)).T\n\n # MPC settings\n self.mpc_dt_ = self.mpc_form_param_.dt\n self.mpc_N_ = self.mpc_form_param_.N\n self.mpc_Tf_ = self.mpc_form_param_.Tf\n self.mpc_nx_ = self.mpc_form_param_.nx\n self.mpc_nu_ = self.mpc_form_param_.nu\n self.mpc_ns_ = self.mpc_form_param_.ns\n self.mpc_np_ = self.mpc_form_param_.nparam\n self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp\n self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input\n self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll\n self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack\n\n # MPC variables\n self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_, self.mpc_N_)).reshape(-1)\n self.mpc_nlp_param_ = self.mpc_nx_ + self.mpc_np_ * self.mpc_N_\n self.mpc_x_plan_ = np.zeros((self.mpc_nx_, 
self.mpc_N_))\n self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_))\n self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_))\n self.mpc_u_now_ = np.zeros(self.mpc_nu_)\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n\n # MPC solver\n recompile = False \n [self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.nlp_lbg_, self.nlp_ubg_] = \\\n bebop_nmpc_casadi_solver(self.mpc_form_param_, recompile)\n\n # ROS subscriber\n self.odom_sub_ = rospy.Subscriber(\"/bebop/odom\", Odometry, self.set_bebop_odom) # bebop_odom\n self.received_first_odom_ = False\n self.odom_received_time_ = rospy.Time.now()\n self.odom_time_out_ = 0.2\n\n self.pose_sub_ = rospy.Subscriber(\"/bebop/pose\", PoseStamped, self.set_bebop_pose)\n self.twist_sub_ = rospy.Subscriber(\"/bebop/twist\", TwistStamped, self.set_bebop_twist)\n\n self.pose_goal_sub_ = rospy.Subscriber(\"/bebop/pose_goal\", PoseStamped, self.set_bebop_pose_goal)\n self.received_first_goal_ = False \n\n # ROS publisher\n self.bebop_cmd_vel_ = np.array(4)\n self.bebop_cmd_vel_pub_ = rospy.Publisher(\"/bebop/auto_cmd_vel\", Twist, queue_size=1)\n self.mpc_traj_plan_vis_pub_ = rospy.Publisher(\"/bebop/mpc/trajectory_plan_vis\", Marker, queue_size=1)\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n # read data\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.orientation.x,\n odom_msg.pose.pose.orientation.y,\n odom_msg.pose.pose.orientation.z,\n odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False: # if not 
received any goal pose \n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.orientation.x,\n pose_msg.pose.orientation.y,\n pose_msg.pose.orientation.z,\n pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False: # if not received any goal pose \n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.pose.orientation.x,\n pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z,\n pose_goal_msg.pose.orientation.w])\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal, rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3] \\\n + self.obs_state_current_[3:6] * (iStage+1) * self.mpc_dt_\n\n def reset_nlp_solver(self):\n # initialize plan\n u_reset = np.zeros(self.mpc_nu_)\n x_reset = np.zeros(self.mpc_nx_)\n s_reset = np.zeros(self.mpc_ns_)\n # x_reset = self.bebop_state_current_[:self.mpc_nx_]\n 
x_reset[0:3] = self.bebop_state_current_[0:3]\n x_reset[6:8] = self.bebop_state_current_[6:8]\n nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(-1)\n self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(-1)\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)).reshape(-1)\n\n def set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_)) # all parameters on each stage\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_start, iStage] = \\\n np.array([self.bebop_state_current_[0], self.bebop_state_current_[1], self.bebop_state_current_[2],\n self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info, iStage] = np.concatenate((\n self.obs_state_prediction_[0:3, iStage], self.obs_size_\n ))\n if iStage == self.mpc_N_ - 1: # terminal weights\n parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(\n (self.mpc_weights_wp_, 0.1 * self.mpc_weights_input_,\n self.mpc_weights_coll_, self.mpc_weights_slack_)\n )\n else:\n parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(\n (0.05 * self.mpc_weights_wp_, self.mpc_weights_input_,\n self.mpc_weights_coll_, self.mpc_weights_slack_)\n )\n # set parameters\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.mpc_nx_],\n np.transpose(parameters_all_stage).reshape(-1)))\n\n 
def run_nlp_solver(self):\n # initialize solver\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n\n # set solver params\n self.set_nlp_params()\n\n # call the solver\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_,\n p=self.mpc_nlp_param_,\n lbx=self.nlp_lbx_,\n ubx=self.nlp_ubx_,\n lbg=self.nlp_lbg_,\n ubg=self.nlp_ubg_)\n\n # deal with infeasibility\n if self.nlp_solver_complied_.stats()['success'] is False: # if infeasible\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn(\"MPC infeasible!\")\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec() * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.', solver_iter, solver_time)\n\n # obtain solution\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self.mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_+self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_+self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n # if odom received\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec() > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n # run the nlp solver\n self.run_nlp_solver()\n\n # control commands\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! 
Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n\n # yaw control\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.yawrate_max_)\n\n # obtained command\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd, yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_ # pitch to move along x\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_ # roll to move along y\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = \"map\"\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n # set the scale of the marker\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n # set the color\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n # Set the pose of the marker\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n # points\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_[1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n 
marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn(\"MPC trajectory plan not published!\")\n\n\ndef bebop_nmpc_control():\n # create a node\n rospy.loginfo(\"Starting Bebop NMPC Control...\")\n rospy.init_node(\"bebop_nmpc_control_node\", anonymous=False)\n hz = 50\n rate = rospy.Rate(hz)\n rospy.sleep(1.0)\n\n # formulation\n mpc_form_param = BebopNmpcFormulationParam()\n\n # control\n bebop_nmpc = BebopNmpcControl(mpc_form_param)\n\n while not rospy.is_shutdown():\n if bebop_nmpc.received_first_odom_ is False:\n rospy.logwarn('Waiting for first Odometry!')\n elif bebop_nmpc.received_first_goal_ is False:\n rospy.logwarn('Waiting for first goal pose!')\n else:\n bebop_nmpc.calculate_bebop_cmd_vel()\n bebop_nmpc.pub_bebop_cmd_vel()\n bebop_nmpc.pub_mpc_traj_plan_vis()\n rate.sleep()\n\n\nif __name__ == \"__main__\":\n bebop_nmpc_control()\n",
"step-ids": [
10,
12,
13,
15,
18
]
}
|
[
10,
12,
13,
15,
18
] |
#!/usr/bin/env python
# model.py -- feature model (templates and weights) for a shift-reduce parser.
# NOTE: this is Python 2 code ("print >> stream" syntax, old-style except clauses).

from __future__ import division

import sys
import math
logs = sys.stderr  # all diagnostic output goes to stderr; stdout stays clean for data
from collections import defaultdict

import time
from mytime import Mytime

import gflags as flags
FLAGS=flags.FLAGS

# command-line flags controlling model I/O and unknown-word handling
flags.DEFINE_string("weights", None, "weights file (feature instances and weights)", short_name="w")
flags.DEFINE_boolean("svector", False, "use David's svector (Cython) instead of Pythonic defaultdict")
flags.DEFINE_boolean("featstat", False, "print feature stats")
flags.DEFINE_string("outputweights", None, "write weights (in short-hand format); - for STDOUT", short_name="ow")
flags.DEFINE_boolean("autoeval", True, "use automatically generated eval module")
flags.DEFINE_integer("unk", 0, "treat words with count less than COUNT as UNKNOWN")
flags.DEFINE_boolean("debug_wordfreq", False, "print word freq info")
flags.DEFINE_boolean("unktag", False, "use POS tags for unknown words")
flags.DEFINE_boolean("unkdel", False, "remove features involving unks")
flags.DEFINE_boolean("s2", True, "use s2t features")
def new_vector():
    '''Create an empty feature vector.

    Returns an svector.Vector when --svector is set, otherwise a plain
    defaultdict(int).
    '''
    # kept as a named function ("do not use lambda" per the original author)
    if FLAGS.svector:
        return svector.Vector()
    return defaultdict(int)
class Model(object):
'''templates and weights.'''
## __slots__ = "templates", "weights", "list_templates", "freq_templates"
names = ["SHIFT", "LEFT", "RIGHT"]
indent = " " * 4
eval_module = None # by default, use my handwritten static_eval()
def __init__(self, weightstr):
self.knowns = set()
self.unk = FLAGS.unk
self.unktag = FLAGS.unktag
self.unkdel = FLAGS.unkdel
assert not (self.unkdel and self.unktag), "UNKDEL and UNKTAG can't be both true"
if FLAGS.svector: # now it is known
global svector
try:
svector = __import__("svector")
print >> logs, "WARNING: using David's svector (Cython). Performance might suffer."
except:
print >> logs, "WARNING: failed to import svector. using Pythonic defaultdict instead (actually faster)."
FLAGS.svector = False # important
self.templates = {} # mapping from "s0t-q0t" to the eval expression
self.list_templates = [] # ordered list of template keys "s0t-q0t"
self.freq_templates = defaultdict(int)
self.weights = new_vector() #Vector()
self.read_weights(weightstr)
## self.featurenames = set(self.weights.iterkeys())
if FLAGS.featstat:
self.print_templates()
def count_knowns_from_train(self, trainfile, devfile):
'''used in training'''
print >> logs, "counting word freqs from %s, unktag=%s" % (trainfile, self.unktag)
stime = time.time()
words = defaultdict(int)
for i, line in enumerate(open(trainfile)):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
words[word] += 1
if FLAGS.debug_wordfreq:
devunk1 = set()
devunk0 = set()
for line in open(devfile):
for word in line.split():
word = word.strip("()").rsplit("/", 1)[0]
if words[word] <= self.unk and words[word] > 0:
devunk1.add(word)
if words[word] == 0:
devunk0.add(word)
print >> logs, "=1", len(devunk1), " ".join(sorted(devunk1))
print >> logs
print >> logs, "=0", len(devunk0), " ".join(sorted(devunk0))
## freqs = defaultdict(list)
## for word, freq in words.items():
## freqs[freq].append(word)
## for freq in sorted(freqs, reverse=True):
## print >> logs, freq, len(freqs[freq]), " ".join(sorted(freqs[freq]))
## print >> logs
self.knowns = set()
for word, freq in words.items():
if freq > self.unk:
self.knowns.add(word)
print >> logs, "%d lines: %d known (freq > %d), %d unknown. counted in %.2f seconds" % \
(i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)
## print >> logs, " ".join(sorted(self.knowns))
def add_template(self, s, freq=1):
## like this: "s0w-s0t=%s|%s" % (s0w, s0t)
symbols = s.split("-") # static part: s0w-s0t
if s not in self.templates:
tmp = '"%s=%s" %% (%s)' % (s, \
"|".join(["%s"] * len(symbols)), \
", ".join(symbols))
self.templates[s] = compile(tmp, "2", "eval")
self.list_templates.append((s, tmp)) # in order
self.freq_templates[s] += int(freq)
def print_autoevals(self):
tfilename = str(int(time.time()))
templatefile = open("/tmp/%s.py" % tfilename, "wt")
print >> templatefile, "#generated by model.py"
print >> templatefile, "import sys; print >> sys.stderr, 'importing succeeded!'"
print >> templatefile, "def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):"
print >> templatefile, "%sreturn [" % Model.indent
for s, e in self.list_templates:
print >> templatefile, "%s%s," % (Model.indent * 2, e)
print >> templatefile, "%s]" % (Model.indent * 2)
templatefile.close()
if FLAGS.autoeval:
sys.path.append('/tmp/')
print >> logs, "importing auto-generated file /tmp/%s.py" % tfilename
# to be used in newstate
Model.eval_module = __import__(tfilename)
else:
Model.eval_module = Model
def print_templates(self, f=logs):
print >> f, ">>> %d templates in total:" % len(self.templates)
print >> f, "\n".join(["%-20s\t%d" % (x, self.freq_templates[x]) \
for x, _ in self.list_templates])
print >> f, "---"
def read_templates(self, filename):
## try interpreting it as a filename, if failed, then as a string
try:
f = open(filename)
print >> logs, "reading templates from %s" % filename,
for x in f:
if x[:3] == "---":
break
if x[:3] == ">>>":
continue
try:
s, freq = x.split()
except:
s, freq = x, 1
self.add_template(s, freq)
except:
## from argv string rather than file
for x in filename.split():
self.add_template(x)
f = None
print >> logs, "%d feature templates read." % len(self.templates)
return f
def read_weights(self, filename, infertemplates=False):
'''instances are like "s0t-q0t=LRB-</s>=>LEFT 3.8234"'''
infile = self.read_templates(filename)
infertemplates = len(self.templates) <= 1
if infertemplates:
print >> logs, "will infer templates from weights..."
mytime = Mytime()
i = 0
if infile is not None:
print >> logs, "reading feature weights from %s\t" % filename,
for i, line in enumerate(infile, 1):
if i % 200000 == 0:
print >> logs, "%d lines read..." % i,
if line[0] == " ":
# TODO: separate known words line (last line)
self.knowns = set(line.split())
print >> logs, "\n%d known words read." % len(self.knowns)
self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x
break
feat, weight = line.split()
self.weights[feat] = float(weight)
if infertemplates:
self.add_template(feat.split("=", 1)[0], 1) ## one occurrence
print >> logs, "\n%d feature instances (%d lines) read in %.2lf seconds." % \
(len(self.weights), i, mytime.period())
self.print_autoevals()
def make_feats(self, state):
'''returns a *list* of feature templates for state.'''
fv = new_vector() #Vector()
top = state.top()
topnext = state.top(1)
top3rd = state.top(2)
qhead = state.qhead()
qnext = state.qhead(1)
## this part is manual; their combinations are automatic
s0 = top.head() if top is not None else ("<s>", "<s>") # N.B. (...)
s1 = topnext.head() if topnext is not None else ("<s>", "<s>")
s2 = top3rd.head() if top3rd is not None else ("<s>", "<s>")
q0 = qhead if qhead is not None else ("</s>", "</s>")
q1 = qnext if qnext is not None else ("</s>", "</s>")
s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else "NONE"
s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else "NONE"
s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else "NONE"
s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else "NONE"
## like this: "s0w-s0t=%s|%s" % (s0w, s0t) ---> returns a list here!
return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))
# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys
def write(self, filename="-", weights=None):
if weights is None:
weights = self.weights
if filename == "-":
outfile = sys.stdout
filename = "STDOUT" # careful overriding
else:
outfile = open(filename, "wt")
self.print_templates(outfile)
mytime = Mytime()
nonzero = 0
print >> logs, "sorting %d features..." % len(weights),
for i, f in enumerate(sorted(weights), 1):
if i == 1: # sorting done
print >> logs, "done in %.2lf seconds." % mytime.period()
print >> logs, "writing features to %s..." % filename
v = weights[f]
if math.fabs(v) > 1e-3:
print >> outfile, "%s\t%.5lf" % (f, v)
nonzero += 1
if self.unk > 0: # print known words
print >> outfile, " " + " ".join(sorted(self.knowns)) # " " to mark
print >> logs, "%d nonzero feature instances written in %.2lf seconds." % \
(nonzero, mytime.period()) ## nonzero != i
@staticmethod
def trim(fv):
for f in fv:
if math.fabs(fv[f]) < 1e-3:
del fv[f]
return fv
@staticmethod
def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):
return ["q0t=%s" % (q0t),
"q0w-q0t=%s|%s" % (q0w, q0t),
"q0w=%s" % (q0w),
"s0t-q0t-q1t=%s|%s|%s" % (s0t, q0t, q1t),
"s0t-q0t=%s|%s" % (s0t, q0t),
"s0t-s1t=%s|%s" % (s0t, s1t),
"s0t-s1w-s1t=%s|%s|%s" % (s0t, s1w, s1t),
"s0t=%s" % (s0t),
"s0w-q0t-q1t=%s|%s|%s" % (s0w, q0t, q1t),
"s0w-s0t-s1t=%s|%s|%s" % (s0w, s0t, s1t),
"s0w-s0t-s1w-s1t=%s|%s|%s|%s" % (s0w, s0t, s1w, s1t),
"s0w-s0t-s1w=%s|%s|%s" % (s0w, s0t, s1w),
"s0w-s0t=%s|%s" % (s0w, s0t),
"s0w-s1w-s1t=%s|%s|%s" % (s0w, s1w, s1t),
"s0w-s1w=%s|%s" % (s0w, s1w),
"s0w=%s" % (s0w),
"s1t-s0t-q0t=%s|%s|%s" % (s1t, s0t, q0t),
"s1t-s0t-s0lct=%s|%s|%s" % (s1t, s0t, s0lct),
"s1t-s0t-s0rct=%s|%s|%s" % (s1t, s0t, s0rct),
"s1t-s0w-q0t=%s|%s|%s" % (s1t, s0w, q0t),
"s1t-s0w-s0lct=%s|%s|%s" % (s1t, s0w, s0lct),
"s1t-s1lct-s0t=%s|%s|%s" % (s1t, s1lct, s0t),
"s1t-s1lct-s0w=%s|%s|%s" % (s1t, s1lct, s0w),
"s1t-s1rct-s0t=%s|%s|%s" % (s1t, s1rct, s0t),
"s1t-s1rct-s0w=%s|%s|%s" % (s1t, s1rct, s0w),
"s1t=%s" % (s1t),
"s1w-s1t=%s|%s" % (s1w, s1t),
"s1w=%s" % (s1w),
"s2t-s1t-s0t=%s|%s|%s" % (s2t, s1t, s0t)]
def prune(self, filenames):
    '''prune features from word/tag lines'''
    # `filenames` is a whitespace-separated string of corpus paths; each
    # corpus line holds space-separated word/tag tokens.  Any feature
    # instance mentioning a word or tag not seen in these corpora is
    # dropped from self.weights.  (Python 2 dialect: `print >>`.)
    print >> logs, "pruning features using %s..." % filenames,

    # Collect every word and every tag observed in the corpora.
    fullset = set()
    for filename in filenames.split():
        for l in open(filename):
            for w, t in map(lambda x:x.rsplit("/", 1), l.split()):
                fullset.add(w)
                fullset.add(t)

    print >> logs, "collected %d uniq words & tags..." % (len(fullset)),

    # Keep only feature instances whose every component value is known.
    new = new_vector() # Vector()
    for f in self.weights:
        # Feature names look like "tmpl=v1|v2|...=>ACTION"; take the part
        # between the first and last "=" and split it into its values.
        stuff = f.split("=", 1)[1].rsplit("=", 1)[0].split("|") ## b/w 1st and last "=", but caution
        for s in stuff:
            if s not in fullset:
                break
        else:
            # for/else: no break fired, i.e. every value is known.
            new[f] = self.weights[f]

    # True division: the file imports `from __future__ import division`.
    print >> logs, "%d features survived (ratio: %.2f)" % (len(new), len(new) / len(self.weights))
    self.weights = new
def sparsify(self, z=1):
    '''duchi et al., 2008'''
    # NOTE(review): unimplemented stub.  The docstring points at Duchi et
    # al. (2008) L1-ball projection and `z` would presumably be the ball
    # radius, but the body was never written, so calling this is a no-op
    # that returns None.
if __name__ == "__main__":
    # Standalone usage: load a weight file (-w), optionally prune its
    # features against a word/tag corpus (--prune), and optionally
    # re-serialize the weights (--outputweights).  Python 2 dialect.
    flags.DEFINE_string("prune", None, "prune features w.r.t. FILE (word/tag format)")

    try:
        argv = FLAGS(sys.argv)
        if FLAGS.weights is None:
            raise flags.FlagsError("must specify weights by -w ...")
    except flags.FlagsError, e:  # Python 2 `except ..., e` syntax
        print >> logs, 'Error: %s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
        sys.exit(1)

    # Force feature statistics so Model.__init__ prints the template table.
    FLAGS.featstat = True

    model = Model(FLAGS.weights) #.model, FLAGS.weights)

    if FLAGS.prune:
        model.prune(FLAGS.prune)

    if FLAGS.outputweights:
        model.write(FLAGS.outputweights)
|
normal
|
{
"blob_id": "e5fd0fc13a39444a934eea3bd24056073d28eff2",
"index": 9869,
"step-1": "#!/usr/bin/env python\n\nfrom __future__ import division\n\nimport sys\nimport math\nlogs = sys.stderr\nfrom collections import defaultdict\n\nimport time\nfrom mytime import Mytime\n\nimport gflags as flags\nFLAGS=flags.FLAGS\n\nflags.DEFINE_string(\"weights\", None, \"weights file (feature instances and weights)\", short_name=\"w\")\nflags.DEFINE_boolean(\"svector\", False, \"use David's svector (Cython) instead of Pythonic defaultdict\")\nflags.DEFINE_boolean(\"featstat\", False, \"print feature stats\")\nflags.DEFINE_string(\"outputweights\", None, \"write weights (in short-hand format); - for STDOUT\", short_name=\"ow\")\nflags.DEFINE_boolean(\"autoeval\", True, \"use automatically generated eval module\")\nflags.DEFINE_integer(\"unk\", 0, \"treat words with count less than COUNT as UNKNOWN\")\nflags.DEFINE_boolean(\"debug_wordfreq\", False, \"print word freq info\")\nflags.DEFINE_boolean(\"unktag\", False, \"use POS tags for unknown words\")\nflags.DEFINE_boolean(\"unkdel\", False, \"remove features involving unks\")\nflags.DEFINE_boolean(\"s2\", True, \"use s2t features\")\n \ndef new_vector():\n return defaultdict(int) if not FLAGS.svector else svector.Vector() # do not use lambda \n\nclass Model(object):\n '''templates and weights.'''\n\n## __slots__ = \"templates\", \"weights\", \"list_templates\", \"freq_templates\"\n\n names = [\"SHIFT\", \"LEFT\", \"RIGHT\"]\n indent = \" \" * 4\n eval_module = None # by default, use my handwritten static_eval()\n \n def __init__(self, weightstr):\n\n self.knowns = set()\n self.unk = FLAGS.unk\n self.unktag = FLAGS.unktag\n self.unkdel = FLAGS.unkdel\n assert not (self.unkdel and self.unktag), \"UNKDEL and UNKTAG can't be both true\"\n\n if FLAGS.svector: # now it is known\n global svector\n try:\n svector = __import__(\"svector\")\n print >> logs, \"WARNING: using David's svector (Cython). Performance might suffer.\"\n except:\n print >> logs, \"WARNING: failed to import svector. 
using Pythonic defaultdict instead (actually faster).\"\n FLAGS.svector = False # important\n\n self.templates = {} # mapping from \"s0t-q0t\" to the eval expression\n self.list_templates = [] # ordered list of template keys \"s0t-q0t\"\n self.freq_templates = defaultdict(int)\n self.weights = new_vector() #Vector()\n\n self.read_weights(weightstr)\n## self.featurenames = set(self.weights.iterkeys())\n\n if FLAGS.featstat:\n self.print_templates()\n\n def count_knowns_from_train(self, trainfile, devfile):\n '''used in training'''\n\n print >> logs, \"counting word freqs from %s, unktag=%s\" % (trainfile, self.unktag)\n stime = time.time()\n\n words = defaultdict(int) \n for i, line in enumerate(open(trainfile)):\n for word in line.split():\n word = word.strip(\"()\").rsplit(\"/\", 1)[0]\n words[word] += 1\n\n if FLAGS.debug_wordfreq:\n devunk1 = set()\n devunk0 = set()\n for line in open(devfile): \n for word in line.split():\n word = word.strip(\"()\").rsplit(\"/\", 1)[0]\n if words[word] <= self.unk and words[word] > 0:\n devunk1.add(word)\n if words[word] == 0:\n devunk0.add(word)\n \n print >> logs, \"=1\", len(devunk1), \" \".join(sorted(devunk1))\n print >> logs\n print >> logs, \"=0\", len(devunk0), \" \".join(sorted(devunk0))\n\n## freqs = defaultdict(list)\n## for word, freq in words.items():\n## freqs[freq].append(word)\n\n## for freq in sorted(freqs, reverse=True):\n## print >> logs, freq, len(freqs[freq]), \" \".join(sorted(freqs[freq]))\n## print >> logs\n\n self.knowns = set()\n for word, freq in words.items():\n if freq > self.unk:\n self.knowns.add(word)\n\n print >> logs, \"%d lines: %d known (freq > %d), %d unknown. 
counted in %.2f seconds\" % \\\n (i+1, len(self.knowns), self.unk, len(words)-len(self.knowns), time.time() - stime)\n## print >> logs, \" \".join(sorted(self.knowns))\n\n def add_template(self, s, freq=1):\n ## like this: \"s0w-s0t=%s|%s\" % (s0w, s0t) \n symbols = s.split(\"-\") # static part: s0w-s0t\n if s not in self.templates:\n tmp = '\"%s=%s\" %% (%s)' % (s, \\\n \"|\".join([\"%s\"] * len(symbols)), \\\n \", \".join(symbols))\n \n self.templates[s] = compile(tmp, \"2\", \"eval\")\n \n self.list_templates.append((s, tmp)) # in order\n\n self.freq_templates[s] += int(freq)\n\n def print_autoevals(self):\n\n tfilename = str(int(time.time()))\n templatefile = open(\"/tmp/%s.py\" % tfilename, \"wt\")\n \n print >> templatefile, \"#generated by model.py\"\n print >> templatefile, \"import sys; print >> sys.stderr, 'importing succeeded!'\"\n print >> templatefile, \"def static_eval((q0w, q0t), (q1w, q1t), (q2w, q2t), (s0w, s0t), (s1w, s1t), (s2w, s2t), (s0lct, s0rct), (s1lct, s1rct)):\"\n print >> templatefile, \"%sreturn [\" % Model.indent\n \n for s, e in self.list_templates:\n print >> templatefile, \"%s%s,\" % (Model.indent * 2, e)\n \n print >> templatefile, \"%s]\" % (Model.indent * 2)\n templatefile.close()\n\n if FLAGS.autoeval:\n sys.path.append('/tmp/')\n print >> logs, \"importing auto-generated file /tmp/%s.py\" % tfilename\n # to be used in newstate\n Model.eval_module = __import__(tfilename)\n else:\n Model.eval_module = Model \n \n def print_templates(self, f=logs):\n print >> f, \">>> %d templates in total:\" % len(self.templates)\n print >> f, \"\\n\".join([\"%-20s\\t%d\" % (x, self.freq_templates[x]) \\\n for x, _ in self.list_templates])\n print >> f, \"---\"\n\n def read_templates(self, filename):\n\n ## try interpreting it as a filename, if failed, then as a string\n try:\n f = open(filename)\n print >> logs, \"reading templates from %s\" % filename,\n for x in f:\n if x[:3] == \"---\":\n break\n if x[:3] == \">>>\":\n continue\n try:\n s, 
freq = x.split()\n except:\n s, freq = x, 1\n self.add_template(s, freq) \n \n except:\n ## from argv string rather than file\n for x in filename.split():\n self.add_template(x)\n f = None\n\n print >> logs, \"%d feature templates read.\" % len(self.templates)\n\n return f\n\n def read_weights(self, filename, infertemplates=False):\n '''instances are like \"s0t-q0t=LRB-</s>=>LEFT 3.8234\"'''\n\n infile = self.read_templates(filename)\n\n infertemplates = len(self.templates) <= 1\n if infertemplates:\n print >> logs, \"will infer templates from weights...\" \n\n mytime = Mytime()\n i = 0\n if infile is not None:\n print >> logs, \"reading feature weights from %s\\t\" % filename,\n for i, line in enumerate(infile, 1):\n if i % 200000 == 0:\n print >> logs, \"%d lines read...\" % i,\n\n if line[0] == \" \":\n # TODO: separate known words line (last line)\n self.knowns = set(line.split())\n print >> logs, \"\\n%d known words read.\" % len(self.knowns)\n self.unk = 1 # in cae you forgot to say it; doesn't matter 1 or x\n break\n\n feat, weight = line.split() \n self.weights[feat] = float(weight)\n\n if infertemplates:\n self.add_template(feat.split(\"=\", 1)[0], 1) ## one occurrence\n\n print >> logs, \"\\n%d feature instances (%d lines) read in %.2lf seconds.\" % \\\n (len(self.weights), i, mytime.period())\n\n self.print_autoevals()\n\n def make_feats(self, state):\n '''returns a *list* of feature templates for state.'''\n \n fv = new_vector() #Vector()\n top = state.top()\n topnext = state.top(1)\n top3rd = state.top(2)\n qhead = state.qhead()\n qnext = state.qhead(1)\n\n ## this part is manual; their combinations are automatic\n s0 = top.head() if top is not None else (\"<s>\", \"<s>\") # N.B. 
(...)\n s1 = topnext.head() if topnext is not None else (\"<s>\", \"<s>\") \n s2 = top3rd.head() if top3rd is not None else (\"<s>\", \"<s>\") \n\n q0 = qhead if qhead is not None else (\"</s>\", \"</s>\") \n q1 = qnext if qnext is not None else (\"</s>\", \"</s>\")\n\n s0lct = top.lefts[0].tag() if (top is not None and len(top.lefts) > 0) else \"NONE\"\n s0rct = top.rights[-1].tag() if (top is not None and len(top.rights) > 0) else \"NONE\"\n s1lct = topnext.lefts[0].tag() if (topnext is not None and len(topnext.lefts) > 0) else \"NONE\"\n s1rct = topnext.rights[-1].tag() if (topnext is not None and len(topnext.rights) > 0) else \"NONE\"\n \n ## like this: \"s0w-s0t=%s|%s\" % (s0w, s0t) ---> returns a list here!\n return Model.static_eval(q0, q1, s0, s1, s2, (s0lct, s0rct), (s1lct, s1rct))\n# return [eval(t) for t in self.templates.values()] ## eval exprs are the values, not keys\n\n def write(self, filename=\"-\", weights=None):\n\n if weights is None:\n weights = self.weights\n\n if filename == \"-\":\n outfile = sys.stdout\n filename = \"STDOUT\" # careful overriding\n else:\n outfile = open(filename, \"wt\")\n\n self.print_templates(outfile)\n\n mytime = Mytime()\n\n nonzero = 0\n print >> logs, \"sorting %d features...\" % len(weights),\n for i, f in enumerate(sorted(weights), 1):\n if i == 1: # sorting done\n print >> logs, \"done in %.2lf seconds.\" % mytime.period()\n print >> logs, \"writing features to %s...\" % filename\n \n v = weights[f]\n if math.fabs(v) > 1e-3:\n print >> outfile, \"%s\\t%.5lf\" % (f, v)\n nonzero += 1\n\n if self.unk > 0: # print known words\n print >> outfile, \" \" + \" \".join(sorted(self.knowns)) # \" \" to mark\n\n print >> logs, \"%d nonzero feature instances written in %.2lf seconds.\" % \\\n (nonzero, mytime.period()) ## nonzero != i\n\n @staticmethod\n def trim(fv):\n for f in fv:\n if math.fabs(fv[f]) < 1e-3:\n del fv[f]\n return fv\n\n @staticmethod\n def static_eval((q0w, q0t), (q1w, q1t), (s0w, s0t), (s1w, s1t), (s2w, 
s2t), (s0lct, s0rct), (s1lct, s1rct)):\n return [\"q0t=%s\" % (q0t),\n \"q0w-q0t=%s|%s\" % (q0w, q0t),\n \"q0w=%s\" % (q0w),\n \"s0t-q0t-q1t=%s|%s|%s\" % (s0t, q0t, q1t),\n \"s0t-q0t=%s|%s\" % (s0t, q0t),\n \"s0t-s1t=%s|%s\" % (s0t, s1t),\n \"s0t-s1w-s1t=%s|%s|%s\" % (s0t, s1w, s1t),\n \"s0t=%s\" % (s0t),\n \"s0w-q0t-q1t=%s|%s|%s\" % (s0w, q0t, q1t),\n \"s0w-s0t-s1t=%s|%s|%s\" % (s0w, s0t, s1t),\n \"s0w-s0t-s1w-s1t=%s|%s|%s|%s\" % (s0w, s0t, s1w, s1t),\n \"s0w-s0t-s1w=%s|%s|%s\" % (s0w, s0t, s1w),\n \"s0w-s0t=%s|%s\" % (s0w, s0t),\n \"s0w-s1w-s1t=%s|%s|%s\" % (s0w, s1w, s1t),\n \"s0w-s1w=%s|%s\" % (s0w, s1w),\n \"s0w=%s\" % (s0w),\n \"s1t-s0t-q0t=%s|%s|%s\" % (s1t, s0t, q0t),\n \"s1t-s0t-s0lct=%s|%s|%s\" % (s1t, s0t, s0lct),\n \"s1t-s0t-s0rct=%s|%s|%s\" % (s1t, s0t, s0rct),\n \"s1t-s0w-q0t=%s|%s|%s\" % (s1t, s0w, q0t),\n \"s1t-s0w-s0lct=%s|%s|%s\" % (s1t, s0w, s0lct),\n \"s1t-s1lct-s0t=%s|%s|%s\" % (s1t, s1lct, s0t),\n \"s1t-s1lct-s0w=%s|%s|%s\" % (s1t, s1lct, s0w),\n \"s1t-s1rct-s0t=%s|%s|%s\" % (s1t, s1rct, s0t),\n \"s1t-s1rct-s0w=%s|%s|%s\" % (s1t, s1rct, s0w),\n \"s1t=%s\" % (s1t),\n \"s1w-s1t=%s|%s\" % (s1w, s1t),\n \"s1w=%s\" % (s1w),\n \"s2t-s1t-s0t=%s|%s|%s\" % (s2t, s1t, s0t)]\n\n def prune(self, filenames):\n '''prune features from word/tag lines'''\n\n print >> logs, \"pruning features using %s...\" % filenames,\n \n fullset = set()\n for filename in filenames.split():\n for l in open(filename):\n for w, t in map(lambda x:x.rsplit(\"/\", 1), l.split()):\n fullset.add(w)\n fullset.add(t)\n\n print >> logs, \"collected %d uniq words & tags...\" % (len(fullset)),\n\n new = new_vector() # Vector()\n for f in self.weights:\n\n stuff = f.split(\"=\", 1)[1].rsplit(\"=\", 1)[0].split(\"|\") ## b/w 1st and last \"=\", but caution\n for s in stuff:\n if s not in fullset:\n break\n else:\n new[f] = self.weights[f]\n\n print >> logs, \"%d features survived (ratio: %.2f)\" % (len(new), len(new) / len(self.weights))\n self.weights = new\n\n def sparsify(self, z=1):\n 
'''duchi et al., 2008'''\n \n \n\nif __name__ == \"__main__\":\n\n flags.DEFINE_string(\"prune\", None, \"prune features w.r.t. FILE (word/tag format)\")\n\n try:\n argv = FLAGS(sys.argv)\n if FLAGS.weights is None:\n raise flags.FlagsError(\"must specify weights by -w ...\")\n except flags.FlagsError, e:\n print >> logs, 'Error: %s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS)\n sys.exit(1)\n \n FLAGS.featstat = True\n \n model = Model(FLAGS.weights) #.model, FLAGS.weights)\n\n if FLAGS.prune:\n model.prune(FLAGS.prune)\n\n if FLAGS.outputweights:\n model.write(FLAGS.outputweights)\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('core', '0007_auto_20181010_0852'), ('accounts',
'0004_playercards')]
operations = [migrations.RenameModel(old_name='PlayerCards', new_name=
'PlayerCard'), migrations.RemoveField(model_name='profile', name=
'cards')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('core', '0007_auto_20181010_0852'), ('accounts',
'0004_playercards')]
operations = [migrations.RenameModel(old_name='PlayerCards', new_name=
'PlayerCard'), migrations.RemoveField(model_name='profile', name=
'cards')]
<|reserved_special_token_1|>
# Generated by Django 2.1.2 on 2018-10-26 12:40
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``PlayerCards`` model to ``PlayerCard`` and drop ``Profile.cards``.

    Auto-generated by Django 2.1.2 (see header); avoid hand-editing the
    operations unless the migration graph is reconciled accordingly.
    """

    # Must run after core 0007 and after the migration that introduced
    # PlayerCards in this app.
    dependencies = [
        ('core', '0007_auto_20181010_0852'),
        ('accounts', '0004_playercards'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='PlayerCards',
            new_name='PlayerCard',
        ),
        migrations.RemoveField(
            model_name='profile',
            name='cards',
        ),
    ]
|
flexible
|
{
"blob_id": "59596c69df6a2c453fd147a9c8a2c7d47ed79fb3",
"index": 3222,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0007_auto_20181010_0852'), ('accounts',\n '0004_playercards')]\n operations = [migrations.RenameModel(old_name='PlayerCards', new_name=\n 'PlayerCard'), migrations.RemoveField(model_name='profile', name=\n 'cards')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0007_auto_20181010_0852'), ('accounts',\n '0004_playercards')]\n operations = [migrations.RenameModel(old_name='PlayerCards', new_name=\n 'PlayerCard'), migrations.RemoveField(model_name='profile', name=\n 'cards')]\n",
"step-5": "# Generated by Django 2.1.2 on 2018-10-26 12:40\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0007_auto_20181010_0852'),\n ('accounts', '0004_playercards'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='PlayerCards',\n new_name='PlayerCard',\n ),\n migrations.RemoveField(\n model_name='profile',\n name='cards',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def greatestCommonFactor(posInt1, posInt2):
    """Return the greatest common factor (GCD) of two positive integers.

    Replaces the original trial-division approach — which materialized the
    full divisor lists of both inputs, O(posInt1 + posInt2) work — with
    Euclid's algorithm, O(log min(posInt1, posInt2)).  Same interface,
    same results for positive inputs, same stdout announcement.

    Args:
        posInt1: first positive integer.
        posInt2: second positive integer.

    Returns:
        int: the largest integer dividing both inputs.
    """
    a, b = posInt1, posInt2
    while b:  # invariant: gcd(a, b) == gcd(b, a % b); stops when remainder is 0
        a, b = b, a % b
    # Preserve the original's announcement side effect on stdout.
    print('The greatest common factor of', posInt1, 'and', posInt2, 'is:')
    return a
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def greatestCommonFactor(posInt1, posInt2):
    """Return the greatest common factor of two positive integers.

    Enumerates the divisors of each input by trial division, then keeps
    the largest value common to both lists.
    """
    # Divisors of each input over 1..n inclusive.
    divisors_one = [d for d in range(1, posInt1 + 1) if posInt1 % d == 0]
    divisors_two = [d for d in range(1, posInt2 + 1) if posInt2 % d == 0]

    # Walk the first list in ascending order; the last shared divisor
    # encountered is the greatest common factor.
    result = 1
    for divisor in divisors_one:
        if divisor in divisors_two:
            result = divisor

    print('The greatest common factor of', posInt1, 'and', posInt2, 'is:')
    return result


print(greatestCommonFactor(9, 12))
print(greatestCommonFactor(6, 18))
print(greatestCommonFactor(11, 4))
<|reserved_special_token_1|>
# 10.13.20 - sjg
# Exercise 15 - solution A
# greatestCommonFactor(a, b): given two distinct positive integers, return
# their greatest common factor.
#   greatestCommonFactor(9, 12)  -> 3
#   greatestCommonFactor(6, 18)  -> 6
#   greatestCommonFactor(11, 4)  -> 1

def greatestCommonFactor(posInt1, posInt2):
    """Return the greatest common factor of two positive integers."""
    # Scan candidates downward from min(a, b); the first value dividing
    # both inputs is, by construction, the greatest common factor.
    result = 1
    for candidate in range(min(posInt1, posInt2), 0, -1):
        if posInt1 % candidate == 0 and posInt2 % candidate == 0:
            result = candidate
            break

    print("The greatest common factor of", posInt1, "and", posInt2, "is:")
    return result


print(greatestCommonFactor(9, 12))
print(greatestCommonFactor(6, 18))
print(greatestCommonFactor(11, 4))
|
flexible
|
{
"blob_id": "a3f6ea649fc5e60b0f8353b1404912d060686b99",
"index": 9550,
"step-1": "<mask token>\n",
"step-2": "def greatestCommonFactor(posInt1, posInt2):\n range_posInt1 = list(range(1, posInt1 + 1))\n factors_posInt1 = []\n for i in range_posInt1:\n if posInt1 % i == 0:\n factors_posInt1.append(i)\n range_posInt2 = list(range(1, posInt2 + 1))\n factors_posInt2 = []\n for i in range_posInt2:\n if posInt2 % i == 0:\n factors_posInt2.append(i)\n result = 1\n for factor_posInt1 in factors_posInt1:\n if factor_posInt1 in factors_posInt2:\n result = factor_posInt1\n print('The greatest common factor of', posInt1, 'and', posInt2, 'is:')\n return result\n\n\n<mask token>\n",
"step-3": "def greatestCommonFactor(posInt1, posInt2):\n range_posInt1 = list(range(1, posInt1 + 1))\n factors_posInt1 = []\n for i in range_posInt1:\n if posInt1 % i == 0:\n factors_posInt1.append(i)\n range_posInt2 = list(range(1, posInt2 + 1))\n factors_posInt2 = []\n for i in range_posInt2:\n if posInt2 % i == 0:\n factors_posInt2.append(i)\n result = 1\n for factor_posInt1 in factors_posInt1:\n if factor_posInt1 in factors_posInt2:\n result = factor_posInt1\n print('The greatest common factor of', posInt1, 'and', posInt2, 'is:')\n return result\n\n\nprint(greatestCommonFactor(9, 12))\nprint(greatestCommonFactor(6, 18))\nprint(greatestCommonFactor(11, 4))\n",
"step-4": "# 10.13.20 - sjg \n# Exercise 15 - solution A\n# Write a function called greatestCommomFactor that, \n#given two distinct positive integers,\n#returns the greatest common factor of those two values\n\n#Input: greatestCommonFactor(9,12)\n#Output: 3\n\n#Input: greatestCommonFactor(6,18)\n#Output: 6\n\n#Input: greatestCommonFactor(11,4)\n#Output: 1\n\ndef greatestCommonFactor(posInt1, posInt2):\n\t\n\t#range of posInt1, plus posInt1\n\trange_posInt1 = list(range(1,posInt1+1))\n\n\t# list of factors\n\tfactors_posInt1 = []\n\n\t#iterating i through range_posInt1, starting i == 1, if int % i is zero, \n\t#then divisible, meaning it's a factor, so add i in those cases to list of factors\n\t#dont use a break statement bc each integer needs to be checked within the range list\n\n\tfor i in range_posInt1:\n\t\tif posInt1 % i == 0:\n\t\t\tfactors_posInt1.append(i)\n\n\t#range of posInt2, plus posInt2\n\trange_posInt2 = list(range(1,posInt2+1))\n\n\t#factors_posInt2 - list of factors, create empty list\n\tfactors_posInt2 = []\n\n\tfor i in range_posInt2: \n\t\tif posInt2 % i == 0:\n\t\t\tfactors_posInt2.append(i)\n\n\t#define int variable, result, which will house the greatest common factor between the 2 parameters,\n\t# and default value being 1 bc for posInts, the smallest factor of a pos # would be 1\n\tresult = 1\n\n\n\t#iterating through the factors of posInt1, \n\tfor factor_posInt1 in factors_posInt1:\n\t\t#if particular value is present in factors_posInt2\n\t\tif factor_posInt1 in factors_posInt2:\n\n\t\t#set result equal to that factor\n\t\t\tresult = factor_posInt1\n\t\t\n\t\t#no break statement bc need to go through entire for loop to get answer\n\n\tprint(\"The greatest common factor of\", posInt1, \"and\", posInt2, \"is:\")\n\treturn result\n\n\nprint(greatestCommonFactor(9,12))\nprint(greatestCommonFactor(6,18))\nprint(greatestCommonFactor(11,4))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from os.path import abspath, dirname, join, basename
import numpy as np
import cv2
import xiuminglib as xm
logger, thisfile = xm.config.create_logger(abspath(__file__))
class EXR():
"""Reads EXR files.
EXR files can be generic or physically meaningful, such as depth, normal, etc.
When data loaded are physically meaningful, these methods assume the EXR files
are produced by :mod:`xiuminglib.blender.render` and hence follow certain formats.
Args:
exr_path (str, optional): Path to the EXR file.
Attributes:
exr_f (str): Path to the EXR file.
data (dict): Data loaded.
"""
def __init__(self, exr_path=None):
self.exr_f = exr_path
if self.exr_f is not None:
self.data = self.load()
def load(self):
r"""Loads an EXR as a dictionary of NumPy arrays.
Requires writing a .npz to ``/tmp/`` and then loading it, because
the conversion process has to be done in Python 2.x as a subprocess call,
unfortuantely. If :math:`\leq3` channels, can use OpenCV for in-memory loading.
Returns:
dict: Loaded EXR data.
"""
from time import time
from subprocess import Popen
logger_name = thisfile + '->EXR:load()'
assert self.exr_f is not None, "Set the exr_f first"
npz_f = '/tmp/%s_t%s.npz' % \
(basename(self.exr_f).replace('.exr', ''), time())
# Convert to .npz
# cv2.imread() can't load more than three channels from .exr even with IMREAD_UNCHANGED
# Has to go through IO. Maybe there's a better way?
cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')
bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)
process = Popen(bash_cmd.split(), cwd=cwd)
_, _ = process.communicate()
# Load this .npz
data = np.load(npz_f)
logger.name = logger_name
logger.info("Loaded %s", self.exr_f)
return data
def extract_depth(self, alpha_exr, outpath, vis=False):
"""Combines a raw (aliased) depth map and its alpha map into anti-aliased depth.
Output has black background, with bright values for closeness to the camera.
If the alpha map is anti-aliased, the result depth map will be nicely anti-aliased.
Args:
alpha_exr (str): Path to the EXR file of the anti-aliased alpha map.
outpath (str): Path to the result .npy file.
vis (bool, optional): Whether to visualize the raw values as an image.
Writes
- A .npy file containing an aliased depth map and its alpha map.
- If ``vis``, a .png image of anti-aliased depth.
"""
logger_name = thisfile + '->EXR:extract_depth()'
dtype = 'uint8'
dtype_max = np.iinfo(dtype).max
# Load alpha
arr = cv2.imread(alpha_exr, cv2.IMREAD_UNCHANGED)
assert (arr[:, :, 0] == arr[:, :, 1]).all() and (arr[:, :, 1] == arr[:, :, 2]).all(), \
"A valid alpha map must have all three channels the same"
alpha = arr[:, :, 0]
# Load depth
arr = cv2.imread(self.exr_f, cv2.IMREAD_UNCHANGED)
assert (arr[..., 0] == arr[..., 1]).all() and (arr[..., 1] == arr[..., 2]).all(), \
"A valid depth map must have all three channels the same"
depth = arr[..., 0] # these raw values are aliased, so only one crazy big value
if not outpath.endswith('.npy'):
outpath += '.npy'
np.save(outpath, np.dstack((arr, alpha)))
if vis:
is_fg = depth < depth.max()
max_val = depth[is_fg].max()
depth[depth > max_val] = max_val # cap background depth at the object maximum depth
min_val = depth.min()
im = dtype_max * (max_val - depth) / (max_val - min_val) # [0, dtype_max]
# Anti-aliasing
bg = np.zeros(im.shape)
im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)
cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype))
logger.name = logger_name
logger.info("Depth image extractd to %s", outpath)
def extract_normal(self, outpath, vis=False):
"""Converts an RGBA EXR normal map to a .npy normal map.
The background is black, complying with industry standards (e.g., Adobe AE).
Args:
outpath (str): Path to the result .npy file.
vis (bool, optional): Whether to visualize the normal vectors as an image.
Writes
- A .npy file containing an aliased normal map and its alpha map.
- If ``vis``, a .png visualization of anti-aliased normals.
"""
logger_name = thisfile + '->extract_normal()'
dtype = 'uint8'
dtype_max = np.iinfo(dtype).max
# Load RGBA .exr
data = self.data
arr = np.dstack((data['R'], data['G'], data['B']))
alpha = data['A']
if not outpath.endswith('.npy'):
outpath += '.npy'
np.save(outpath, np.dstack((arr, alpha)))
if vis:
# [-1, 1]
im = (1 - (arr / 2 + 0.5)) * dtype_max
# [0, dtype_max]
bg = np.zeros(im.shape)
alpha = np.dstack((alpha, alpha, alpha))
im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)
cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype)[..., ::-1])
logger.name = logger_name
logger.info("Normal image extractd to %s", outpath)
def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):
"""Extract intrinsic images from an EXR of lighting passes into multiple .npy files.
Args:
outdir (str): Directory to save the result .npy files to.
vis (bool, optional): Whether to visualize the values as images.
Writes
- albedo.npy (and its visualization if ``vis``).
- shading.npy (ditto).
- specularity.npy (ditto).
- recon.npy (ditto): reconstruction by combining albedo, shading, and specularity.
- composite.npy (ditto): composite by Blender.
"""
logger_name = thisfile + '->extract_intrinsic_images_from_lighting_passes()'
xm.general.makedirs(outdir)
data = self.data
def collapse_passes(components):
ch_arrays = []
for ch in ['R', 'G', 'B']:
comp_arrs = []
for comp in components:
comp_arrs.append(data[comp + '.' + ch])
ch_array = np.sum(comp_arrs, axis=0) # sum components
ch_arrays.append(ch_array)
# Handle alpha channel
first_alpha = data[components[0] + '.A']
for ci in range(1, len(components)):
assert (first_alpha == data[components[ci] + '.A']).all(), \
"Alpha channels of all passes must be the same"
ch_arrays.append(first_alpha)
return np.dstack(ch_arrays)
# Albedo
albedo = collapse_passes(['diffuse_color', 'glossy_color'])
np.save(join(outdir, 'albedo.npy'), albedo)
if vis:
xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))
# Shading
shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])
np.save(join(outdir, 'shading.npy'), shading)
if vis:
xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))
# Specularity
specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])
np.save(join(outdir, 'specularity.npy'), specularity)
if vis:
xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png'))
# Reconstruction vs. ...
recon = np.multiply(albedo, shading) + specularity
recon[:, :, 3] = albedo[:, :, 3] # can't add up alpha channels
np.save(join(outdir, 'recon.npy'), recon)
if vis:
xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))
# ... composite from Blender, just for sanity check
composite = collapse_passes(['composite'])
np.save(join(outdir, 'composite.npy'), composite)
if vis:
xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))
logger.name = logger_name
logger.info("Intrinsic images extracted to %s", outdir)
def main():
"""Unit tests that can also serve as example usage."""
tmp_dir = xm.constants['dir_tmp']
exr_f = join(tmp_dir, 'test.exr')
exr = EXR(exr_f)
exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "b9cce77d4d2b9ff5563d17927e21166f9c870e3d",
"index": 5220,
"step-1": "<mask token>\n\n\nclass EXR:\n \"\"\"Reads EXR files.\n\n EXR files can be generic or physically meaningful, such as depth, normal, etc.\n When data loaded are physically meaningful, these methods assume the EXR files\n are produced by :mod:`xiuminglib.blender.render` and hence follow certain formats.\n\n Args:\n exr_path (str, optional): Path to the EXR file.\n\n Attributes:\n exr_f (str): Path to the EXR file.\n data (dict): Data loaded.\n \"\"\"\n\n def __init__(self, exr_path=None):\n self.exr_f = exr_path\n if self.exr_f is not None:\n self.data = self.load()\n\n def load(self):\n \"\"\"Loads an EXR as a dictionary of NumPy arrays.\n\n Requires writing a .npz to ``/tmp/`` and then loading it, because\n the conversion process has to be done in Python 2.x as a subprocess call,\n unfortuantely. If :math:`\\\\leq3` channels, can use OpenCV for in-memory loading.\n\n Returns:\n dict: Loaded EXR data.\n \"\"\"\n from time import time\n from subprocess import Popen\n logger_name = thisfile + '->EXR:load()'\n assert self.exr_f is not None, 'Set the exr_f first'\n npz_f = '/tmp/%s_t%s.npz' % (basename(self.exr_f).replace('.exr',\n ''), time())\n cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')\n bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)\n process = Popen(bash_cmd.split(), cwd=cwd)\n _, _ = process.communicate()\n data = np.load(npz_f)\n logger.name = logger_name\n logger.info('Loaded %s', self.exr_f)\n return data\n\n def extract_depth(self, alpha_exr, outpath, vis=False):\n \"\"\"Combines a raw (aliased) depth map and its alpha map into anti-aliased depth.\n\n Output has black background, with bright values for closeness to the camera.\n If the alpha map is anti-aliased, the result depth map will be nicely anti-aliased.\n\n Args:\n alpha_exr (str): Path to the EXR file of the anti-aliased alpha map.\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the raw values as an image.\n\n Writes\n 
- A .npy file containing an aliased depth map and its alpha map.\n - If ``vis``, a .png image of anti-aliased depth.\n \"\"\"\n logger_name = thisfile + '->EXR:extract_depth()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n arr = cv2.imread(alpha_exr, cv2.IMREAD_UNCHANGED)\n assert (arr[:, :, 0] == arr[:, :, 1]).all() and (arr[:, :, 1] ==\n arr[:, :, 2]).all(\n ), 'A valid alpha map must have all three channels the same'\n alpha = arr[:, :, 0]\n arr = cv2.imread(self.exr_f, cv2.IMREAD_UNCHANGED)\n assert (arr[..., 0] == arr[..., 1]).all() and (arr[..., 1] == arr[\n ..., 2]).all(\n ), 'A valid depth map must have all three channels the same'\n depth = arr[..., 0]\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n is_fg = depth < depth.max()\n max_val = depth[is_fg].max()\n depth[depth > max_val] = max_val\n min_val = depth.min()\n im = dtype_max * (max_val - depth) / (max_val - min_val)\n bg = np.zeros(im.shape)\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype))\n logger.name = logger_name\n logger.info('Depth image extractd to %s', outpath)\n\n def extract_normal(self, outpath, vis=False):\n \"\"\"Converts an RGBA EXR normal map to a .npy normal map.\n\n The background is black, complying with industry standards (e.g., Adobe AE).\n\n Args:\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the normal vectors as an image.\n\n Writes\n - A .npy file containing an aliased normal map and its alpha map.\n - If ``vis``, a .png visualization of anti-aliased normals.\n \"\"\"\n logger_name = thisfile + '->extract_normal()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n data = self.data\n arr = np.dstack((data['R'], data['G'], data['B']))\n alpha = data['A']\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n im = (1 - (arr / 2 + 0.5)) * 
dtype_max\n bg = np.zeros(im.shape)\n alpha = np.dstack((alpha, alpha, alpha))\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype)[..., ::-1])\n logger.name = logger_name\n logger.info('Normal image extractd to %s', outpath)\n\n def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):\n \"\"\"Extract intrinsic images from an EXR of lighting passes into multiple .npy files.\n\n Args:\n outdir (str): Directory to save the result .npy files to.\n vis (bool, optional): Whether to visualize the values as images.\n\n Writes\n - albedo.npy (and its visualization if ``vis``).\n - shading.npy (ditto).\n - specularity.npy (ditto).\n - recon.npy (ditto): reconstruction by combining albedo, shading, and specularity.\n - composite.npy (ditto): composite by Blender.\n \"\"\"\n logger_name = (thisfile +\n '->extract_intrinsic_images_from_lighting_passes()')\n xm.general.makedirs(outdir)\n data = self.data\n\n def collapse_passes(components):\n ch_arrays = []\n for ch in ['R', 'G', 'B']:\n comp_arrs = []\n for comp in components:\n comp_arrs.append(data[comp + '.' 
+ ch])\n ch_array = np.sum(comp_arrs, axis=0)\n ch_arrays.append(ch_array)\n first_alpha = data[components[0] + '.A']\n for ci in range(1, len(components)):\n assert (first_alpha == data[components[ci] + '.A']).all(\n ), 'Alpha channels of all passes must be the same'\n ch_arrays.append(first_alpha)\n return np.dstack(ch_arrays)\n albedo = collapse_passes(['diffuse_color', 'glossy_color'])\n np.save(join(outdir, 'albedo.npy'), albedo)\n if vis:\n xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))\n shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])\n np.save(join(outdir, 'shading.npy'), shading)\n if vis:\n xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))\n specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])\n np.save(join(outdir, 'specularity.npy'), specularity)\n if vis:\n xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png')\n )\n recon = np.multiply(albedo, shading) + specularity\n recon[:, :, 3] = albedo[:, :, 3]\n np.save(join(outdir, 'recon.npy'), recon)\n if vis:\n xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))\n composite = collapse_passes(['composite'])\n np.save(join(outdir, 'composite.npy'), composite)\n if vis:\n xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))\n logger.name = logger_name\n logger.info('Intrinsic images extracted to %s', outdir)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass EXR:\n \"\"\"Reads EXR files.\n\n EXR files can be generic or physically meaningful, such as depth, normal, etc.\n When data loaded are physically meaningful, these methods assume the EXR files\n are produced by :mod:`xiuminglib.blender.render` and hence follow certain formats.\n\n Args:\n exr_path (str, optional): Path to the EXR file.\n\n Attributes:\n exr_f (str): Path to the EXR file.\n data (dict): Data loaded.\n \"\"\"\n\n def __init__(self, exr_path=None):\n self.exr_f = exr_path\n if self.exr_f is not None:\n self.data = self.load()\n\n def load(self):\n \"\"\"Loads an EXR as a dictionary of NumPy arrays.\n\n Requires writing a .npz to ``/tmp/`` and then loading it, because\n the conversion process has to be done in Python 2.x as a subprocess call,\n unfortuantely. If :math:`\\\\leq3` channels, can use OpenCV for in-memory loading.\n\n Returns:\n dict: Loaded EXR data.\n \"\"\"\n from time import time\n from subprocess import Popen\n logger_name = thisfile + '->EXR:load()'\n assert self.exr_f is not None, 'Set the exr_f first'\n npz_f = '/tmp/%s_t%s.npz' % (basename(self.exr_f).replace('.exr',\n ''), time())\n cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')\n bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)\n process = Popen(bash_cmd.split(), cwd=cwd)\n _, _ = process.communicate()\n data = np.load(npz_f)\n logger.name = logger_name\n logger.info('Loaded %s', self.exr_f)\n return data\n\n def extract_depth(self, alpha_exr, outpath, vis=False):\n \"\"\"Combines a raw (aliased) depth map and its alpha map into anti-aliased depth.\n\n Output has black background, with bright values for closeness to the camera.\n If the alpha map is anti-aliased, the result depth map will be nicely anti-aliased.\n\n Args:\n alpha_exr (str): Path to the EXR file of the anti-aliased alpha map.\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the raw values as an image.\n\n Writes\n 
- A .npy file containing an aliased depth map and its alpha map.\n - If ``vis``, a .png image of anti-aliased depth.\n \"\"\"\n logger_name = thisfile + '->EXR:extract_depth()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n arr = cv2.imread(alpha_exr, cv2.IMREAD_UNCHANGED)\n assert (arr[:, :, 0] == arr[:, :, 1]).all() and (arr[:, :, 1] ==\n arr[:, :, 2]).all(\n ), 'A valid alpha map must have all three channels the same'\n alpha = arr[:, :, 0]\n arr = cv2.imread(self.exr_f, cv2.IMREAD_UNCHANGED)\n assert (arr[..., 0] == arr[..., 1]).all() and (arr[..., 1] == arr[\n ..., 2]).all(\n ), 'A valid depth map must have all three channels the same'\n depth = arr[..., 0]\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n is_fg = depth < depth.max()\n max_val = depth[is_fg].max()\n depth[depth > max_val] = max_val\n min_val = depth.min()\n im = dtype_max * (max_val - depth) / (max_val - min_val)\n bg = np.zeros(im.shape)\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype))\n logger.name = logger_name\n logger.info('Depth image extractd to %s', outpath)\n\n def extract_normal(self, outpath, vis=False):\n \"\"\"Converts an RGBA EXR normal map to a .npy normal map.\n\n The background is black, complying with industry standards (e.g., Adobe AE).\n\n Args:\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the normal vectors as an image.\n\n Writes\n - A .npy file containing an aliased normal map and its alpha map.\n - If ``vis``, a .png visualization of anti-aliased normals.\n \"\"\"\n logger_name = thisfile + '->extract_normal()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n data = self.data\n arr = np.dstack((data['R'], data['G'], data['B']))\n alpha = data['A']\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n im = (1 - (arr / 2 + 0.5)) * 
dtype_max\n bg = np.zeros(im.shape)\n alpha = np.dstack((alpha, alpha, alpha))\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype)[..., ::-1])\n logger.name = logger_name\n logger.info('Normal image extractd to %s', outpath)\n\n def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):\n \"\"\"Extract intrinsic images from an EXR of lighting passes into multiple .npy files.\n\n Args:\n outdir (str): Directory to save the result .npy files to.\n vis (bool, optional): Whether to visualize the values as images.\n\n Writes\n - albedo.npy (and its visualization if ``vis``).\n - shading.npy (ditto).\n - specularity.npy (ditto).\n - recon.npy (ditto): reconstruction by combining albedo, shading, and specularity.\n - composite.npy (ditto): composite by Blender.\n \"\"\"\n logger_name = (thisfile +\n '->extract_intrinsic_images_from_lighting_passes()')\n xm.general.makedirs(outdir)\n data = self.data\n\n def collapse_passes(components):\n ch_arrays = []\n for ch in ['R', 'G', 'B']:\n comp_arrs = []\n for comp in components:\n comp_arrs.append(data[comp + '.' 
+ ch])\n ch_array = np.sum(comp_arrs, axis=0)\n ch_arrays.append(ch_array)\n first_alpha = data[components[0] + '.A']\n for ci in range(1, len(components)):\n assert (first_alpha == data[components[ci] + '.A']).all(\n ), 'Alpha channels of all passes must be the same'\n ch_arrays.append(first_alpha)\n return np.dstack(ch_arrays)\n albedo = collapse_passes(['diffuse_color', 'glossy_color'])\n np.save(join(outdir, 'albedo.npy'), albedo)\n if vis:\n xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))\n shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])\n np.save(join(outdir, 'shading.npy'), shading)\n if vis:\n xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))\n specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])\n np.save(join(outdir, 'specularity.npy'), specularity)\n if vis:\n xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png')\n )\n recon = np.multiply(albedo, shading) + specularity\n recon[:, :, 3] = albedo[:, :, 3]\n np.save(join(outdir, 'recon.npy'), recon)\n if vis:\n xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))\n composite = collapse_passes(['composite'])\n np.save(join(outdir, 'composite.npy'), composite)\n if vis:\n xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))\n logger.name = logger_name\n logger.info('Intrinsic images extracted to %s', outdir)\n\n\ndef main():\n \"\"\"Unit tests that can also serve as example usage.\"\"\"\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nlogger, thisfile = xm.config.create_logger(abspath(__file__))\n\n\nclass EXR:\n \"\"\"Reads EXR files.\n\n EXR files can be generic or physically meaningful, such as depth, normal, etc.\n When data loaded are physically meaningful, these methods assume the EXR files\n are produced by :mod:`xiuminglib.blender.render` and hence follow certain formats.\n\n Args:\n exr_path (str, optional): Path to the EXR file.\n\n Attributes:\n exr_f (str): Path to the EXR file.\n data (dict): Data loaded.\n \"\"\"\n\n def __init__(self, exr_path=None):\n self.exr_f = exr_path\n if self.exr_f is not None:\n self.data = self.load()\n\n def load(self):\n \"\"\"Loads an EXR as a dictionary of NumPy arrays.\n\n Requires writing a .npz to ``/tmp/`` and then loading it, because\n the conversion process has to be done in Python 2.x as a subprocess call,\n unfortuantely. If :math:`\\\\leq3` channels, can use OpenCV for in-memory loading.\n\n Returns:\n dict: Loaded EXR data.\n \"\"\"\n from time import time\n from subprocess import Popen\n logger_name = thisfile + '->EXR:load()'\n assert self.exr_f is not None, 'Set the exr_f first'\n npz_f = '/tmp/%s_t%s.npz' % (basename(self.exr_f).replace('.exr',\n ''), time())\n cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')\n bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)\n process = Popen(bash_cmd.split(), cwd=cwd)\n _, _ = process.communicate()\n data = np.load(npz_f)\n logger.name = logger_name\n logger.info('Loaded %s', self.exr_f)\n return data\n\n def extract_depth(self, alpha_exr, outpath, vis=False):\n \"\"\"Combines a raw (aliased) depth map and its alpha map into anti-aliased depth.\n\n Output has black background, with bright values for closeness to the camera.\n If the alpha map is anti-aliased, the result depth map will be nicely anti-aliased.\n\n Args:\n alpha_exr (str): Path to the EXR file of the anti-aliased alpha map.\n outpath (str): Path to the result .npy file.\n vis (bool, optional): 
Whether to visualize the raw values as an image.\n\n Writes\n - A .npy file containing an aliased depth map and its alpha map.\n - If ``vis``, a .png image of anti-aliased depth.\n \"\"\"\n logger_name = thisfile + '->EXR:extract_depth()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n arr = cv2.imread(alpha_exr, cv2.IMREAD_UNCHANGED)\n assert (arr[:, :, 0] == arr[:, :, 1]).all() and (arr[:, :, 1] ==\n arr[:, :, 2]).all(\n ), 'A valid alpha map must have all three channels the same'\n alpha = arr[:, :, 0]\n arr = cv2.imread(self.exr_f, cv2.IMREAD_UNCHANGED)\n assert (arr[..., 0] == arr[..., 1]).all() and (arr[..., 1] == arr[\n ..., 2]).all(\n ), 'A valid depth map must have all three channels the same'\n depth = arr[..., 0]\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n is_fg = depth < depth.max()\n max_val = depth[is_fg].max()\n depth[depth > max_val] = max_val\n min_val = depth.min()\n im = dtype_max * (max_val - depth) / (max_val - min_val)\n bg = np.zeros(im.shape)\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype))\n logger.name = logger_name\n logger.info('Depth image extractd to %s', outpath)\n\n def extract_normal(self, outpath, vis=False):\n \"\"\"Converts an RGBA EXR normal map to a .npy normal map.\n\n The background is black, complying with industry standards (e.g., Adobe AE).\n\n Args:\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the normal vectors as an image.\n\n Writes\n - A .npy file containing an aliased normal map and its alpha map.\n - If ``vis``, a .png visualization of anti-aliased normals.\n \"\"\"\n logger_name = thisfile + '->extract_normal()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n data = self.data\n arr = np.dstack((data['R'], data['G'], data['B']))\n alpha = data['A']\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, 
np.dstack((arr, alpha)))\n if vis:\n im = (1 - (arr / 2 + 0.5)) * dtype_max\n bg = np.zeros(im.shape)\n alpha = np.dstack((alpha, alpha, alpha))\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype)[..., ::-1])\n logger.name = logger_name\n logger.info('Normal image extractd to %s', outpath)\n\n def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):\n \"\"\"Extract intrinsic images from an EXR of lighting passes into multiple .npy files.\n\n Args:\n outdir (str): Directory to save the result .npy files to.\n vis (bool, optional): Whether to visualize the values as images.\n\n Writes\n - albedo.npy (and its visualization if ``vis``).\n - shading.npy (ditto).\n - specularity.npy (ditto).\n - recon.npy (ditto): reconstruction by combining albedo, shading, and specularity.\n - composite.npy (ditto): composite by Blender.\n \"\"\"\n logger_name = (thisfile +\n '->extract_intrinsic_images_from_lighting_passes()')\n xm.general.makedirs(outdir)\n data = self.data\n\n def collapse_passes(components):\n ch_arrays = []\n for ch in ['R', 'G', 'B']:\n comp_arrs = []\n for comp in components:\n comp_arrs.append(data[comp + '.' 
+ ch])\n ch_array = np.sum(comp_arrs, axis=0)\n ch_arrays.append(ch_array)\n first_alpha = data[components[0] + '.A']\n for ci in range(1, len(components)):\n assert (first_alpha == data[components[ci] + '.A']).all(\n ), 'Alpha channels of all passes must be the same'\n ch_arrays.append(first_alpha)\n return np.dstack(ch_arrays)\n albedo = collapse_passes(['diffuse_color', 'glossy_color'])\n np.save(join(outdir, 'albedo.npy'), albedo)\n if vis:\n xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))\n shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])\n np.save(join(outdir, 'shading.npy'), shading)\n if vis:\n xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))\n specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])\n np.save(join(outdir, 'specularity.npy'), specularity)\n if vis:\n xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png')\n )\n recon = np.multiply(albedo, shading) + specularity\n recon[:, :, 3] = albedo[:, :, 3]\n np.save(join(outdir, 'recon.npy'), recon)\n if vis:\n xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))\n composite = collapse_passes(['composite'])\n np.save(join(outdir, 'composite.npy'), composite)\n if vis:\n xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))\n logger.name = logger_name\n logger.info('Intrinsic images extracted to %s', outdir)\n\n\ndef main():\n \"\"\"Unit tests that can also serve as example usage.\"\"\"\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from os.path import abspath, dirname, join, basename\nimport numpy as np\nimport cv2\nimport xiuminglib as xm\nlogger, thisfile = xm.config.create_logger(abspath(__file__))\n\n\nclass EXR:\n \"\"\"Reads EXR files.\n\n EXR files can be generic or physically meaningful, such as depth, normal, etc.\n When data loaded are physically meaningful, these methods assume the EXR files\n are produced by :mod:`xiuminglib.blender.render` and hence follow certain formats.\n\n Args:\n exr_path (str, optional): Path to the EXR file.\n\n Attributes:\n exr_f (str): Path to the EXR file.\n data (dict): Data loaded.\n \"\"\"\n\n def __init__(self, exr_path=None):\n self.exr_f = exr_path\n if self.exr_f is not None:\n self.data = self.load()\n\n def load(self):\n \"\"\"Loads an EXR as a dictionary of NumPy arrays.\n\n Requires writing a .npz to ``/tmp/`` and then loading it, because\n the conversion process has to be done in Python 2.x as a subprocess call,\n unfortuantely. If :math:`\\\\leq3` channels, can use OpenCV for in-memory loading.\n\n Returns:\n dict: Loaded EXR data.\n \"\"\"\n from time import time\n from subprocess import Popen\n logger_name = thisfile + '->EXR:load()'\n assert self.exr_f is not None, 'Set the exr_f first'\n npz_f = '/tmp/%s_t%s.npz' % (basename(self.exr_f).replace('.exr',\n ''), time())\n cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')\n bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)\n process = Popen(bash_cmd.split(), cwd=cwd)\n _, _ = process.communicate()\n data = np.load(npz_f)\n logger.name = logger_name\n logger.info('Loaded %s', self.exr_f)\n return data\n\n def extract_depth(self, alpha_exr, outpath, vis=False):\n \"\"\"Combines a raw (aliased) depth map and its alpha map into anti-aliased depth.\n\n Output has black background, with bright values for closeness to the camera.\n If the alpha map is anti-aliased, the result depth map will be nicely anti-aliased.\n\n Args:\n alpha_exr (str): Path to the EXR file of 
the anti-aliased alpha map.\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the raw values as an image.\n\n Writes\n - A .npy file containing an aliased depth map and its alpha map.\n - If ``vis``, a .png image of anti-aliased depth.\n \"\"\"\n logger_name = thisfile + '->EXR:extract_depth()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n arr = cv2.imread(alpha_exr, cv2.IMREAD_UNCHANGED)\n assert (arr[:, :, 0] == arr[:, :, 1]).all() and (arr[:, :, 1] ==\n arr[:, :, 2]).all(\n ), 'A valid alpha map must have all three channels the same'\n alpha = arr[:, :, 0]\n arr = cv2.imread(self.exr_f, cv2.IMREAD_UNCHANGED)\n assert (arr[..., 0] == arr[..., 1]).all() and (arr[..., 1] == arr[\n ..., 2]).all(\n ), 'A valid depth map must have all three channels the same'\n depth = arr[..., 0]\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n is_fg = depth < depth.max()\n max_val = depth[is_fg].max()\n depth[depth > max_val] = max_val\n min_val = depth.min()\n im = dtype_max * (max_val - depth) / (max_val - min_val)\n bg = np.zeros(im.shape)\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype))\n logger.name = logger_name\n logger.info('Depth image extractd to %s', outpath)\n\n def extract_normal(self, outpath, vis=False):\n \"\"\"Converts an RGBA EXR normal map to a .npy normal map.\n\n The background is black, complying with industry standards (e.g., Adobe AE).\n\n Args:\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the normal vectors as an image.\n\n Writes\n - A .npy file containing an aliased normal map and its alpha map.\n - If ``vis``, a .png visualization of anti-aliased normals.\n \"\"\"\n logger_name = thisfile + '->extract_normal()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n data = self.data\n arr = np.dstack((data['R'], data['G'], data['B']))\n 
alpha = data['A']\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n im = (1 - (arr / 2 + 0.5)) * dtype_max\n bg = np.zeros(im.shape)\n alpha = np.dstack((alpha, alpha, alpha))\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype)[..., ::-1])\n logger.name = logger_name\n logger.info('Normal image extractd to %s', outpath)\n\n def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):\n \"\"\"Extract intrinsic images from an EXR of lighting passes into multiple .npy files.\n\n Args:\n outdir (str): Directory to save the result .npy files to.\n vis (bool, optional): Whether to visualize the values as images.\n\n Writes\n - albedo.npy (and its visualization if ``vis``).\n - shading.npy (ditto).\n - specularity.npy (ditto).\n - recon.npy (ditto): reconstruction by combining albedo, shading, and specularity.\n - composite.npy (ditto): composite by Blender.\n \"\"\"\n logger_name = (thisfile +\n '->extract_intrinsic_images_from_lighting_passes()')\n xm.general.makedirs(outdir)\n data = self.data\n\n def collapse_passes(components):\n ch_arrays = []\n for ch in ['R', 'G', 'B']:\n comp_arrs = []\n for comp in components:\n comp_arrs.append(data[comp + '.' 
+ ch])\n ch_array = np.sum(comp_arrs, axis=0)\n ch_arrays.append(ch_array)\n first_alpha = data[components[0] + '.A']\n for ci in range(1, len(components)):\n assert (first_alpha == data[components[ci] + '.A']).all(\n ), 'Alpha channels of all passes must be the same'\n ch_arrays.append(first_alpha)\n return np.dstack(ch_arrays)\n albedo = collapse_passes(['diffuse_color', 'glossy_color'])\n np.save(join(outdir, 'albedo.npy'), albedo)\n if vis:\n xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))\n shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])\n np.save(join(outdir, 'shading.npy'), shading)\n if vis:\n xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))\n specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])\n np.save(join(outdir, 'specularity.npy'), specularity)\n if vis:\n xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png')\n )\n recon = np.multiply(albedo, shading) + specularity\n recon[:, :, 3] = albedo[:, :, 3]\n np.save(join(outdir, 'recon.npy'), recon)\n if vis:\n xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))\n composite = collapse_passes(['composite'])\n np.save(join(outdir, 'composite.npy'), composite)\n if vis:\n xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))\n logger.name = logger_name\n logger.info('Intrinsic images extracted to %s', outdir)\n\n\ndef main():\n \"\"\"Unit tests that can also serve as example usage.\"\"\"\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from os.path import abspath, dirname, join, basename\nimport numpy as np\nimport cv2\n\nimport xiuminglib as xm\n\nlogger, thisfile = xm.config.create_logger(abspath(__file__))\n\n\nclass EXR():\n \"\"\"Reads EXR files.\n\n EXR files can be generic or physically meaningful, such as depth, normal, etc.\n When data loaded are physically meaningful, these methods assume the EXR files\n are produced by :mod:`xiuminglib.blender.render` and hence follow certain formats.\n\n Args:\n exr_path (str, optional): Path to the EXR file.\n\n Attributes:\n exr_f (str): Path to the EXR file.\n data (dict): Data loaded.\n \"\"\"\n def __init__(self, exr_path=None):\n self.exr_f = exr_path\n if self.exr_f is not None:\n self.data = self.load()\n\n def load(self):\n r\"\"\"Loads an EXR as a dictionary of NumPy arrays.\n\n Requires writing a .npz to ``/tmp/`` and then loading it, because\n the conversion process has to be done in Python 2.x as a subprocess call,\n unfortuantely. If :math:`\\leq3` channels, can use OpenCV for in-memory loading.\n\n Returns:\n dict: Loaded EXR data.\n \"\"\"\n from time import time\n from subprocess import Popen\n logger_name = thisfile + '->EXR:load()'\n assert self.exr_f is not None, \"Set the exr_f first\"\n npz_f = '/tmp/%s_t%s.npz' % \\\n (basename(self.exr_f).replace('.exr', ''), time())\n # Convert to .npz\n # cv2.imread() can't load more than three channels from .exr even with IMREAD_UNCHANGED\n # Has to go through IO. 
Maybe there's a better way?\n cwd = join(dirname(abspath(__file__)), '..', '..', 'cli')\n bash_cmd = 'python2 exr2npz.py %s %s' % (self.exr_f, npz_f)\n process = Popen(bash_cmd.split(), cwd=cwd)\n _, _ = process.communicate()\n # Load this .npz\n data = np.load(npz_f)\n logger.name = logger_name\n logger.info(\"Loaded %s\", self.exr_f)\n return data\n\n def extract_depth(self, alpha_exr, outpath, vis=False):\n \"\"\"Combines a raw (aliased) depth map and its alpha map into anti-aliased depth.\n\n Output has black background, with bright values for closeness to the camera.\n If the alpha map is anti-aliased, the result depth map will be nicely anti-aliased.\n\n Args:\n alpha_exr (str): Path to the EXR file of the anti-aliased alpha map.\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the raw values as an image.\n\n Writes\n - A .npy file containing an aliased depth map and its alpha map.\n - If ``vis``, a .png image of anti-aliased depth.\n \"\"\"\n logger_name = thisfile + '->EXR:extract_depth()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n # Load alpha\n arr = cv2.imread(alpha_exr, cv2.IMREAD_UNCHANGED)\n assert (arr[:, :, 0] == arr[:, :, 1]).all() and (arr[:, :, 1] == arr[:, :, 2]).all(), \\\n \"A valid alpha map must have all three channels the same\"\n alpha = arr[:, :, 0]\n # Load depth\n arr = cv2.imread(self.exr_f, cv2.IMREAD_UNCHANGED)\n assert (arr[..., 0] == arr[..., 1]).all() and (arr[..., 1] == arr[..., 2]).all(), \\\n \"A valid depth map must have all three channels the same\"\n depth = arr[..., 0] # these raw values are aliased, so only one crazy big value\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n is_fg = depth < depth.max()\n max_val = depth[is_fg].max()\n depth[depth > max_val] = max_val # cap background depth at the object maximum depth\n min_val = depth.min()\n im = dtype_max * (max_val - depth) / (max_val - min_val) # [0, 
dtype_max]\n # Anti-aliasing\n bg = np.zeros(im.shape)\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype))\n logger.name = logger_name\n logger.info(\"Depth image extractd to %s\", outpath)\n\n def extract_normal(self, outpath, vis=False):\n \"\"\"Converts an RGBA EXR normal map to a .npy normal map.\n\n The background is black, complying with industry standards (e.g., Adobe AE).\n\n Args:\n outpath (str): Path to the result .npy file.\n vis (bool, optional): Whether to visualize the normal vectors as an image.\n\n Writes\n - A .npy file containing an aliased normal map and its alpha map.\n - If ``vis``, a .png visualization of anti-aliased normals.\n \"\"\"\n logger_name = thisfile + '->extract_normal()'\n dtype = 'uint8'\n dtype_max = np.iinfo(dtype).max\n # Load RGBA .exr\n data = self.data\n arr = np.dstack((data['R'], data['G'], data['B']))\n alpha = data['A']\n if not outpath.endswith('.npy'):\n outpath += '.npy'\n np.save(outpath, np.dstack((arr, alpha)))\n if vis:\n # [-1, 1]\n im = (1 - (arr / 2 + 0.5)) * dtype_max\n # [0, dtype_max]\n bg = np.zeros(im.shape)\n alpha = np.dstack((alpha, alpha, alpha))\n im = np.multiply(alpha, im) + np.multiply(1 - alpha, bg)\n cv2.imwrite(outpath[:-4] + '.png', im.astype(dtype)[..., ::-1])\n logger.name = logger_name\n logger.info(\"Normal image extractd to %s\", outpath)\n\n def extract_intrinsic_images_from_lighting_passes(self, outdir, vis=False):\n \"\"\"Extract intrinsic images from an EXR of lighting passes into multiple .npy files.\n\n Args:\n outdir (str): Directory to save the result .npy files to.\n vis (bool, optional): Whether to visualize the values as images.\n\n Writes\n - albedo.npy (and its visualization if ``vis``).\n - shading.npy (ditto).\n - specularity.npy (ditto).\n - recon.npy (ditto): reconstruction by combining albedo, shading, and specularity.\n - composite.npy (ditto): composite by Blender.\n \"\"\"\n logger_name = thisfile + 
'->extract_intrinsic_images_from_lighting_passes()'\n xm.general.makedirs(outdir)\n data = self.data\n\n def collapse_passes(components):\n ch_arrays = []\n for ch in ['R', 'G', 'B']:\n comp_arrs = []\n for comp in components:\n comp_arrs.append(data[comp + '.' + ch])\n ch_array = np.sum(comp_arrs, axis=0) # sum components\n ch_arrays.append(ch_array)\n # Handle alpha channel\n first_alpha = data[components[0] + '.A']\n for ci in range(1, len(components)):\n assert (first_alpha == data[components[ci] + '.A']).all(), \\\n \"Alpha channels of all passes must be the same\"\n ch_arrays.append(first_alpha)\n return np.dstack(ch_arrays)\n\n # Albedo\n albedo = collapse_passes(['diffuse_color', 'glossy_color'])\n np.save(join(outdir, 'albedo.npy'), albedo)\n if vis:\n xm.vis.matrix_as_image(albedo, outpath=join(outdir, 'albedo.png'))\n # Shading\n shading = collapse_passes(['diffuse_indirect', 'diffuse_direct'])\n np.save(join(outdir, 'shading.npy'), shading)\n if vis:\n xm.vis.matrix_as_image(shading, join(outdir, 'shading.png'))\n # Specularity\n specularity = collapse_passes(['glossy_indirect', 'glossy_direct'])\n np.save(join(outdir, 'specularity.npy'), specularity)\n if vis:\n xm.vis.matrix_as_image(specularity, join(outdir, 'specularity.png'))\n # Reconstruction vs. ...\n recon = np.multiply(albedo, shading) + specularity\n recon[:, :, 3] = albedo[:, :, 3] # can't add up alpha channels\n np.save(join(outdir, 'recon.npy'), recon)\n if vis:\n xm.vis.matrix_as_image(recon, join(outdir, 'recon.png'))\n # ... 
composite from Blender, just for sanity check\n composite = collapse_passes(['composite'])\n np.save(join(outdir, 'composite.npy'), composite)\n if vis:\n xm.vis.matrix_as_image(composite, join(outdir, 'composite.png'))\n logger.name = logger_name\n logger.info(\"Intrinsic images extracted to %s\", outdir)\n\n\ndef main():\n \"\"\"Unit tests that can also serve as example usage.\"\"\"\n tmp_dir = xm.constants['dir_tmp']\n exr_f = join(tmp_dir, 'test.exr')\n exr = EXR(exr_f)\n exr.extract_normal(join(tmp_dir, 'test.png'), vis=True)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
import pprint
class ErrorResponseCollection(object):
    """Describes an error response (HTTP status plus message) and renders it
    as a Markdown snippet for API documentation."""

    def __init__(self, status, message, param="message"):
        # HTTP status code, human-readable message, and the JSON key that
        # carries the message in the rendered example body.
        self.status = status
        self.message = message
        self.param = param

    def as_md(self):
        """Render the error as a blockquote followed by a fenced example body."""
        template = '\n\n> **{msg}**\n\n```\n{{\n\n\t"{key}": "{msg}"\n\n}}\n\n```'
        return template.format(msg=self.message, key=self.param)
# Canned error responses shared by the API documentation pages.
GET_401 = ErrorResponseCollection(
    401,
    "Authentication credentials were not provided.",
    param="detail",
)

GET_REPO_STATUS_404 = ErrorResponseCollection(404, "NOT FOUND")
class ResponseCollection(object):
    """Describes a successful response payload and renders it as a Markdown
    snippet (blockquote headline plus a pretty-printed JSON code fence)."""

    def __init__(self, message=None, data=None):
        # Fall back to a single space so the blockquote heading is never empty.
        self.message = " " if message is None else message
        self.data = data

    def as_md(self):
        """Render the response as Markdown with the payload pretty-printed."""
        body = pprint.pformat(self.data, width=20, indent=4)
        return '\n\n> **{}**\n\n```json\n{}\n\n```'.format(self.message, body)
# Example payload for a successful branch-listing call.
GET_BRANCH_STATUS_200 = ResponseCollection(
    message="HTTP_200_OK",
    data={"branches": ["master", "develop", "feature/get_repo"]},
)
# Example payload for a successful repository-listing call.
GET_REPO_STATUS_200 = ResponseCollection(
    message="HTTP_200_OK",
    data={
        "repositories": [
            {
                "name": "dogproject",
                "url": "https://github.com/<user>~~~~~~.git",
                "latest_commit": "2019-09-12",
                "latest_scan": "2019-09-15",
            },
            {
                "name": "catproject1234533",
                "url": "https://github.com/<user>~~~~~~.git",
                "latest_commit": "2019-10-11",
                "latest_scan": "2019-10-11",
            },
        ],
        "repository_size": 31,
    },
)
# Example payload for a successful commit-history call.
GET_COMMIT_STATUS_200 = ResponseCollection(
    message="HTTP_200_OK",
    data={
        "commit": [
            {
                "sha": "123133010b97571286b568432f63395d18a49e05",
                "message": "fix : remove comments and fix code",
            },
            {
                "sha": "312313fc750cdea348e23145948d2ee58e29f483b",
                "message": "Update : korea_api crawling and yara convert "
                           "Update : korea_api crawling and yara rule convert",
            },
            {
                "sha": "464d238123137e8502a455f97dca165cb2d28612",
                "message": "Initial commit",
            },
        ],
    },
)
# Example payload for a successful code-detection call. "category" lists the
# rule names; every other key under ``data`` is one of those rule names and
# maps to a list of findings (file, line number, matched string, and the
# line before / matched line / line after for context).
GET_CODE_DETECT_STATUS_200 = ResponseCollection(
    message = "HTTP_200_OK",
    data = {
        "category": [
            "log_",
            "Token",
            "룰추가따라 늘어남",
            "..."
        ],
        "log_": [
            {
                "file_name": ".gitignore",
                "line_number": 1,
                "strings": "a",
                "line1": "",
                "line2": "# Created by https://www.gitignore.io/api/git,python,django,pycharm+all",
                "line3": "## HUFORMATION ##"
            }
        ],
        "Token": [
            {
                "file_name": "파일이름",
                "line_number": 10,
                "strings": "ddddd",
                "line1": "탐지 줄 앞",
                "line2": "탐지된 줄",
                "line3": "탐지줄 다음"
            },
            {
                "file_name": ".gitignore",
                "line_number": 1,
                "strings": "a",
                "line1": "",
                "line2": "# Created by https://www.gitignore.io/api/git,python,django,pycharm+all",
                "line3": "## HUFORMATION ##"
            }
        ],
        "룰추가따라 늘어남": [
            {
                "file_name": "파일이름",
                "line_number": 302,
                "strings": "ddddd",
                "line1": "탐지 줄 앞",
                "line2": "탐지된 줄",
                "line3": "탐지줄 다음"
            },
            {
                "file_name": ".gitignore",
                "line_number": 1,
                "strings": "a",
                "line1": "aa",
                "line2": "~~a~~~",
                "line3": "다음줄"
            },
            {
                "file_name": ".gitignore",
                "line_number": 1,
                "strings": "a",
                "line1": "aa",
                "line2": "~~a~~~",
                "line3": "다음줄"
            },
        ],
        "...": [
            {
                "file_name": ".gitignore",
                "line_number": 1,
                "strings": "a",
                "line1": "aa",
                "line2": "~~a~~~",
                "line3": "다음줄"
            },
        ]
    }
)
|
normal
|
{
"blob_id": "ade4d797a83eaa06e8bde90972a56376d7e0f55a",
"index": 6086,
"step-1": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n <mask token>\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % (self\n .message, self.param, self.message)\n\n\n<mask token>\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n\n def __init__(self, status, message, param='message'):\n self.status = status\n self.message = message\n self.param = param\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % (self\n .message, self.param, self.message)\n\n\n<mask token>\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ErrorResponseCollection(object):\n\n def __init__(self, status, message, param='message'):\n self.status = status\n self.message = message\n self.param = param\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % (self\n .message, self.param, self.message)\n\n\nGET_401 = ErrorResponseCollection(status=401, message=\n 'Authentication credentials were not provided.', param='detail')\nGET_REPO_STATUS_404 = ErrorResponseCollection(status=404, message='NOT FOUND')\n\n\nclass ResponseCollection(object):\n\n def __init__(self, message=None, data=None):\n self.message = message\n self.data = data\n if self.message == None:\n self.message = ' '\n\n def as_md(self):\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % (self.message, pprint\n .pformat(self.data, width=20, indent=4))\n\n\nGET_BRANCH_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data=dict\n (branches=['master', 'develop', 'feature/get_repo']))\nGET_REPO_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data={\n 'repositories': [{'name': 'dogproject', 'url':\n 'https://github.com/<user>~~~~~~.git', 'latest_commit': '2019-09-12',\n 'latest_scan': '2019-09-15'}, {'name': 'catproject1234533', 'url':\n 'https://github.com/<user>~~~~~~.git', 'latest_commit': '2019-10-11',\n 'latest_scan': '2019-10-11'}], 'repository_size': 31})\nGET_COMMIT_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data={\n 'commit': [{'sha': '123133010b97571286b568432f63395d18a49e05',\n 'message': 'fix : remove comments and fix code'}, {'sha':\n '312313fc750cdea348e23145948d2ee58e29f483b', 'message':\n 'Update : korea_api crawling and yara convert Update : korea_api crawling and yara rule convert'\n }, {'sha': '464d238123137e8502a455f97dca165cb2d28612', 'message':\n 'Initial commit'}]})\nGET_CODE_DETECT_STATUS_200 = ResponseCollection(message='HTTP_200_OK', data\n ={'category': ['log_', 'Token', '룰추가따라 늘어남', '...'], 'log_': [{\n 
'file_name': '.gitignore', 'line_number': 1, 'strings': 'a', 'line1':\n '', 'line2':\n '# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',\n 'line3': '## HUFORMATION ##'}], 'Token': [{'file_name': '파일이름',\n 'line_number': 10, 'strings': 'ddddd', 'line1': '탐지 줄 앞', 'line2':\n '탐지된 줄', 'line3': '탐지줄 다음'}, {'file_name': '.gitignore', 'line_number':\n 1, 'strings': 'a', 'line1': '', 'line2':\n '# Created by https://www.gitignore.io/api/git,python,django,pycharm+all',\n 'line3': '## HUFORMATION ##'}], '룰추가따라 늘어남': [{'file_name': '파일이름',\n 'line_number': 302, 'strings': 'ddddd', 'line1': '탐지 줄 앞', 'line2':\n '탐지된 줄', 'line3': '탐지줄 다음'}, {'file_name': '.gitignore', 'line_number':\n 1, 'strings': 'a', 'line1': 'aa', 'line2': '~~a~~~', 'line3': '다음줄'}, {\n 'file_name': '.gitignore', 'line_number': 1, 'strings': 'a', 'line1':\n 'aa', 'line2': '~~a~~~', 'line3': '다음줄'}], '...': [{'file_name':\n '.gitignore', 'line_number': 1, 'strings': 'a', 'line1': 'aa', 'line2':\n '~~a~~~', 'line3': '다음줄'}]})\n",
"step-5": "import pprint\r\n\r\nclass ErrorResponseCollection(object):\r\n def __init__(self, status, message, param = \"message\"):\r\n self.status = status\r\n self.message = message\r\n self.param = param\r\n\r\n def as_md(self):\r\n return '\\n\\n> **%s**\\n\\n```\\n{\\n\\n\\t\"%s\": \"%s\"\\n\\n}\\n\\n```' % \\\r\n (self.message, self.param, self.message)\r\n\r\nGET_401 = ErrorResponseCollection(\r\n status= 401,\r\n message = \"Authentication credentials were not provided.\",\r\n param = \"detail\"\r\n)\r\n\r\nGET_REPO_STATUS_404 = ErrorResponseCollection(\r\n status = 404,\r\n message = \"NOT FOUND\"\r\n)\r\n\r\n\r\n\r\nclass ResponseCollection(object):\r\n def __init__(self, message=None, data=None):\r\n self.message = message\r\n self.data = data\r\n if self.message == None:\r\n self.message = \" \"\r\n def as_md(self):\r\n return '\\n\\n> **%s**\\n\\n```json\\n%s\\n\\n```' % \\\r\n (self.message, pprint.pformat(self.data, width=20, indent=4))\r\n\r\nGET_BRANCH_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data = dict(branches=[\r\n 'master',\r\n 'develop',\r\n 'feature/get_repo'\r\n ])\r\n)\r\n\r\nGET_REPO_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data = {\r\n\t\"repositories\": [\r\n\t\t{\r\n\t\t\t\"name\": \"dogproject\",\r\n\t\t\t\"url\": \"https://github.com/<user>~~~~~~.git\",\r\n\t\t\t\"latest_commit\": \"2019-09-12\",\r\n\t\t\t\"latest_scan\": \"2019-09-15\",\r\n\t\t\t\r\n\t\t},\r\n\t\t{\r\n\t\t\t\"name\": \"catproject1234533\",\r\n\t\t\t\"url\": \"https://github.com/<user>~~~~~~.git\",\r\n\t\t\t\"latest_commit\": \"2019-10-11\",\r\n\t\t\t\"latest_scan\": \"2019-10-11\",\r\n\t\t},\r\n \r\n\t],\r\n \"repository_size\": 31\r\n }\r\n)\r\n\r\nGET_COMMIT_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data ={\r\n 'commit': [\r\n {'sha': '123133010b97571286b568432f63395d18a49e05', \r\n 'message': 'fix : remove comments and fix code'}, \r\n {'sha': 
'312313fc750cdea348e23145948d2ee58e29f483b', \r\n 'message': 'Update : korea_api crawling and yara convert Update : korea_api crawling and yara rule convert'}, \r\n {'sha': '464d238123137e8502a455f97dca165cb2d28612', 'message': 'Initial commit'}]\r\n \r\n }\r\n)\r\n\r\n\r\nGET_CODE_DETECT_STATUS_200 = ResponseCollection(\r\n message = \"HTTP_200_OK\",\r\n data = {\r\n \"category\": [\r\n \"log_\",\r\n \"Token\",\r\n \"룰추가따라 늘어남\",\r\n \"...\"\r\n ],\r\n\r\n \"log_\": [\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"\",\r\n \"line2\": \"# Created by https://www.gitignore.io/api/git,python,django,pycharm+all\",\r\n \"line3\": \"## HUFORMATION ##\"\r\n }\r\n ],\r\n \"Token\": [\r\n {\r\n \"file_name\": \"파일이름\",\r\n \"line_number\": 10,\r\n \"strings\": \"ddddd\",\r\n \"line1\": \"탐지 줄 앞\",\r\n \"line2\": \"탐지된 줄\",\r\n \"line3\": \"탐지줄 다음\"\r\n },\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"\",\r\n \"line2\": \"# Created by https://www.gitignore.io/api/git,python,django,pycharm+all\",\r\n \"line3\": \"## HUFORMATION ##\"\r\n }\r\n ],\r\n \"룰추가따라 늘어남\": [\r\n {\r\n \"file_name\": \"파일이름\",\r\n \"line_number\": 302,\r\n \"strings\": \"ddddd\",\r\n \"line1\": \"탐지 줄 앞\",\r\n \"line2\": \"탐지된 줄\",\r\n \"line3\": \"탐지줄 다음\"\r\n },\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"aa\",\r\n \"line2\": \"~~a~~~\",\r\n \"line3\": \"다음줄\"\r\n },\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"aa\",\r\n \"line2\": \"~~a~~~\",\r\n \"line3\": \"다음줄\"\r\n },\r\n ],\r\n \"...\": [\r\n {\r\n \"file_name\": \".gitignore\",\r\n \"line_number\": 1,\r\n \"strings\": \"a\",\r\n \"line1\": \"aa\",\r\n \"line2\": \"~~a~~~\",\r\n \"line3\": \"다음줄\"\r\n },\r\n ]\r\n }\r\n)",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# collectd-vcenter - vcenter.py
#
# Author : Loic Lambiel @ exoscale
# Contributor : Josh VanderLinden
# Description : This is a collectd python module to gather stats from Vmware
# vcenter
import logging
import ssl
import time
from pysphere import VIServer
# Detect whether we are running inside collectd; the module can also be
# imported stand-alone (e.g. for testing), in which case plain logging is
# used instead of collectd's logging callbacks.
try:
    import collectd
    COLLECTD_ENABLED = True
except ImportError:
    COLLECTD_ENABLED = False
# Disable HTTPS certificate verification globally so that vCenter servers
# with self-signed certificates can still be polled.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context
MB = 1024 ** 2  # bytes per megabyte; used to convert raw byte counts
# Numeric host status reported to collectd is the index into this tuple.
HOST_STATUS = ('green', 'gray', 'yellow', 'red')
class Collector(object):
    """
    Polls one or more vCenter servers for datastore, datacenter, cluster,
    and host performance metrics.

    """

    def __init__(self, vcenters, username=None, password=None,
                 verbose=False):
        """
        Configuration to poll a vCenter cluster for performance data.

        :param list vcenters:
            A list of one or more vCenter server IPs or hostnames.
        :param str username:
            The username to use to authenticate against the vCenter cluster.
        :param str password:
            The password associated with the specified user.
        :param bool verbose: (optional)
            Whether to enable verbose logging.

        """
        self.vcenters = vcenters
        self.username = username
        self.password = password
        self.verbose = verbose

        # Under collectd, route log records through its handler so messages
        # end up in the daemon's log; stand-alone, fall back to basicConfig.
        if COLLECTD_ENABLED:
            self.log = logging.getLogger()
            self.log.addHandler(CollectdHandler(self.verbose))
        else:
            logging.basicConfig(level=logging.DEBUG)
            self.log = logging.getLogger()

    def poll(self):
        """
        Collect current performance information.

        :returns:
            A dictionary keyed by vCenter hostname/IP with the stats
            gathered from each configured server.

        """
        stats = {}
        for vcenter in self.vcenters:
            stats[vcenter] = self.poll_vcenter(vcenter)

        return stats

    def poll_vcenter(self, vcenter):
        """
        Open a connection to the specified vCenter server and begin gathering
        information about its datastores, datacenters, clusters, and hosts.

        :param str vcenter:
            The hostname or IP of a vCenter server.

        :returns:
            A dictionary containing information about the current state of
            objects managed by the specified vCenter. Empty if the
            connection failed.

        """
        self.log.debug('polling %s@%s' % (self.username, vcenter))
        server = VIServer()

        try:
            server.connect(vcenter, self.username, self.password)
        except Exception:
            # Narrow from a bare ``except`` so KeyboardInterrupt/SystemExit
            # are not swallowed; a failed vCenter simply yields no data.
            self.log.exception('Failed to connect to %s' % (vcenter,))
            return {}

        stats = {
            'datastore': {},
            'datacenter': {},
        }

        for obj, name in server.get_datastores().items():
            ds_stats = self.poll_datastore(server, obj, name)
            stats['datastore'][name] = ds_stats

        datacenters = server.get_datacenters()
        for obj, name in datacenters.items():
            dc_stats = self.poll_datacenter(server, obj, name)
            stats['datacenter'][name] = dc_stats

        return stats

    def poll_datastore(self, server, obj, name):
        """
        Gather metrics about a specific datastore.

        :param VIServer server:
            A valid connection to a vCenter server.
        :param MOR obj:
            Managed object for the datastore.
        :param str name:
            Name of the datastore.

        :returns:
            A dictionary with four keys: capacity, free, used, and usage. The
            capacity, free, and used space are measured in megabytes while the
            usage is a percentage.

        """
        capacity = free = usage = 0

        try:
            self.log.debug('query datastore %s' % (name,))
            props = server._retrieve_properties_traversal(property_names=[
                'name',
                'summary.capacity',
                'summary.freeSpace',
            ], from_node=obj, obj_type='Datastore')

            for ps in props:
                for prop in ps.PropSet:
                    pn, pv = prop.Name, prop.Val
                    if pn == 'summary.capacity':
                        capacity = pv / MB
                    elif pn == 'summary.freeSpace':
                        free = pv / MB
        except Exception:
            # Leave the zero-initialized metrics in place on failure.
            self.log.exception('Failed to get datastore metrics')

        if capacity > 0:
            usage = (capacity - free) / float(capacity) * 100

        return {
            'capacity': capacity,
            'free': free,
            'used': capacity - free,
            'usage': usage,
        }

    def poll_datacenter(self, server, obj, name):
        """
        Gather metrics about a specific datacenter.

        :param VIServer server:
            A valid connection to a vCenter server.
        :param MOR obj:
            Managed object for the datacenter.
        :param str name:
            Name of the datacenter.

        :returns:
            A dictionary with several keys describing the current state of the
            datacenter. This dictionary includes information about each cluster
            and host that is part of the specified datacenter.

        """
        if '.' in name:
            # Strip a domain suffix so metric names stay short.
            name = name.split('.')[0]

        stats = self._poll_group('datacenter', server, obj, name)

        cluster_host_stats = self._poll_group('cluster', server, obj, name)
        for key, value in cluster_host_stats.items():
            if key not in stats:
                stats[key] = value
            elif isinstance(stats[key], dict):
                for c_key, c_value in value.items():
                    stats[key][c_key] = c_value
            else:
                # Percentages are averaged; absolute counters are summed.
                if 'percent' in key:
                    stats[key] = (stats[key] + value) / 2
                else:
                    stats[key] += value

        return stats

    def poll_cluster(self, server, obj, name):
        """
        Gather metrics about a specific cluster.

        :param VIServer server:
            A valid connection to a vCenter server.
        :param MOR obj:
            Managed object for the cluster.
        :param str name:
            Name of the cluster.

        :returns:
            A dictionary with several keys describing the current state of the
            cluster. This dictionary includes information about each host that
            is part of the specified cluster.

        """
        return self._poll_group('cluster', server, obj, name)

    def _poll_group(self, group_type, server, obj, name):
        """
        Generic metrics gathering for datacenters and clusters.

        :param VIServer server:
            A valid connection to a vCenter server.
        :param MOR obj:
            Managed object for a datacenter or cluster.
        :param str name:
            Name of a datacenter or cluster.

        :returns:
            A dictionary with several keys describing the current state of the
            datacenter/cluster. This dictionary includes information about each
            cluster and/or host that is part of the specified object.

        """
        # change collection behavior based on the type of group we're dealing
        # with
        if group_type == 'datacenter':
            # find each cluster in the datacenter
            find_children = server.get_clusters
            poll_child = self.poll_cluster
            child_type = 'cluster'
        elif group_type == 'cluster':
            # find each host in the cluster (the original code first assigned
            # ``get_clusters`` here, which was dead and misleading)
            find_children = server.get_hosts
            poll_child = self.poll_host
            child_type = 'host'

        self.log.debug('start querying %s: %s' % (group_type, name))
        children = find_children(obj)
        self.log.debug('finish querying %s: %s' % (group_type, name))

        # initialize some metrics
        cpu_total = cpu_usage = cpu_percent = 0
        mem_total = mem_usage = mem_percent = 0
        vms_total = vms_running = vms_stopped = 0
        child_stats = {}

        # iterate over each child node in this object group, aggregating the
        # child metrics up to this level
        for child_obj, child_name in children.items():
            stats = poll_child(server, child_obj, child_name)
            child_stats[child_name] = stats

            cpu_total += stats['cpu_total']
            cpu_usage += stats['cpu_usage']

            mem_total += stats['mem_total']
            mem_usage += stats['mem_usage']

            vms_total += stats['vms_total']
            vms_running += stats['vms_running']
            vms_stopped += stats['vms_stopped']

        # recalculate percentages, guarding against empty groups
        if cpu_total > 0:
            cpu_percent = cpu_usage / float(cpu_total) * 100

        if mem_total > 0:
            mem_percent = mem_usage / float(mem_total) * 100

        # return the current metrics for this group
        group_stats = {
            'cpu_total': cpu_total,
            'cpu_usage': cpu_usage,
            'cpu_percent': cpu_percent,
            'mem_total': mem_total,
            'mem_usage': mem_usage,
            'mem_percent': mem_percent,
            'vms_total': vms_total,
            'vms_running': vms_running,
            'vms_stopped': vms_stopped,
            child_type: child_stats,
        }

        return group_stats

    def poll_host(self, server, obj, name):
        """
        Gather metrics about a specific host.

        :param VIServer server:
            A valid connection to a vCenter server.
        :param MOR obj:
            Managed object for the host.
        :param str name:
            Name of the host.

        :returns:
            A dictionary with several keys describing the current state of the
            host, including CPU, memory, and virtual machine information.

        """
        self.log.debug('found host: %s' % (name,))

        status = 0
        cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
        mem_total = mem_usage = mem_percent = 0
        vms_total = vms_running = vms_stopped = 0

        # Strip a domain suffix, but keep dotted-quad names (IP addresses)
        # intact.
        if '.' in name and name.count('.') != 3:
            name = name.split('.')[0]

        props = server._retrieve_properties_traversal(property_names=[
            'name',
            'summary.overallStatus',
            'summary.quickStats.overallMemoryUsage',
            'summary.quickStats.overallCpuUsage',
            'summary.hardware.memorySize',
            'summary.hardware.numCpuCores',
            'summary.hardware.cpuMhz',
        ], from_node=obj, obj_type='HostSystem')

        for prop_set in props:
            for prop in prop_set.PropSet:
                pn, pv = prop.Name, prop.Val

                if pn == 'summary.overallStatus':
                    status = HOST_STATUS.index(pv)
                elif pn == 'summary.quickStats.overallMemoryUsage':
                    mem_usage = pv
                elif pn == 'summary.quickStats.overallCpuUsage':
                    cpu_usage = pv
                elif pn == 'summary.hardware.memorySize':
                    mem_total = pv / MB
                elif pn == 'summary.hardware.numCpuCores':
                    cpu_count = pv
                elif pn == 'summary.hardware.cpuMhz':
                    cpu_mhz_per_core = pv

        vms_total = len(server.get_registered_vms(obj))
        vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
        vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))

        cpu_total = cpu_count * cpu_mhz_per_core
        # Guard against ZeroDivisionError when the host reports no hardware
        # info (e.g. a disconnected or not-responding host).
        if cpu_total > 0:
            cpu_percent = cpu_usage / float(cpu_total) * 100
        if mem_total > 0:
            mem_percent = mem_usage / float(mem_total) * 100

        stats = {
            'status': status,
            'cpu_total': cpu_total,
            'cpu_usage': cpu_usage,
            'cpu_percent': cpu_percent,
            'cpu_count': cpu_count,
            'mem_total': mem_total,
            'mem_usage': mem_usage,
            'mem_percent': mem_percent,
            'vms_total': vms_total,
            'vms_running': vms_running,
            'vms_stopped': vms_stopped,
        }

        return stats
class CollectdCollector(Collector):
    """
    Handle dispatching statistics to collectd.

    """

    NAME = 'vCenter'

    def __init__(self, *args, **kwargs):
        # ``sleep_time`` is specific to this subclass; pop it *before*
        # delegating. The original ``kwargs.get`` after ``super().__init__``
        # meant passing sleep_time raised TypeError in Collector.__init__
        # (unexpected keyword), so the option could never actually be used.
        self.sleep_time = kwargs.pop('sleep_time', 20)
        super(CollectdCollector, self).__init__(*args, **kwargs)

    def configure(self, conf):
        """
        Callback to configure the plugin based on collectd's settings.

        """
        for node in conf.children:
            key = node.key
            val = node.values[0]

            if key == 'Vcenter':
                self.vcenters = val.split()
            elif key == 'Username':
                self.username = val
            elif key == 'Password':
                self.password = val
            elif key == 'Verbose':
                self.verbose = bool(val)
            elif key == 'Sleep':
                self.sleep_time = int(val)
            else:
                self.log.warn('Unknown config key: %s' % (key,))

    def read(self):
        """
        Callback to send data back to collectd.

        """
        self.log.debug('Beginning read callback')
        info = self.poll()

        if not info:
            self.log.warn('No data received')
            return

        def dispatch_host(name, data):
            """
            Helper to reduce duplication when reporting host metrics.

            """
            for key, value in data.items():
                self.dispatch(name, 'host_%s' % (key,), name, value)

        # report information for all vCenter servers
        for vcenter, data in info.items():
            # report datastore information
            for ds_name, ds_data in data['datastore'].items():
                for key, value in ds_data.items():
                    self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)

            # report datacenter information
            for dc_name, dc_data in data['datacenter'].items():
                # extract any cluster and host information for later
                # processing
                clusters = dc_data.pop('cluster', {})
                hosts = dc_data.pop('host', {})

                for key, value in dc_data.items():
                    self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)

                # report cluster information
                for c_name, c_data in clusters.items():
                    c_hosts = c_data.pop('host', {})

                    for key, value in c_data.items():
                        o_type = 'cluster_%s' % (key,)
                        self.dispatch(dc_name, o_type, c_name, value)

                    for ch_name, ch_data in c_hosts.items():
                        dispatch_host(ch_name, ch_data)

                # report host information
                for h_name, h_data in hosts.items():
                    dispatch_host(h_name, h_data)

        # throttle consecutive polls; collectd calls ``read`` again as soon
        # as this callback returns
        time.sleep(self.sleep_time)

    def dispatch(self, host, obj_type, obj_instance, value):
        """
        Helper to clean up metric sending.

        :param str host:
            The name of the host to which the metric belongs.
        :param str obj_type:
            The type of metric to report.
        :param str obj_instance:
            An instance to associate with the metric.
        :param int value:
            The value of the metric.

        """
        val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
        val.type_instance = obj_type
        val.plugin_instance = obj_instance
        val.values = [value]
        val.dispatch()
class CollectdHandler(logging.Handler):
    """
    Expose collectd logger using standard Python logging.

    """

    def __init__(self, verbose=False, *args, **kwargs):
        # When False, DEBUG records are suppressed in ``emit``.
        self.verbose = verbose
        super(CollectdHandler, self).__init__(*args, **kwargs)

        if COLLECTD_ENABLED:
            # Map stdlib logging levels onto collectd's logging functions
            # (collectd has no separate debug channel, so DEBUG -> info).
            self._handler_map = {
                logging.CRITICAL: collectd.error,
                logging.ERROR: collectd.error,
                logging.WARN: collectd.warning,
                logging.INFO: collectd.info,
                logging.DEBUG: collectd.info,
            }

    def emit(self, record):
        if not COLLECTD_ENABLED:
            return

        # LogRecord stores the numeric level in ``levelno``; the original
        # ``record.level`` attribute does not exist and raised
        # AttributeError on every log call.
        if record.levelno == logging.DEBUG and not self.verbose:
            return

        handler = self._handler_map[record.levelno]
        handler(record.getMessage())
# When running inside collectd, create a collector instance and register
# its callbacks.  The vCenter list starts empty; it is populated later by
# the ``configure`` callback from the plugin's config block.
if COLLECTD_ENABLED:
    instance = CollectdCollector([])

    # ``configure`` receives the plugin configuration; ``read`` is invoked
    # on every collectd polling interval to gather and dispatch metrics.
    collectd.register_config(instance.configure)
    collectd.register_read(instance.read)
|
normal
|
{
"blob_id": "55f76ae1ffe0fb2d2ca2c7a20aab45ffb00cf178",
"index": 613,
"step-1": "<mask token>\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, 
obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Collector(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 
100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 
'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None, verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n stats = {'datastore': {}, 'datacenter': {}}\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats 
= self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n return stats\n <mask token>\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n if '.' in name:\n name = name.split('.')[0]\n stats = self._poll_group('datacenter', server, obj, name)\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n elif 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. 
This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n if group_type == 'datacenter':\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':\n mem_usage, 'mem_percent': mem_percent, 'vms_total': 
vms_total,\n 'vms_running': vms_running, 'vms_stopped': vms_stopped,\n child_type: child_stats}\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': 
cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, 
o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None, verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n stats = {'datastore': {}, 'datacenter': {}}\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats 
= self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n return stats\n\n def poll_datastore(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datastore.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datastore.\n :param str name:\n Name of the datastore.\n\n :returns:\n A dictionary with four keys: capacity, free, used, and usage. The\n capacity, free, and used space are measured in megabytes while the\n usage is a percentage.\n\n \"\"\"\n capacity = free = usage = 0\n try:\n self.log.debug('query datastore %s' % (name,))\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.capacity', 'summary.freeSpace'], from_node\n =obj, obj_type='Datastore')\n for ps in props:\n for prop in ps.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.capacity':\n capacity = pv / MB\n elif pn == 'summary.freeSpace':\n free = pv / MB\n except:\n self.log.exception('Failed to get datastore metrics')\n if capacity > 0:\n usage = (capacity - free) / float(capacity) * 100\n return {'capacity': capacity, 'free': free, 'used': capacity - free,\n 'usage': usage}\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n if '.' 
in name:\n name = name.split('.')[0]\n stats = self._poll_group('datacenter', server, obj, name)\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n elif 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. 
This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n if group_type == 'datacenter':\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':\n mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,\n 'vms_running': vms_running, 'vms_stopped': vms_stopped,\n child_type: child_stats}\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status 
= 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n 
\"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = 
collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-5": "# collectd-vcenter - vcenter.py\n#\n# Author : Loic Lambiel @ exoscale\n# Contributor : Josh VanderLinden\n# Description : This is a collectd python module to gather stats from Vmware\n# vcenter\n\nimport logging\nimport ssl\nimport time\n\nfrom pysphere import VIServer\n\ntry:\n import collectd\n COLLECTD_ENABLED = True\nexcept ImportError:\n COLLECTD_ENABLED = False\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\nelse:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\nMB = 1024 ** 2\nHOST_STATUS = ('green', 'gray', 'yellow', 'red')\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None,\n verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about 
its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n\n stats = {\n 'datastore': {},\n 'datacenter': {},\n }\n\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats = self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n\n return stats\n\n def poll_datastore(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datastore.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datastore.\n :param str name:\n Name of the datastore.\n\n :returns:\n A dictionary with four keys: capacity, free, used, and usage. 
The\n capacity, free, and used space are measured in megabytes while the\n usage is a percentage.\n\n \"\"\"\n\n capacity = free = usage = 0\n\n try:\n self.log.debug('query datastore %s' % (name,))\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.capacity',\n 'summary.freeSpace',\n ], from_node=obj, obj_type='Datastore')\n\n for ps in props:\n for prop in ps.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.capacity':\n capacity = pv / MB\n elif pn == 'summary.freeSpace':\n free = pv / MB\n except:\n self.log.exception('Failed to get datastore metrics')\n\n if capacity > 0:\n usage = (capacity - free) / float(capacity) * 100\n\n return {\n 'capacity': capacity,\n 'free': free,\n 'used': capacity - free,\n 'usage': usage,\n }\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n\n if '.' 
in name:\n name = name.split('.')[0]\n\n stats = self._poll_group('datacenter', server, obj, name)\n\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n else:\n if 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. 
This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n\n # change collection behavior based on the type of group we're dealing\n # with\n if group_type == 'datacenter':\n # find each cluster in the datacenter\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n # find each host in the datacenter or cluster\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n\n # initialize some metrics\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n\n # iterate over each child node in this object group\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n\n # aggregate data from each child to the top level\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n\n # recalculate percentages\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n\n # return the current metrics for this group\n group_stats = {\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n child_type: child_stats,\n }\n\n return group_stats\n\n def poll_host(self, server, obj, name):\n 
\"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n\n self.log.debug('found host: %s' % (name,))\n\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n\n if '.' in name and name.count('.') != 3:\n name = name.split('.')[0]\n\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize',\n 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz',\n ], from_node=obj, obj_type='HostSystem')\n\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n\n stats = {\n 'status': status,\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'cpu_count': cpu_count,\n 'mem_total': mem_total,\n 
'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n }\n\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n\n self.log.debug('Beginning read callback')\n info = self.poll()\n\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n\n # report information for all vCenter servers\n for vcenter, data in info.items():\n # report datastore information\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n\n # report datacenter information\n for dc_name, dc_data in data['datacenter'].items():\n # extract any cluster and host information for later processing\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n\n # report cluster information\n for c_name, c_data in clusters.items():\n 
c_hosts = c_data.pop('host', {})\n\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n\n # report host information\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n\n if COLLECTD_ENABLED:\n self._handler_map = {\n logging.CRITICAL: collectd.error,\n logging.ERROR: collectd.error,\n logging.WARN: collectd.warning,\n logging.INFO: collectd.info,\n logging.DEBUG: collectd.info,\n }\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n\n if record.level == logging.DEBUG and not self.verbose:\n return\n\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\nif COLLECTD_ENABLED:\n instance = CollectdCollector([])\n\n collectd.register_config(instance.configure)\n collectd.register_read(instance.read)\n",
"step-ids": [
11,
13,
19,
20,
24
]
}
|
[
11,
13,
19,
20,
24
] |
<|reserved_special_token_0|>
@app.route('/QAsearch', methods=['POST', 'GET'])
def QAsearch():
"""Renders the QAsearch page."""
question = ''
form = QuestionForm()
question = form.question.data
if form.validate_on_submit():
return redirect(url_for('answer', word=question))
return render_template('QAsearch.html', title='QAsearch Page', year=
datetime.now().year, form=form, question=question)
@app.route('/instruction')
def instruction():
"""Renders the instruction page."""
return render_template('instruction.html', title='说明', year=datetime.
now().year, message='Instruction')
<|reserved_special_token_0|>
@app.route('/main')
@app.route('/')
def main():
return render_template('newMain.html', title='Welcome Page', year=
datetime.now().year)
@app.route('/graph_search', methods=['get', 'post'])
def graph_search():
return render_template('graph_search.html', title='Graph search page',
year=datetime.now().year)
@app.route('/knowledge_search', methods=['get', 'post'])
def knowledge_search():
searchKnowledge = knowledgeSearch()
des = request.args.get('description')
json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)
print(json_data)
return jsonify(json_data)
@app.route('/case_search_Test', methods=['get', 'post'])
def case_search_Test():
return render_template('case_search_Test.html', title=
'Case search page', year=datetime.now().year)
@app.route('/case_graph_search', methods=['get', 'post'])
def case_graph_search():
caseDes = request.args.get('caseDes')
case_graph_result = caseQuery(caseDes)
pre_json_data = case_graph_result.getData()
print(pre_json_data)
return jsonify(pre_json_data)
@app.route('/knife', methods=['get', 'post'])
def knife():
return render_template('knife.html', title='KNIFE SEARCH', year=
datetime.now().year)
@app.route('/searchAll', methods=['get', 'post'])
def searchAll():
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/QAsearch', methods=['POST', 'GET'])
def QAsearch():
"""Renders the QAsearch page."""
question = ''
form = QuestionForm()
question = form.question.data
if form.validate_on_submit():
return redirect(url_for('answer', word=question))
return render_template('QAsearch.html', title='QAsearch Page', year=
datetime.now().year, form=form, question=question)
@app.route('/instruction')
def instruction():
"""Renders the instruction page."""
return render_template('instruction.html', title='说明', year=datetime.
now().year, message='Instruction')
@app.route('/about')
def about():
"""Renders the about page."""
return render_template('about.html', title='About', year=datetime.now()
.year, message='Your application description page.')
<|reserved_special_token_0|>
@app.route('/main')
@app.route('/')
def main():
return render_template('newMain.html', title='Welcome Page', year=
datetime.now().year)
@app.route('/graph_search', methods=['get', 'post'])
def graph_search():
return render_template('graph_search.html', title='Graph search page',
year=datetime.now().year)
@app.route('/knowledge_search', methods=['get', 'post'])
def knowledge_search():
searchKnowledge = knowledgeSearch()
des = request.args.get('description')
json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)
print(json_data)
return jsonify(json_data)
@app.route('/case_search_Test', methods=['get', 'post'])
def case_search_Test():
return render_template('case_search_Test.html', title=
'Case search page', year=datetime.now().year)
@app.route('/case_graph_search', methods=['get', 'post'])
def case_graph_search():
caseDes = request.args.get('caseDes')
case_graph_result = caseQuery(caseDes)
pre_json_data = case_graph_result.getData()
print(pre_json_data)
return jsonify(pre_json_data)
@app.route('/knife', methods=['get', 'post'])
def knife():
return render_template('knife.html', title='KNIFE SEARCH', year=
datetime.now().year)
@app.route('/searchAll', methods=['get', 'post'])
def searchAll():
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/QAsearch', methods=['POST', 'GET'])
def QAsearch():
"""Renders the QAsearch page."""
question = ''
form = QuestionForm()
question = form.question.data
if form.validate_on_submit():
return redirect(url_for('answer', word=question))
return render_template('QAsearch.html', title='QAsearch Page', year=
datetime.now().year, form=form, question=question)
@app.route('/instruction')
def instruction():
"""Renders the instruction page."""
return render_template('instruction.html', title='说明', year=datetime.
now().year, message='Instruction')
@app.route('/about')
def about():
"""Renders the about page."""
return render_template('about.html', title='About', year=datetime.now()
.year, message='Your application description page.')
@app.route('/answer/<word>')
def answer(word):
"""Renders the answer page"""
print(word)
start = time.clock()
finder = answerFinder()
answer = finder.findANDpack(word)
end = time.clock()
print(str(end - start))
return render_template('answer.html', title='Answer', answer=answer)
@app.route('/main')
@app.route('/')
def main():
return render_template('newMain.html', title='Welcome Page', year=
datetime.now().year)
@app.route('/graph_search', methods=['get', 'post'])
def graph_search():
return render_template('graph_search.html', title='Graph search page',
year=datetime.now().year)
@app.route('/knowledge_search', methods=['get', 'post'])
def knowledge_search():
searchKnowledge = knowledgeSearch()
des = request.args.get('description')
json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)
print(json_data)
return jsonify(json_data)
@app.route('/case_search_Test', methods=['get', 'post'])
def case_search_Test():
return render_template('case_search_Test.html', title=
'Case search page', year=datetime.now().year)
@app.route('/case_graph_search', methods=['get', 'post'])
def case_graph_search():
caseDes = request.args.get('caseDes')
case_graph_result = caseQuery(caseDes)
pre_json_data = case_graph_result.getData()
print(pre_json_data)
return jsonify(pre_json_data)
@app.route('/knife', methods=['get', 'post'])
def knife():
return render_template('knife.html', title='KNIFE SEARCH', year=
datetime.now().year)
@app.route('/searchAll', methods=['get', 'post'])
def searchAll():
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from datetime import datetime
from flask import render_template, redirect, url_for, request, jsonify
from athena_App import app
from athena_App.formClass import QuestionForm
import time
from athena_App.layer_frontInteracting.qa_module import answerFinder
from athena_App.layer_frontInteracting.kg_module import knowledgeSearch
from athena_App.layer_frontInteracting.case_module import caseQuery
@app.route('/QAsearch', methods=['POST', 'GET'])
def QAsearch():
"""Renders the QAsearch page."""
question = ''
form = QuestionForm()
question = form.question.data
if form.validate_on_submit():
return redirect(url_for('answer', word=question))
return render_template('QAsearch.html', title='QAsearch Page', year=
datetime.now().year, form=form, question=question)
@app.route('/instruction')
def instruction():
"""Renders the instruction page."""
return render_template('instruction.html', title='说明', year=datetime.
now().year, message='Instruction')
@app.route('/about')
def about():
"""Renders the about page."""
return render_template('about.html', title='About', year=datetime.now()
.year, message='Your application description page.')
@app.route('/answer/<word>')
def answer(word):
"""Renders the answer page"""
print(word)
start = time.clock()
finder = answerFinder()
answer = finder.findANDpack(word)
end = time.clock()
print(str(end - start))
return render_template('answer.html', title='Answer', answer=answer)
@app.route('/main')
@app.route('/')
def main():
return render_template('newMain.html', title='Welcome Page', year=
datetime.now().year)
@app.route('/graph_search', methods=['get', 'post'])
def graph_search():
return render_template('graph_search.html', title='Graph search page',
year=datetime.now().year)
@app.route('/knowledge_search', methods=['get', 'post'])
def knowledge_search():
searchKnowledge = knowledgeSearch()
des = request.args.get('description')
json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)
print(json_data)
return jsonify(json_data)
@app.route('/case_search_Test', methods=['get', 'post'])
def case_search_Test():
return render_template('case_search_Test.html', title=
'Case search page', year=datetime.now().year)
@app.route('/case_graph_search', methods=['get', 'post'])
def case_graph_search():
caseDes = request.args.get('caseDes')
case_graph_result = caseQuery(caseDes)
pre_json_data = case_graph_result.getData()
print(pre_json_data)
return jsonify(pre_json_data)
@app.route('/knife', methods=['get', 'post'])
def knife():
return render_template('knife.html', title='KNIFE SEARCH', year=
datetime.now().year)
@app.route('/searchAll', methods=['get', 'post'])
def searchAll():
pass
<|reserved_special_token_1|>
"""
Routes and views for the flask application.
"""
from datetime import datetime
from flask import render_template, redirect, url_for, request, jsonify
from athena_App import app
from athena_App.formClass import QuestionForm
import time
#attention:
#this module include large word vector which need a lot of time to load
#turn it off when when you debugging other module
#
#from athena_App.data_process.es_QAsearch import *
#
#from athena_App.data_process.keywordCompare import Keyword_Compare, Answer
#from athena_App.data_process.word2vecCompareModel import *
#from athena_App.data_process.graph_query import *
#from athena_App.openlaw.graphOfcase_query_echart import *
#reconstruct series
from athena_App.layer_frontInteracting.qa_module import answerFinder
from athena_App.layer_frontInteracting.kg_module import knowledgeSearch
from athena_App.layer_frontInteracting.case_module import caseQuery
@app.route('/QAsearch', methods=['POST','GET'])
def QAsearch():
"""Renders the QAsearch page."""
question = ''
form = QuestionForm()
question = form.question.data
if form.validate_on_submit():
return redirect(url_for('answer',word=question))
return render_template(
'QAsearch.html',
title = 'QAsearch Page',
year = datetime.now().year,
form = form,
question = question
)
@app.route('/instruction')
def instruction():
"""Renders the instruction page."""
return render_template(
'instruction.html',
title='说明',
year=datetime.now().year,
message='Instruction'
)
@app.route('/about')
def about():
"""Renders the about page."""
return render_template(
'about.html',
title='About',
year=datetime.now().year,
message='Your application description page.'
)
@app.route('/answer/<word>')
def answer(word):
"""Renders the answer page"""
print(word)
start=time.clock()
finder=answerFinder()
answer=finder.findANDpack(word)
end=time.clock()
print(str(end-start))
return render_template(
'answer.html',
title='Answer',
answer=answer
)
@app.route('/main')
@app.route('/')
def main():
return render_template(
'newMain.html',
title = 'Welcome Page',
year = datetime.now().year
)
@app.route('/graph_search',methods=['get','post'])
def graph_search():
return render_template(
'graph_search.html',
title = 'Graph search page',
year = datetime.now().year)
@app.route('/knowledge_search',methods=['get','post'])
def knowledge_search():
#initialize graph search object
searchKnowledge=knowledgeSearch()
des=request.args.get('description')
json_data=searchKnowledge.getTotalData_forKnowledgeSearch(des)
print(json_data)
return jsonify(json_data)
@app.route('/case_search_Test',methods=['get','post'])
def case_search_Test():
return render_template(
'case_search_Test.html',
title = 'Case search page',
year = datetime.now().year)
@app.route('/case_graph_search',methods=['get','post'])
def case_graph_search():
caseDes=request.args.get('caseDes')
#initialize graph search object
case_graph_result=caseQuery(caseDes)
pre_json_data=case_graph_result.getData()
print(pre_json_data)
return jsonify(pre_json_data)
@app.route('/knife',methods=['get','post'])
def knife():
return render_template(
'knife.html',
title = 'KNIFE SEARCH',
year = datetime.now().year
)
@app.route('/searchAll',methods=['get','post'])
def searchAll():
pass
|
flexible
|
{
"blob_id": "3457a7c080da041ad279239bd6a3d214a3b8e49f",
"index": 6695,
"step-1": "<mask token>\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n<mask token>\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-2": "<mask token>\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template('about.html', title='About', year=datetime.now()\n .year, message='Your application description page.')\n\n\n<mask token>\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return 
render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-3": "<mask token>\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template('about.html', title='About', year=datetime.now()\n .year, message='Your application description page.')\n\n\n@app.route('/answer/<word>')\ndef answer(word):\n \"\"\"Renders the answer page\"\"\"\n print(word)\n start = time.clock()\n finder = answerFinder()\n answer = finder.findANDpack(word)\n end = time.clock()\n print(str(end - start))\n return render_template('answer.html', title='Answer', answer=answer)\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 
'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-4": "<mask token>\nfrom datetime import datetime\nfrom flask import render_template, redirect, url_for, request, jsonify\nfrom athena_App import app\nfrom athena_App.formClass import QuestionForm\nimport time\nfrom athena_App.layer_frontInteracting.qa_module import answerFinder\nfrom athena_App.layer_frontInteracting.kg_module import knowledgeSearch\nfrom athena_App.layer_frontInteracting.case_module import caseQuery\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template('about.html', title='About', year=datetime.now()\n .year, message='Your application description page.')\n\n\n@app.route('/answer/<word>')\ndef answer(word):\n \"\"\"Renders the answer page\"\"\"\n print(word)\n start = time.clock()\n finder = answerFinder()\n answer = finder.findANDpack(word)\n end = time.clock()\n print(str(end - start))\n return render_template('answer.html', title='Answer', answer=answer)\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = 
request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-5": "\"\"\"\nRoutes and views for the flask application.\n\"\"\"\n\nfrom datetime import datetime\nfrom flask import render_template, redirect, url_for, request, jsonify\nfrom athena_App import app\nfrom athena_App.formClass import QuestionForm\n\nimport time\n\n#attention:\n#this module include large word vector which need a lot of time to load\n#turn it off when when you debugging other module\n#\n#from athena_App.data_process.es_QAsearch import *\n#\n\n#from athena_App.data_process.keywordCompare import Keyword_Compare, Answer\n#from athena_App.data_process.word2vecCompareModel import *\n\n#from athena_App.data_process.graph_query import *\n\n#from athena_App.openlaw.graphOfcase_query_echart import *\n\n#reconstruct series\n\nfrom athena_App.layer_frontInteracting.qa_module import answerFinder\nfrom athena_App.layer_frontInteracting.kg_module import knowledgeSearch\nfrom athena_App.layer_frontInteracting.case_module import caseQuery\n\n\n@app.route('/QAsearch', methods=['POST','GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer',word=question))\n return render_template(\n 'QAsearch.html',\n title = 'QAsearch Page',\n year = datetime.now().year,\n form = form,\n question = question\n )\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template(\n 'instruction.html',\n title='说明',\n year=datetime.now().year,\n message='Instruction'\n )\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )\n\n@app.route('/answer/<word>')\ndef answer(word):\n \"\"\"Renders the answer page\"\"\"\n print(word)\n start=time.clock()\n finder=answerFinder()\n answer=finder.findANDpack(word)\n end=time.clock()\n 
print(str(end-start))\n return render_template(\n 'answer.html',\n title='Answer',\n answer=answer\n )\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template(\n 'newMain.html',\n title = 'Welcome Page',\n year = datetime.now().year\n )\n\n@app.route('/graph_search',methods=['get','post'])\ndef graph_search():\n return render_template(\n 'graph_search.html',\n title = 'Graph search page',\n year = datetime.now().year)\n\n@app.route('/knowledge_search',methods=['get','post'])\ndef knowledge_search():\n\n #initialize graph search object\n searchKnowledge=knowledgeSearch()\n\n des=request.args.get('description')\n json_data=searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n\n return jsonify(json_data)\n\n@app.route('/case_search_Test',methods=['get','post'])\ndef case_search_Test():\n return render_template(\n 'case_search_Test.html',\n title = 'Case search page',\n year = datetime.now().year)\n\n@app.route('/case_graph_search',methods=['get','post'])\ndef case_graph_search():\n\n caseDes=request.args.get('caseDes')\n #initialize graph search object\n case_graph_result=caseQuery(caseDes)\n\n pre_json_data=case_graph_result.getData()\n print(pre_json_data)\n\n return jsonify(pre_json_data)\n\n@app.route('/knife',methods=['get','post'])\ndef knife():\n return render_template(\n 'knife.html',\n title = 'KNIFE SEARCH',\n year = datetime.now().year\n )\n\n@app.route('/searchAll',methods=['get','post'])\ndef searchAll():\n pass",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
<|reserved_special_token_0|>
class BasicModel(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def updateModel(self, X, Y):
"""
Updates the model with new observations.
"""
self.X = X
self.Y = Y
def get_X(self):
return np.copy(self.X)
def get_Y(self):
return deepcopy(self.Y)
def get_XY(self):
X = np.copy(self.X)
Y = deepcopy(self.Y)
return X, Y
def get_model_parameters(self):
"""
"""
pass
def get_model_parameters_names(self):
"""
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicModel(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, output_dim=None, X=None, Y=None):
self.output_dim = output_dim
self.X = X
self.Y = Y
self.name = 'basic model'
def updateModel(self, X, Y):
"""
Updates the model with new observations.
"""
self.X = X
self.Y = Y
def get_X(self):
return np.copy(self.X)
def get_Y(self):
return deepcopy(self.Y)
def get_XY(self):
X = np.copy(self.X)
Y = deepcopy(self.Y)
return X, Y
def get_model_parameters(self):
"""
"""
pass
def get_model_parameters_names(self):
"""
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicModel(object):
<|reserved_special_token_0|>
analytical_gradient_prediction = True
def __init__(self, output_dim=None, X=None, Y=None):
self.output_dim = output_dim
self.X = X
self.Y = Y
self.name = 'basic model'
def updateModel(self, X, Y):
"""
Updates the model with new observations.
"""
self.X = X
self.Y = Y
def get_X(self):
return np.copy(self.X)
def get_Y(self):
return deepcopy(self.Y)
def get_XY(self):
X = np.copy(self.X)
Y = deepcopy(self.Y)
return X, Y
def get_model_parameters(self):
"""
"""
pass
def get_model_parameters_names(self):
"""
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicModel(object):
"""
Class for handling a very simple model that only requires saving the evaluated points (along with their corresponding outputs) so far.
"""
analytical_gradient_prediction = True
def __init__(self, output_dim=None, X=None, Y=None):
self.output_dim = output_dim
self.X = X
self.Y = Y
self.name = 'basic model'
def updateModel(self, X, Y):
"""
Updates the model with new observations.
"""
self.X = X
self.Y = Y
def get_X(self):
return np.copy(self.X)
def get_Y(self):
return deepcopy(self.Y)
def get_XY(self):
X = np.copy(self.X)
Y = deepcopy(self.Y)
return X, Y
def get_model_parameters(self):
"""
"""
pass
def get_model_parameters_names(self):
"""
"""
pass
<|reserved_special_token_1|>
# Copyright (c) 2018, Raul Astudillo
import numpy as np
from copy import deepcopy
class BasicModel(object):
"""
Class for handling a very simple model that only requires saving the evaluated points (along with their corresponding outputs) so far.
"""
analytical_gradient_prediction = True
def __init__(self, output_dim=None, X=None, Y=None):
self.output_dim = output_dim
self.X = X
self.Y = Y
self.name = 'basic model'
def updateModel(self, X, Y):
"""
Updates the model with new observations.
"""
self.X = X
self.Y = Y
def get_X(self):
return np.copy(self.X)
def get_Y(self):
return deepcopy(self.Y)
def get_XY(self):
X = np.copy(self.X)
Y = deepcopy(self.Y)
return X, Y
def get_model_parameters(self):
"""
"""
pass
def get_model_parameters_names(self):
"""
"""
pass
|
flexible
|
{
"blob_id": "88071df9367804b1c6e2b1c80da178ab7658e7a4",
"index": 3861,
"step-1": "<mask token>\n\n\nclass BasicModel(object):\n <mask token>\n <mask token>\n <mask token>\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-2": "<mask token>\n\n\nclass BasicModel(object):\n <mask token>\n <mask token>\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass BasicModel(object):\n <mask token>\n analytical_gradient_prediction = True\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-4": "<mask token>\n\n\nclass BasicModel(object):\n \"\"\"\n Class for handling a very simple model that only requires saving the evaluated points (along with their corresponding outputs) so far.\n \"\"\"\n analytical_gradient_prediction = True\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n\n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-5": "# Copyright (c) 2018, Raul Astudillo\n\nimport numpy as np\nfrom copy import deepcopy\n\nclass BasicModel(object):\n \"\"\"\n Class for handling a very simple model that only requires saving the evaluated points (along with their corresponding outputs) so far.\n \"\"\"\n analytical_gradient_prediction = True\n\n def __init__(self, output_dim=None, X=None, Y=None):\n self.output_dim = output_dim\n self.X = X\n self.Y = Y\n self.name = 'basic model'\n\n def updateModel(self, X, Y):\n \"\"\"\n Updates the model with new observations.\n \"\"\"\n self.X = X\n self.Y = Y\n\n def get_X(self):\n return np.copy(self.X)\n\n def get_Y(self):\n return deepcopy(self.Y)\n\n def get_XY(self):\n X = np.copy(self.X)\n Y = deepcopy(self.Y)\n return X, Y\n \n def get_model_parameters(self):\n \"\"\"\n \"\"\"\n pass\n\n def get_model_parameters_names(self):\n \"\"\"\n \"\"\"\n pass\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/comments/<article_id>', methods=['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
try:
temp_list = json.loads(comments_range)
if isinstance(temp_list, list) and len(temp_list) == 2:
target_article = session.query(Article_list).filter_by(id=
article_id).one_or_none()
if target_article:
target_comments = target_article.relate_comments
comments_in_range = target_comments[-1 - temp_list[0]:-1 -
temp_list[1]:-1]
comments_count = len(target_comments)
comments_list = list(map(lambda x: {'comment': x.content,
'time': x.time, 'user_name': session.query(user).
filter_by(id=x.user_id).one().nickname, 'user_avatar':
session.query(user).filter_by(id=x.user_id).one().
avatar}, comments_in_range))
resp = {'status': 200, 'result': {'count': comments_count,
'commentsList': comments_list}}
session.close()
return jsonify(resp)
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
orm = config_orm_initial.initialize_orm()
session = orm['dict_session']
Article_list = orm['dict_Articlelist']
user = orm['dict_user']
app = Blueprint('api_get_comments', __name__)
@app.route('/comments/<article_id>', methods=['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
try:
temp_list = json.loads(comments_range)
if isinstance(temp_list, list) and len(temp_list) == 2:
target_article = session.query(Article_list).filter_by(id=
article_id).one_or_none()
if target_article:
target_comments = target_article.relate_comments
comments_in_range = target_comments[-1 - temp_list[0]:-1 -
temp_list[1]:-1]
comments_count = len(target_comments)
comments_list = list(map(lambda x: {'comment': x.content,
'time': x.time, 'user_name': session.query(user).
filter_by(id=x.user_id).one().nickname, 'user_avatar':
session.query(user).filter_by(id=x.user_id).one().
avatar}, comments_in_range))
resp = {'status': 200, 'result': {'count': comments_count,
'commentsList': comments_list}}
session.close()
return jsonify(resp)
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
<|reserved_special_token_1|>
from flask import Blueprint, jsonify, request, abort, current_app
import json
from config import config_orm_initial
orm = config_orm_initial.initialize_orm()
session = orm['dict_session']
Article_list = orm['dict_Articlelist']
user = orm['dict_user']
app = Blueprint('api_get_comments', __name__)
@app.route('/comments/<article_id>', methods=['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
try:
temp_list = json.loads(comments_range)
if isinstance(temp_list, list) and len(temp_list) == 2:
target_article = session.query(Article_list).filter_by(id=
article_id).one_or_none()
if target_article:
target_comments = target_article.relate_comments
comments_in_range = target_comments[-1 - temp_list[0]:-1 -
temp_list[1]:-1]
comments_count = len(target_comments)
comments_list = list(map(lambda x: {'comment': x.content,
'time': x.time, 'user_name': session.query(user).
filter_by(id=x.user_id).one().nickname, 'user_avatar':
session.query(user).filter_by(id=x.user_id).one().
avatar}, comments_in_range))
resp = {'status': 200, 'result': {'count': comments_count,
'commentsList': comments_list}}
session.close()
return jsonify(resp)
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
<|reserved_special_token_1|>
# -*- coding: UTF-8 -*-
from flask import Blueprint, jsonify, request, abort, current_app
import json
from config import config_orm_initial
orm = config_orm_initial.initialize_orm()
session = orm['dict_session']
Article_list = orm['dict_Articlelist']
user = orm['dict_user']
app = Blueprint('api_get_comments', __name__)
@app.route('/comments/<article_id>', methods = ['POST'])
def get_comments(article_id):
comments_range = request.form.get('comments_for_single')
# 尝试把前端传来的参数解析成list
try:
temp_list = json.loads(comments_range)
# 判断参数是否是list,并且只有2个元素
if isinstance(temp_list, list) and len(temp_list) == 2:
# 先找到对应的article
target_article = session.query(Article_list).filter_by(id = article_id).one_or_none()
# 如果能找到这篇文章
if target_article:
# 然后调用一对多方法,拿到这篇article对应的comments和comments总数
target_comments = target_article.relate_comments
# 拿到的结果和list差不多,所以取倒数排序
comments_in_range = target_comments[-1-temp_list[0] : -1-temp_list[1]: -1]
comments_count = len(target_comments)
comments_list = list(map(
lambda x:{
'comment':x.content,
'time':x.time,
'user_name':session.query(user).filter_by(id=x.user_id).one().nickname,
'user_avatar':session.query(user).filter_by(id=x.user_id).one().avatar
},
comments_in_range)
)
resp = {'status': 200, 'result': {'count': comments_count, 'commentsList': comments_list}}
session.close()
return jsonify(resp)
# 如果不能找到这篇文章
else:
abort(400)
else:
abort(400)
except Exception as e:
current_app.logger.info(e)
abort(400)
|
flexible
|
{
"blob_id": "016c004fd95d901a6d55b6f7460397223a6baa3b",
"index": 1881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/comments/<article_id>', methods=['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n try:\n temp_list = json.loads(comments_range)\n if isinstance(temp_list, list) and len(temp_list) == 2:\n target_article = session.query(Article_list).filter_by(id=\n article_id).one_or_none()\n if target_article:\n target_comments = target_article.relate_comments\n comments_in_range = target_comments[-1 - temp_list[0]:-1 -\n temp_list[1]:-1]\n comments_count = len(target_comments)\n comments_list = list(map(lambda x: {'comment': x.content,\n 'time': x.time, 'user_name': session.query(user).\n filter_by(id=x.user_id).one().nickname, 'user_avatar':\n session.query(user).filter_by(id=x.user_id).one().\n avatar}, comments_in_range))\n resp = {'status': 200, 'result': {'count': comments_count,\n 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)\n",
"step-3": "<mask token>\norm = config_orm_initial.initialize_orm()\nsession = orm['dict_session']\nArticle_list = orm['dict_Articlelist']\nuser = orm['dict_user']\napp = Blueprint('api_get_comments', __name__)\n\n\n@app.route('/comments/<article_id>', methods=['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n try:\n temp_list = json.loads(comments_range)\n if isinstance(temp_list, list) and len(temp_list) == 2:\n target_article = session.query(Article_list).filter_by(id=\n article_id).one_or_none()\n if target_article:\n target_comments = target_article.relate_comments\n comments_in_range = target_comments[-1 - temp_list[0]:-1 -\n temp_list[1]:-1]\n comments_count = len(target_comments)\n comments_list = list(map(lambda x: {'comment': x.content,\n 'time': x.time, 'user_name': session.query(user).\n filter_by(id=x.user_id).one().nickname, 'user_avatar':\n session.query(user).filter_by(id=x.user_id).one().\n avatar}, comments_in_range))\n resp = {'status': 200, 'result': {'count': comments_count,\n 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)\n",
"step-4": "from flask import Blueprint, jsonify, request, abort, current_app\nimport json\nfrom config import config_orm_initial\norm = config_orm_initial.initialize_orm()\nsession = orm['dict_session']\nArticle_list = orm['dict_Articlelist']\nuser = orm['dict_user']\napp = Blueprint('api_get_comments', __name__)\n\n\n@app.route('/comments/<article_id>', methods=['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n try:\n temp_list = json.loads(comments_range)\n if isinstance(temp_list, list) and len(temp_list) == 2:\n target_article = session.query(Article_list).filter_by(id=\n article_id).one_or_none()\n if target_article:\n target_comments = target_article.relate_comments\n comments_in_range = target_comments[-1 - temp_list[0]:-1 -\n temp_list[1]:-1]\n comments_count = len(target_comments)\n comments_list = list(map(lambda x: {'comment': x.content,\n 'time': x.time, 'user_name': session.query(user).\n filter_by(id=x.user_id).one().nickname, 'user_avatar':\n session.query(user).filter_by(id=x.user_id).one().\n avatar}, comments_in_range))\n resp = {'status': 200, 'result': {'count': comments_count,\n 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)\n",
"step-5": "# -*- coding: UTF-8 -*-\nfrom flask import Blueprint, jsonify, request, abort, current_app\nimport json\nfrom config import config_orm_initial\n\norm = config_orm_initial.initialize_orm()\nsession = orm['dict_session']\nArticle_list = orm['dict_Articlelist']\nuser = orm['dict_user']\n\napp = Blueprint('api_get_comments', __name__)\n\n@app.route('/comments/<article_id>', methods = ['POST'])\ndef get_comments(article_id):\n comments_range = request.form.get('comments_for_single')\n # 尝试把前端传来的参数解析成list\n try:\n temp_list = json.loads(comments_range)\n\n # 判断参数是否是list,并且只有2个元素\n if isinstance(temp_list, list) and len(temp_list) == 2:\n # 先找到对应的article\n target_article = session.query(Article_list).filter_by(id = article_id).one_or_none()\n # 如果能找到这篇文章\n if target_article:\n # 然后调用一对多方法,拿到这篇article对应的comments和comments总数\n target_comments = target_article.relate_comments\n # 拿到的结果和list差不多,所以取倒数排序\n comments_in_range = target_comments[-1-temp_list[0] : -1-temp_list[1]: -1]\n comments_count = len(target_comments)\n comments_list = list(map(\n lambda x:{\n 'comment':x.content, \n 'time':x.time, \n 'user_name':session.query(user).filter_by(id=x.user_id).one().nickname,\n 'user_avatar':session.query(user).filter_by(id=x.user_id).one().avatar\n },\n comments_in_range)\n )\n resp = {'status': 200, 'result': {'count': comments_count, 'commentsList': comments_list}}\n session.close()\n return jsonify(resp)\n # 如果不能找到这篇文章\n else:\n abort(400)\n else:\n abort(400)\n except Exception as e:\n current_app.logger.info(e)\n abort(400)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.utils import timezone
from factory import DjangoModelFactory
from djtriggers.tests.models import DummyTrigger
class DummyTriggerFactory(DjangoModelFactory):
class Meta:
model = DummyTrigger
trigger_type = 'dummy_trigger_test'
source = 'tests'
date_received = timezone.now()
date_processed = None
process_after = None
number_of_tries = 0
|
normal
|
{
"blob_id": "813354c9c294c0323c1b54cda7074fbffa49cdb3",
"index": 442,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass DummyTriggerFactory(DjangoModelFactory):\n\n\n class Meta:\n model = DummyTrigger\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DummyTriggerFactory(DjangoModelFactory):\n\n\n class Meta:\n model = DummyTrigger\n trigger_type = 'dummy_trigger_test'\n source = 'tests'\n date_received = timezone.now()\n date_processed = None\n process_after = None\n number_of_tries = 0\n",
"step-4": "from django.utils import timezone\nfrom factory import DjangoModelFactory\nfrom djtriggers.tests.models import DummyTrigger\n\n\nclass DummyTriggerFactory(DjangoModelFactory):\n\n\n class Meta:\n model = DummyTrigger\n trigger_type = 'dummy_trigger_test'\n source = 'tests'\n date_received = timezone.now()\n date_processed = None\n process_after = None\n number_of_tries = 0\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from .data_processing import make_request_data, clear_request_data, get_token_from_text
from .review import Review
|
normal
|
{
"blob_id": "5d654c056e6ef01e72821427c4f8dcb285755ee9",
"index": 2933,
"step-1": "<mask token>\n",
"step-2": "from .data_processing import make_request_data, clear_request_data, get_token_from_text\nfrom .review import Review\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def get_bits(x):
return np.where(x < 0, 0, 1)
<|reserved_special_token_0|>
def mkdir(file_path):
folder = os.path.dirname(file_path)
if not os.path.exists(folder):
os.makedirs(folder)
<|reserved_special_token_0|>
def concatenate(total, part):
return part if total is None else np.concatenate((total, part))
def complex_channel(m=NUM_ANT, n=NUM_ANT):
real = np.random.randn(m, n)
imag = np.random.randn(m, n)
h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,
real))))
return h
<|reserved_special_token_0|>
def random_distance(n, length):
x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
return np.sqrt(x ** 2 + y ** 2)
def zf_batch(y, h):
h_t = np.transpose(h, axes=[0, 2, 1])
f = np.linalg.inv(h_t @ h) @ h_t
z = f @ y
return np.where(z < 0, -1, 1) / np.sqrt(2)
<|reserved_special_token_0|>
def maximum_likelihood_detect_bits(y, h):
assert len(h.shape) == 3
batch_size, m, n = h.shape
s_mld = np.zeros([batch_size, n, 1])
if True:
dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)
else:
dst = None
for j in range(QPSK_CANDIDATE_SIZE):
s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])
dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]
)
if dst is None:
dst = dj
else:
dst = np.concatenate((dst, dj), axis=1)
min_indexes = dst.argmin(1)
for i, t in enumerate(min_indexes):
s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *
NUM_ANT, 1])
return get_bits(s_mld)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_bits(x):
return np.where(x < 0, 0, 1)
<|reserved_special_token_0|>
def mkdir(file_path):
folder = os.path.dirname(file_path)
if not os.path.exists(folder):
os.makedirs(folder)
def mkfile(file_path):
mkdir(file_path)
filename = pathlib.Path(file_path)
filename.touch(exist_ok=True)
def concatenate(total, part):
return part if total is None else np.concatenate((total, part))
def complex_channel(m=NUM_ANT, n=NUM_ANT):
real = np.random.randn(m, n)
imag = np.random.randn(m, n)
h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,
real))))
return h
<|reserved_special_token_0|>
def signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):
s_batch = None
random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE,
size=batch_size)
for t in range(batch_size):
i = int(random_indexes[t])
s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])
s_batch = concatenate(s_batch, s)
return s_batch
def random_distance(n, length):
x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
return np.sqrt(x ** 2 + y ** 2)
def zf_batch(y, h):
h_t = np.transpose(h, axes=[0, 2, 1])
f = np.linalg.inv(h_t @ h) @ h_t
z = f @ y
return np.where(z < 0, -1, 1) / np.sqrt(2)
<|reserved_special_token_0|>
def maximum_likelihood_detect_bits(y, h):
assert len(h.shape) == 3
batch_size, m, n = h.shape
s_mld = np.zeros([batch_size, n, 1])
if True:
dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)
else:
dst = None
for j in range(QPSK_CANDIDATE_SIZE):
s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])
dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]
)
if dst is None:
dst = dj
else:
dst = np.concatenate((dst, dj), axis=1)
min_indexes = dst.argmin(1)
for i, t in enumerate(min_indexes):
s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *
NUM_ANT, 1])
return get_bits(s_mld)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_bits(x):
return np.where(x < 0, 0, 1)
def check_wrong_bits(bits, bits_estimated):
return len(np.argwhere(bits != bits_estimated))
def mkdir(file_path):
folder = os.path.dirname(file_path)
if not os.path.exists(folder):
os.makedirs(folder)
def mkfile(file_path):
mkdir(file_path)
filename = pathlib.Path(file_path)
filename.touch(exist_ok=True)
def concatenate(total, part):
return part if total is None else np.concatenate((total, part))
def complex_channel(m=NUM_ANT, n=NUM_ANT):
real = np.random.randn(m, n)
imag = np.random.randn(m, n)
h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,
real))))
return h
def make_channel_batch():
h_batch = None
for _ in range(PACKETS_PER_BATCH):
h = complex_channel().reshape([1, 2 * NUM_ANT, 2 * NUM_ANT])
for _ in range(TIME_SLOTS_PER_PACKET):
h_batch = concatenate(h_batch, h)
return h_batch
def signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):
s_batch = None
random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE,
size=batch_size)
for t in range(batch_size):
i = int(random_indexes[t])
s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])
s_batch = concatenate(s_batch, s)
return s_batch
def random_distance(n, length):
x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
return np.sqrt(x ** 2 + y ** 2)
def zf_batch(y, h):
h_t = np.transpose(h, axes=[0, 2, 1])
f = np.linalg.inv(h_t @ h) @ h_t
z = f @ y
return np.where(z < 0, -1, 1) / np.sqrt(2)
<|reserved_special_token_0|>
def maximum_likelihood_detect_bits(y, h):
assert len(h.shape) == 3
batch_size, m, n = h.shape
s_mld = np.zeros([batch_size, n, 1])
if True:
dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)
else:
dst = None
for j in range(QPSK_CANDIDATE_SIZE):
s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])
dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]
)
if dst is None:
dst = dj
else:
dst = np.concatenate((dst, dj), axis=1)
min_indexes = dst.argmin(1)
for i, t in enumerate(min_indexes):
s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *
NUM_ANT, 1])
return get_bits(s_mld)
<|reserved_special_token_1|>
import os
import pathlib
from global_settings import *
def get_bits(x):
return np.where(x < 0, 0, 1)
def check_wrong_bits(bits, bits_estimated):
return len(np.argwhere(bits != bits_estimated))
def mkdir(file_path):
folder = os.path.dirname(file_path)
if not os.path.exists(folder):
os.makedirs(folder)
def mkfile(file_path):
mkdir(file_path)
filename = pathlib.Path(file_path)
filename.touch(exist_ok=True)
def concatenate(total, part):
return part if total is None else np.concatenate((total, part))
def complex_channel(m=NUM_ANT, n=NUM_ANT):
real = np.random.randn(m, n)
imag = np.random.randn(m, n)
h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,
real))))
return h
def make_channel_batch():
h_batch = None
for _ in range(PACKETS_PER_BATCH):
h = complex_channel().reshape([1, 2 * NUM_ANT, 2 * NUM_ANT])
for _ in range(TIME_SLOTS_PER_PACKET):
h_batch = concatenate(h_batch, h)
return h_batch
def signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):
s_batch = None
random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE,
size=batch_size)
for t in range(batch_size):
i = int(random_indexes[t])
s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])
s_batch = concatenate(s_batch, s)
return s_batch
def random_distance(n, length):
x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
return np.sqrt(x ** 2 + y ** 2)
def zf_batch(y, h):
h_t = np.transpose(h, axes=[0, 2, 1])
f = np.linalg.inv(h_t @ h) @ h_t
z = f @ y
return np.where(z < 0, -1, 1) / np.sqrt(2)
def lmmse_batch(y, h):
assert len(h.shape) == 3
batch_size, m, n = h.shape
eye = np.concatenate([np.eye(n).reshape([1, n, n]) * batch_size], axis=0)
ht = np.transpose(h, axes=[0, 2, 1])
z = np.linalg.inv(ht @ h + eye) @ ht @ y
return np.where(z < 0, -1, 1) / np.sqrt(2)
def maximum_likelihood_detect_bits(y, h):
assert len(h.shape) == 3
batch_size, m, n = h.shape
s_mld = np.zeros([batch_size, n, 1])
if True:
dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)
else:
dst = None
for j in range(QPSK_CANDIDATE_SIZE):
s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])
dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]
)
if dst is None:
dst = dj
else:
dst = np.concatenate((dst, dj), axis=1)
min_indexes = dst.argmin(1)
for i, t in enumerate(min_indexes):
s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *
NUM_ANT, 1])
return get_bits(s_mld)
<|reserved_special_token_1|>
import os
import pathlib
from global_settings import *
def get_bits(x):
return np.where(x < 0, 0, 1)
def check_wrong_bits(bits, bits_estimated):
return len(np.argwhere(bits != bits_estimated))
def mkdir(file_path):
folder = os.path.dirname(file_path)
if not os.path.exists(folder):
os.makedirs(folder)
def mkfile(file_path):
mkdir(file_path)
filename = pathlib.Path(file_path)
filename.touch(exist_ok=True)
def concatenate(total, part):
return part if total is None else np.concatenate((total, part))
def complex_channel(m=NUM_ANT, n=NUM_ANT):
real = np.random.randn(m, n)
imag = np.random.randn(m, n)
h = np.row_stack(
(
np.column_stack((real, -imag)),
np.column_stack((imag, real)),
)
)
return h
def make_channel_batch():
h_batch = None
for _ in range(PACKETS_PER_BATCH):
h = complex_channel().reshape([1, 2 * NUM_ANT, 2 * NUM_ANT])
for _ in range(TIME_SLOTS_PER_PACKET):
h_batch = concatenate(h_batch, h)
return h_batch
def signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):
s_batch = None
random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE, size=batch_size)
for t in range(batch_size):
i = int(random_indexes[t])
s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])
s_batch = concatenate(s_batch, s)
return s_batch
def random_distance(n, length):
x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2
return np.sqrt(x ** 2 + y ** 2)
def zf_batch(y, h):
h_t = np.transpose(h, axes=[0, 2, 1])
f = np.linalg.inv(h_t @ h) @ h_t
z = f @ y
return np.where(z < 0, -1, 1) / np.sqrt(2)
def lmmse_batch(y, h):
assert len(h.shape) == 3
batch_size, m, n = h.shape
eye = np.concatenate([np.eye(n).reshape([1, n, n]) * batch_size], axis=0)
ht = np.transpose(h, axes=[0, 2, 1])
z = np.linalg.inv(ht @ h + eye) @ ht @ y
return np.where(z < 0, -1, 1) / np.sqrt(2)
def maximum_likelihood_detect_bits(y, h):
assert len(h.shape) == 3
batch_size, m, n = h.shape
s_mld = np.zeros([batch_size, n, 1])
if True:
dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)
else:
dst = None
for j in range(QPSK_CANDIDATE_SIZE):
s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])
dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1])
if dst is None:
dst = dj
else:
dst = np.concatenate((dst, dj), axis=1)
min_indexes = dst.argmin(1)
for i, t in enumerate(min_indexes):
s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 * NUM_ANT, 1])
return get_bits(s_mld)
|
flexible
|
{
"blob_id": "74ffbd55867c4b2c6ccbef7d94e0c65aef139057",
"index": 7602,
"step-1": "<mask token>\n\n\ndef get_bits(x):\n return np.where(x < 0, 0, 1)\n\n\n<mask token>\n\n\ndef mkdir(file_path):\n folder = os.path.dirname(file_path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\n<mask token>\n\n\ndef concatenate(total, part):\n return part if total is None else np.concatenate((total, part))\n\n\ndef complex_channel(m=NUM_ANT, n=NUM_ANT):\n real = np.random.randn(m, n)\n imag = np.random.randn(m, n)\n h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,\n real))))\n return h\n\n\n<mask token>\n\n\ndef random_distance(n, length):\n x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n return np.sqrt(x ** 2 + y ** 2)\n\n\ndef zf_batch(y, h):\n h_t = np.transpose(h, axes=[0, 2, 1])\n f = np.linalg.inv(h_t @ h) @ h_t\n z = f @ y\n return np.where(z < 0, -1, 1) / np.sqrt(2)\n\n\n<mask token>\n\n\ndef maximum_likelihood_detect_bits(y, h):\n assert len(h.shape) == 3\n batch_size, m, n = h.shape\n s_mld = np.zeros([batch_size, n, 1])\n if True:\n dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)\n else:\n dst = None\n for j in range(QPSK_CANDIDATE_SIZE):\n s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])\n dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]\n )\n if dst is None:\n dst = dj\n else:\n dst = np.concatenate((dst, dj), axis=1)\n min_indexes = dst.argmin(1)\n for i, t in enumerate(min_indexes):\n s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *\n NUM_ANT, 1])\n return get_bits(s_mld)\n",
"step-2": "<mask token>\n\n\ndef get_bits(x):\n return np.where(x < 0, 0, 1)\n\n\n<mask token>\n\n\ndef mkdir(file_path):\n folder = os.path.dirname(file_path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef mkfile(file_path):\n mkdir(file_path)\n filename = pathlib.Path(file_path)\n filename.touch(exist_ok=True)\n\n\ndef concatenate(total, part):\n return part if total is None else np.concatenate((total, part))\n\n\ndef complex_channel(m=NUM_ANT, n=NUM_ANT):\n real = np.random.randn(m, n)\n imag = np.random.randn(m, n)\n h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,\n real))))\n return h\n\n\n<mask token>\n\n\ndef signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):\n s_batch = None\n random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE,\n size=batch_size)\n for t in range(batch_size):\n i = int(random_indexes[t])\n s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])\n s_batch = concatenate(s_batch, s)\n return s_batch\n\n\ndef random_distance(n, length):\n x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n return np.sqrt(x ** 2 + y ** 2)\n\n\ndef zf_batch(y, h):\n h_t = np.transpose(h, axes=[0, 2, 1])\n f = np.linalg.inv(h_t @ h) @ h_t\n z = f @ y\n return np.where(z < 0, -1, 1) / np.sqrt(2)\n\n\n<mask token>\n\n\ndef maximum_likelihood_detect_bits(y, h):\n assert len(h.shape) == 3\n batch_size, m, n = h.shape\n s_mld = np.zeros([batch_size, n, 1])\n if True:\n dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)\n else:\n dst = None\n for j in range(QPSK_CANDIDATE_SIZE):\n s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])\n dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]\n )\n if dst is None:\n dst = dj\n else:\n dst = np.concatenate((dst, dj), axis=1)\n min_indexes = dst.argmin(1)\n for i, t in enumerate(min_indexes):\n s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *\n NUM_ANT, 1])\n return 
get_bits(s_mld)\n",
"step-3": "<mask token>\n\n\ndef get_bits(x):\n return np.where(x < 0, 0, 1)\n\n\ndef check_wrong_bits(bits, bits_estimated):\n return len(np.argwhere(bits != bits_estimated))\n\n\ndef mkdir(file_path):\n folder = os.path.dirname(file_path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef mkfile(file_path):\n mkdir(file_path)\n filename = pathlib.Path(file_path)\n filename.touch(exist_ok=True)\n\n\ndef concatenate(total, part):\n return part if total is None else np.concatenate((total, part))\n\n\ndef complex_channel(m=NUM_ANT, n=NUM_ANT):\n real = np.random.randn(m, n)\n imag = np.random.randn(m, n)\n h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,\n real))))\n return h\n\n\ndef make_channel_batch():\n h_batch = None\n for _ in range(PACKETS_PER_BATCH):\n h = complex_channel().reshape([1, 2 * NUM_ANT, 2 * NUM_ANT])\n for _ in range(TIME_SLOTS_PER_PACKET):\n h_batch = concatenate(h_batch, h)\n return h_batch\n\n\ndef signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):\n s_batch = None\n random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE,\n size=batch_size)\n for t in range(batch_size):\n i = int(random_indexes[t])\n s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])\n s_batch = concatenate(s_batch, s)\n return s_batch\n\n\ndef random_distance(n, length):\n x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n return np.sqrt(x ** 2 + y ** 2)\n\n\ndef zf_batch(y, h):\n h_t = np.transpose(h, axes=[0, 2, 1])\n f = np.linalg.inv(h_t @ h) @ h_t\n z = f @ y\n return np.where(z < 0, -1, 1) / np.sqrt(2)\n\n\n<mask token>\n\n\ndef maximum_likelihood_detect_bits(y, h):\n assert len(h.shape) == 3\n batch_size, m, n = h.shape\n s_mld = np.zeros([batch_size, n, 1])\n if True:\n dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)\n else:\n dst = None\n for j in range(QPSK_CANDIDATE_SIZE):\n s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])\n 
dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]\n )\n if dst is None:\n dst = dj\n else:\n dst = np.concatenate((dst, dj), axis=1)\n min_indexes = dst.argmin(1)\n for i, t in enumerate(min_indexes):\n s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *\n NUM_ANT, 1])\n return get_bits(s_mld)\n",
"step-4": "import os\nimport pathlib\nfrom global_settings import *\n\n\ndef get_bits(x):\n return np.where(x < 0, 0, 1)\n\n\ndef check_wrong_bits(bits, bits_estimated):\n return len(np.argwhere(bits != bits_estimated))\n\n\ndef mkdir(file_path):\n folder = os.path.dirname(file_path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef mkfile(file_path):\n mkdir(file_path)\n filename = pathlib.Path(file_path)\n filename.touch(exist_ok=True)\n\n\ndef concatenate(total, part):\n return part if total is None else np.concatenate((total, part))\n\n\ndef complex_channel(m=NUM_ANT, n=NUM_ANT):\n real = np.random.randn(m, n)\n imag = np.random.randn(m, n)\n h = np.row_stack((np.column_stack((real, -imag)), np.column_stack((imag,\n real))))\n return h\n\n\ndef make_channel_batch():\n h_batch = None\n for _ in range(PACKETS_PER_BATCH):\n h = complex_channel().reshape([1, 2 * NUM_ANT, 2 * NUM_ANT])\n for _ in range(TIME_SLOTS_PER_PACKET):\n h_batch = concatenate(h_batch, h)\n return h_batch\n\n\ndef signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):\n s_batch = None\n random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE,\n size=batch_size)\n for t in range(batch_size):\n i = int(random_indexes[t])\n s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])\n s_batch = concatenate(s_batch, s)\n return s_batch\n\n\ndef random_distance(n, length):\n x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n return np.sqrt(x ** 2 + y ** 2)\n\n\ndef zf_batch(y, h):\n h_t = np.transpose(h, axes=[0, 2, 1])\n f = np.linalg.inv(h_t @ h) @ h_t\n z = f @ y\n return np.where(z < 0, -1, 1) / np.sqrt(2)\n\n\ndef lmmse_batch(y, h):\n assert len(h.shape) == 3\n batch_size, m, n = h.shape\n eye = np.concatenate([np.eye(n).reshape([1, n, n]) * batch_size], axis=0)\n ht = np.transpose(h, axes=[0, 2, 1])\n z = np.linalg.inv(ht @ h + eye) @ ht @ y\n return np.where(z < 0, -1, 1) / np.sqrt(2)\n\n\ndef 
maximum_likelihood_detect_bits(y, h):\n assert len(h.shape) == 3\n batch_size, m, n = h.shape\n s_mld = np.zeros([batch_size, n, 1])\n if True:\n dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)\n else:\n dst = None\n for j in range(QPSK_CANDIDATE_SIZE):\n s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])\n dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1]\n )\n if dst is None:\n dst = dj\n else:\n dst = np.concatenate((dst, dj), axis=1)\n min_indexes = dst.argmin(1)\n for i, t in enumerate(min_indexes):\n s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 *\n NUM_ANT, 1])\n return get_bits(s_mld)\n",
"step-5": "import os\nimport pathlib\n\nfrom global_settings import *\n\n\ndef get_bits(x):\n return np.where(x < 0, 0, 1)\n\n\ndef check_wrong_bits(bits, bits_estimated):\n return len(np.argwhere(bits != bits_estimated))\n\n\ndef mkdir(file_path):\n folder = os.path.dirname(file_path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n\ndef mkfile(file_path):\n mkdir(file_path)\n filename = pathlib.Path(file_path)\n filename.touch(exist_ok=True)\n\n\ndef concatenate(total, part):\n return part if total is None else np.concatenate((total, part))\n\n\ndef complex_channel(m=NUM_ANT, n=NUM_ANT):\n real = np.random.randn(m, n)\n imag = np.random.randn(m, n)\n h = np.row_stack(\n (\n np.column_stack((real, -imag)),\n np.column_stack((imag, real)),\n )\n )\n return h\n\n\ndef make_channel_batch():\n h_batch = None\n for _ in range(PACKETS_PER_BATCH):\n h = complex_channel().reshape([1, 2 * NUM_ANT, 2 * NUM_ANT])\n for _ in range(TIME_SLOTS_PER_PACKET):\n h_batch = concatenate(h_batch, h)\n return h_batch\n\n\ndef signal_batch(batch_size=TIMES_SLOTS_PER_BATCH):\n s_batch = None\n random_indexes = np.random.uniform(low=0, high=QPSK_CANDIDATE_SIZE, size=batch_size)\n for t in range(batch_size):\n i = int(random_indexes[t])\n s = QPSK_CANDIDATES[:, i:i + 1].reshape([1, 2 * NUM_ANT, 1])\n s_batch = concatenate(s_batch, s)\n return s_batch\n\n\ndef random_distance(n, length):\n x = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n y = np.random.uniform(-1, 1, [n, 1, 1]) * length / 2\n return np.sqrt(x ** 2 + y ** 2)\n\n\ndef zf_batch(y, h):\n h_t = np.transpose(h, axes=[0, 2, 1])\n f = np.linalg.inv(h_t @ h) @ h_t\n z = f @ y\n return np.where(z < 0, -1, 1) / np.sqrt(2)\n\n\ndef lmmse_batch(y, h):\n assert len(h.shape) == 3\n batch_size, m, n = h.shape\n eye = np.concatenate([np.eye(n).reshape([1, n, n]) * batch_size], axis=0)\n ht = np.transpose(h, axes=[0, 2, 1])\n z = np.linalg.inv(ht @ h + eye) @ ht @ y\n return np.where(z < 0, -1, 1) / np.sqrt(2)\n\n\ndef 
maximum_likelihood_detect_bits(y, h):\n assert len(h.shape) == 3\n batch_size, m, n = h.shape\n s_mld = np.zeros([batch_size, n, 1])\n\n if True:\n dst = np.sum(np.square(y - h @ QPSK_CANDIDATES), axis=1)\n else:\n dst = None\n for j in range(QPSK_CANDIDATE_SIZE):\n s_cand = QPSK_CANDIDATES[:, j:j + 1].reshape([1, 2 * NUM_ANT, 1])\n dj = np.sum(np.square(y - h @ s_cand), axis=(1, 2)).reshape([-1, 1])\n\n if dst is None:\n dst = dj\n else:\n dst = np.concatenate((dst, dj), axis=1)\n\n min_indexes = dst.argmin(1)\n for i, t in enumerate(min_indexes):\n s_mld[i:i + 1, :, :] = QPSK_CANDIDATES[:, t].reshape([1, 2 * NUM_ANT, 1])\n\n return get_bits(s_mld)\n\n\n",
"step-ids": [
7,
9,
11,
13,
14
]
}
|
[
7,
9,
11,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
any([(p in s) for p in patterns for s in strings])
<|reserved_special_token_1|>
# Find a list of patterns in a list of string in python
any([ p in s for p in patterns for s in strings ])
|
flexible
|
{
"blob_id": "c0b6c0636d1900a31cc455795838eb958d1daf65",
"index": 9421,
"step-1": "<mask token>\n",
"step-2": "any([(p in s) for p in patterns for s in strings])\n",
"step-3": "# Find a list of patterns in a list of string in python\nany([ p in s for p in patterns for s in strings ])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def AddOverflow(h):
nxbins = h.GetXaxis().GetNbins()
nybins = h.GetYaxis().GetNbins()
idxx = 0.0
idxy = nybins + 1
for ix in range(nxbins):
idxx = ix + 1
ovf_bincont = h.GetBinContent(idxx, idxy)
last_bincont = h.GetBinContent(idxx, nybins)
new_last_bincont = ovf_bincont + last_bincont
h.SetBinContent(idxx, nybins, new_last_bincont)
idxx = nxbins + 1
idxy = 0.0
for iy in range(nybins):
idxy = iy + 1
ovf_bincont = h.GetBinContent(idxx, idxy)
last_bincont = h.GetBinContent(nxbins, idxy)
new_last_bincont = ovf_bincont + last_bincont
h.SetBinContent(nxbins, idxy, new_last_bincont)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if not '_UL' in sys.argv[1]:
if sys.argv[4] == 'remote':
from samples import *
Debug = False
else:
from samples.samples import *
Debug = True
elif sys.argv[4] == 'remote':
from samplesUL import *
Debug = False
else:
from samples.samplesUL import *
Debug = True
<|reserved_special_token_0|>
def AddOverflow(h):
nxbins = h.GetXaxis().GetNbins()
nybins = h.GetYaxis().GetNbins()
idxx = 0.0
idxy = nybins + 1
for ix in range(nxbins):
idxx = ix + 1
ovf_bincont = h.GetBinContent(idxx, idxy)
last_bincont = h.GetBinContent(idxx, nybins)
new_last_bincont = ovf_bincont + last_bincont
h.SetBinContent(idxx, nybins, new_last_bincont)
idxx = nxbins + 1
idxy = 0.0
for iy in range(nybins):
idxy = iy + 1
ovf_bincont = h.GetBinContent(idxx, idxy)
last_bincont = h.GetBinContent(nxbins, idxy)
new_last_bincont = ovf_bincont + last_bincont
h.SetBinContent(nxbins, idxy, new_last_bincont)
<|reserved_special_token_0|>
print('Starting running at ' + str(startTime))
ROOT.gROOT.SetBatch()
<|reserved_special_token_0|>
for infile in file_list:
chain.Add(infile)
print('Number of events in chain ' + str(chain.GetEntries()))
<|reserved_special_token_0|>
print('Number of entries: ' + str(tree.GetEntries()))
<|reserved_special_token_0|>
if 'Data' in sample.name:
isMC = False
<|reserved_special_token_0|>
if 'aQGC' in sample.name:
IsDim8 = True
<|reserved_special_token_0|>
if 'DataMu' in sample.name:
dataMu = True
if 'DataEle' in sample.name:
dataEle = True
<|reserved_special_token_0|>
h2_BTaggingEff_Denom_b.Sumw2()
h2_BTaggingEff_Denom_c.Sumw2()
h2_BTaggingEff_Denom_udsg.Sumw2()
h2_BTaggingEff_Num_b.Sumw2()
h2_BTaggingEff_Num_c.Sumw2()
h2_BTaggingEff_Num_udsg.Sumw2()
for i in range(tree.GetEntries()):
if Debug:
if i > 100:
break
if not Debug and i % 5000 == 0:
print('Event #', i + 1, ' out of ', tree.GetEntries())
event = Event(tree, i)
electrons = Collection(event, 'Electron')
muons = Collection(event, 'Muon')
jets = Collection(event, 'Jet')
njets = len(jets)
fatjets = Collection(event, 'FatJet')
HLT = Object(event, 'HLT')
PV = Object(event, 'PV')
Flag = Object(event, 'Flag')
tightlep = None
tightlep_p4 = None
tightlep_p4t = None
tightlep_SF = None
tightlep_SFUp = None
tightlep_SFDown = None
recomet_p4t = None
PF_SF = None
PF_SFUp = None
PF_SFDown = None
PU_SF = None
PU_SFUp = None
PU_SFDown = None
year = sample.year
if isMC:
runPeriod = ''
else:
runPeriod = sample.runP
if not isMC:
if not Flag.eeBadScFilter:
continue
passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,
Flag)
if noTrigger:
continue
"""
GoodEle, ele_TightRegion = SelectLepton(electrons, False)
GoodMu, mu_TightRegion = SelectLepton(muons, True)
if GoodEle is None and GoodMu is None:
continue
ele_lepton_veto = -1
mu_lepton_veto = -1
if GoodEle != None:
ele_lepton_veto = LepVeto(GoodEle, electrons, muons)
if GoodMu != None:
mu_lepton_veto = LepVeto(GoodMu, electrons, muons)
SingleEle=False
SingleMu=False
ElMu=False
LeadLepFamily="not selected"
GoodLep = None
leptons = None
lepton_TightRegion = 0
if 'DataHT' not in sample.label:
if passEle and not passMu:
if GoodEle != None and ele_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
else:
continue
elif passMu and not passEle:
if GoodMu != None and mu_lepton_veto:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleEle = False
SingleMu = True
else:
continue
elif passMu and passEle:
ElMu=True
else:
continue
else:
if passHT:
ElMu = True
else:
continue
if ElMu:
if GoodMu==None and GoodEle!=None and ele_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
elif GoodMu!=None and mu_lepton_veto and GoodEle==None:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
elif GoodMu!=None and GoodEle!=None:
if ele_lepton_veto and not mu_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
elif not ele_lepton_veto and mu_lepton_veto:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
elif ele_lepton_veto and mu_lepton_veto:
if GoodEle.pt > GoodMu.pt:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
else:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
else:
continue
else:
continue
vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)
if SingleEle==True:
if isMC:
HLT_effLumi = lumiFinder("Ele", vTrigEle, sample.year)
leptons = electrons
elif SingleMu==True:
if isMC:
HLT_effLumi = lumiFinder("Mu", vTrigMu, sample.year)
leptons = muons
elif not (SingleMu or SingleEle):
continue
if SingleEle and dataMu:
continue
if SingleMu and dataEle:
continue
if GoodLep==None or (lepton_TightRegion < 1):
if Debug:
print("exiting at lepton selection (without saving)")
continue
"""
goodJets = get_Jet(jets, 30)
bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')
if len(goodJets) < 2 or len(fatjets) < 2:
continue
for jet in goodJets:
if abs(jet.partonFlavour) == 5:
h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))
elif abs(jet.partonFlavour) == 4:
h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))
else:
h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))
outTreeFile.cd()
h2_BTaggingEff_Denom_b.Write()
h2_BTaggingEff_Denom_c.Write()
h2_BTaggingEff_Denom_udsg.Write()
h2_BTaggingEff_Num_b.Write()
h2_BTaggingEff_Num_c.Write()
h2_BTaggingEff_Num_udsg.Write()
<|reserved_special_token_0|>
h2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, '')
h2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, '')
<|reserved_special_token_0|>
h2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, '')
h2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, '')
<|reserved_special_token_0|>
h2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, '')
h2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, '')
h2_Eff_b.Write()
h2_Eff_c.Write()
h2_Eff_udsg.Write()
<|reserved_special_token_0|>
print('Ending running at ' + str(endTime))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if not '_UL' in sys.argv[1]:
if sys.argv[4] == 'remote':
from samples import *
Debug = False
else:
from samples.samples import *
Debug = True
elif sys.argv[4] == 'remote':
from samplesUL import *
Debug = False
else:
from samples.samplesUL import *
Debug = True
sample = sample_dict[sys.argv[1]]
part_idx = sys.argv[2]
file_list = list(map(str, sys.argv[3].strip('[]').split(',')))
def AddOverflow(h):
    """Fold the overflow bins of a 2D histogram into its outermost in-range bins.

    The y-overflow row is added into the last y bin of each x column, the
    x-overflow column into the last x bin of each y row, and the top-right
    corner overflow into the (last x, last y) bin.  Each overflow bin is
    zeroed after being folded, so the function is idempotent and writing
    the histogram afterwards does not double-count the overflow.

    Bug fixes vs. the previous version: the corner overflow bin
    (nxbins+1, nybins+1) was silently dropped, and overflow bins were left
    populated (a second call would have double-counted them).
    """
    nxbins = h.GetXaxis().GetNbins()
    nybins = h.GetYaxis().GetNbins()
    x_ovf = nxbins + 1  # ROOT convention: bin nbins+1 is the overflow bin
    y_ovf = nybins + 1
    # Fold the y-overflow row into the last in-range y bin, column by column.
    for ix in range(1, nxbins + 1):
        h.SetBinContent(ix, nybins, h.GetBinContent(ix, nybins) + h.GetBinContent(ix, y_ovf))
        h.SetBinContent(ix, y_ovf, 0.0)
    # Fold the x-overflow column into the last in-range x bin, row by row.
    for iy in range(1, nybins + 1):
        h.SetBinContent(nxbins, iy, h.GetBinContent(nxbins, iy) + h.GetBinContent(x_ovf, iy))
        h.SetBinContent(x_ovf, iy, 0.0)
    # Fold the corner overflow into the outermost in-range bin.
    h.SetBinContent(nxbins, nybins, h.GetBinContent(nxbins, nybins) + h.GetBinContent(x_ovf, y_ovf))
    h.SetBinContent(x_ovf, y_ovf, 0.0)
    # NOTE(review): bin errors (Sumw2) are not combined, matching the
    # original behaviour — confirm whether error folding is needed.
startTime = datetime.datetime.now()
print('Starting running at ' + str(startTime))
ROOT.gROOT.SetBatch()
leadingjet_ptcut = 150.0
chain = ROOT.TChain('Events')
for infile in file_list:
chain.Add(infile)
print('Number of events in chain ' + str(chain.GetEntries()))
tree = InputTree(chain)
print('Number of entries: ' + str(tree.GetEntries()))
isMC = True
if 'Data' in sample.name:
isMC = False
IsDim8 = False
if 'aQGC' in sample.name:
IsDim8 = True
dataEle = False
dataMu = False
if 'DataMu' in sample.name:
dataMu = True
if 'DataEle' in sample.name:
dataEle = True
username = str(os.environ.get('USER'))
inituser = str(os.environ.get('USER')[0])
outTreeFile = ROOT.TFile(sample.label + '_part' + str(part_idx) + '.root',
'RECREATE')
ptNBins = 100
ptMin = 0
ptMax = 1000.0
etaNBins = 60
etaMin = -3.0
etaMax = 3.0
ptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])
etabins = array.array('d', [0.0, 0.8, 1.6, 2.4])
nptbins = len(ptbins) - 1
netabins = len(etabins) - 1
h2_BTaggingEff_Denom_b = ROOT.TH2D('h2_BTaggingEff_Denom_b',
'MC bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_c = ROOT.TH2D('h2_BTaggingEff_Denom_c',
'MC cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_udsg = ROOT.TH2D('h2_BTaggingEff_Denom_udsg',
'MC ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_b = ROOT.TH2D('h2_BTaggingEff_Num_b',
'Tagged bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_c = ROOT.TH2D('h2_BTaggingEff_Num_c',
'Tagged cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_udsg = ROOT.TH2D('h2_BTaggingEff_Num_udsg',
'Tagged ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_b.Sumw2()
h2_BTaggingEff_Denom_c.Sumw2()
h2_BTaggingEff_Denom_udsg.Sumw2()
h2_BTaggingEff_Num_b.Sumw2()
h2_BTaggingEff_Num_c.Sumw2()
h2_BTaggingEff_Num_udsg.Sumw2()
for i in range(tree.GetEntries()):
if Debug:
if i > 100:
break
if not Debug and i % 5000 == 0:
print('Event #', i + 1, ' out of ', tree.GetEntries())
event = Event(tree, i)
electrons = Collection(event, 'Electron')
muons = Collection(event, 'Muon')
jets = Collection(event, 'Jet')
njets = len(jets)
fatjets = Collection(event, 'FatJet')
HLT = Object(event, 'HLT')
PV = Object(event, 'PV')
Flag = Object(event, 'Flag')
tightlep = None
tightlep_p4 = None
tightlep_p4t = None
tightlep_SF = None
tightlep_SFUp = None
tightlep_SFDown = None
recomet_p4t = None
PF_SF = None
PF_SFUp = None
PF_SFDown = None
PU_SF = None
PU_SFUp = None
PU_SFDown = None
year = sample.year
if isMC:
runPeriod = ''
else:
runPeriod = sample.runP
if not isMC:
if not Flag.eeBadScFilter:
continue
passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,
Flag)
if noTrigger:
continue
"""
GoodEle, ele_TightRegion = SelectLepton(electrons, False)
GoodMu, mu_TightRegion = SelectLepton(muons, True)
if GoodEle is None and GoodMu is None:
continue
ele_lepton_veto = -1
mu_lepton_veto = -1
if GoodEle != None:
ele_lepton_veto = LepVeto(GoodEle, electrons, muons)
if GoodMu != None:
mu_lepton_veto = LepVeto(GoodMu, electrons, muons)
SingleEle=False
SingleMu=False
ElMu=False
LeadLepFamily="not selected"
GoodLep = None
leptons = None
lepton_TightRegion = 0
if 'DataHT' not in sample.label:
if passEle and not passMu:
if GoodEle != None and ele_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
else:
continue
elif passMu and not passEle:
if GoodMu != None and mu_lepton_veto:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleEle = False
SingleMu = True
else:
continue
elif passMu and passEle:
ElMu=True
else:
continue
else:
if passHT:
ElMu = True
else:
continue
if ElMu:
if GoodMu==None and GoodEle!=None and ele_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
elif GoodMu!=None and mu_lepton_veto and GoodEle==None:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
elif GoodMu!=None and GoodEle!=None:
if ele_lepton_veto and not mu_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
elif not ele_lepton_veto and mu_lepton_veto:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
elif ele_lepton_veto and mu_lepton_veto:
if GoodEle.pt > GoodMu.pt:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
else:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
else:
continue
else:
continue
vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)
if SingleEle==True:
if isMC:
HLT_effLumi = lumiFinder("Ele", vTrigEle, sample.year)
leptons = electrons
elif SingleMu==True:
if isMC:
HLT_effLumi = lumiFinder("Mu", vTrigMu, sample.year)
leptons = muons
elif not (SingleMu or SingleEle):
continue
if SingleEle and dataMu:
continue
if SingleMu and dataEle:
continue
if GoodLep==None or (lepton_TightRegion < 1):
if Debug:
print("exiting at lepton selection (without saving)")
continue
"""
goodJets = get_Jet(jets, 30)
bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')
if len(goodJets) < 2 or len(fatjets) < 2:
continue
for jet in goodJets:
if abs(jet.partonFlavour) == 5:
h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))
elif abs(jet.partonFlavour) == 4:
h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))
else:
h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))
# ----------------------------------------------------------------------
# Output: persist the raw numerator/denominator histograms and the
# derived TEfficiency maps, then close the file.
# ----------------------------------------------------------------------
outTreeFile.cd()
h2_BTaggingEff_Denom_b.Write()
h2_BTaggingEff_Denom_c.Write()
h2_BTaggingEff_Denom_udsg.Write()
h2_BTaggingEff_Num_b.Write()
h2_BTaggingEff_Num_c.Write()
h2_BTaggingEff_Num_udsg.Write()

# Per-flavour tagging efficiency built from (passed, total) histogram pairs.
h2_Eff_b = ROOT.TEfficiency("h2_BTaggingEff_b", "bjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, "")
h2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, "")
h2_Eff_c = ROOT.TEfficiency("h2_BTaggingEff_c", "cjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, "")
h2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, "")
h2_Eff_udsg = ROOT.TEfficiency("h2_BTaggingEff_udsg", "light jet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, "")
h2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, "")
h2_Eff_b.Write()
h2_Eff_c.Write()
h2_Eff_udsg.Write()

# Bug fix: close the output file so all objects are flushed to disk.
outTreeFile.Close()

endTime = datetime.datetime.now()
print('Ending running at ' + str(endTime))
<|reserved_special_token_1|>
import os
import sys
import ROOT
import math
import datetime
import copy
from array import array
from skimtree_utils_ssWW_wFakes_old import *
# Pick the sample catalogue: UL ("ultra-legacy") samples vs. legacy ones,
# and the flat module layout used on remote (batch) nodes vs. the package
# layout used locally.  Debug mode (short event loop) is enabled locally.
if '_UL' not in sys.argv[1]:
    if sys.argv[4] == 'remote':
        from samples import *
        Debug = False
    else:
        from samples.samples import *
        Debug = True
elif sys.argv[4] == 'remote':
    from samplesUL import *
    Debug = False
else:
    from samples.samplesUL import *
    Debug = True

# argv contract: [1] sample key, [2] output part index, [3] "[f1,f2,...]"
# comma-separated input file list, [4] 'remote' or anything else for local.
sample = sample_dict[sys.argv[1]]
part_idx = sys.argv[2]
file_list = list(map(str, sys.argv[3].strip('[]').split(',')))
def AddOverflow(h):
    """Fold the overflow bins of a 2D histogram into its outermost in-range bins.

    The y-overflow row is added into the last y bin of each x column, the
    x-overflow column into the last x bin of each y row, and the top-right
    corner overflow into the (last x, last y) bin.  Each overflow bin is
    zeroed after being folded, so the function is idempotent and writing
    the histogram afterwards does not double-count the overflow.

    Bug fixes vs. the previous version: the corner overflow bin
    (nxbins+1, nybins+1) was silently dropped, and overflow bins were left
    populated (a second call would have double-counted them).
    """
    nxbins = h.GetXaxis().GetNbins()
    nybins = h.GetYaxis().GetNbins()
    x_ovf = nxbins + 1  # ROOT convention: bin nbins+1 is the overflow bin
    y_ovf = nybins + 1
    # Fold the y-overflow row into the last in-range y bin, column by column.
    for ix in range(1, nxbins + 1):
        h.SetBinContent(ix, nybins, h.GetBinContent(ix, nybins) + h.GetBinContent(ix, y_ovf))
        h.SetBinContent(ix, y_ovf, 0.0)
    # Fold the x-overflow column into the last in-range x bin, row by row.
    for iy in range(1, nybins + 1):
        h.SetBinContent(nxbins, iy, h.GetBinContent(nxbins, iy) + h.GetBinContent(x_ovf, iy))
        h.SetBinContent(x_ovf, iy, 0.0)
    # Fold the corner overflow into the outermost in-range bin.
    h.SetBinContent(nxbins, nybins, h.GetBinContent(nxbins, nybins) + h.GetBinContent(x_ovf, y_ovf))
    h.SetBinContent(x_ovf, y_ovf, 0.0)
    # NOTE(review): bin errors (Sumw2) are not combined, matching the
    # original behaviour — confirm whether error folding is needed.
startTime = datetime.datetime.now()
print('Starting running at ' + str(startTime))
ROOT.gROOT.SetBatch()
leadingjet_ptcut = 150.0
chain = ROOT.TChain('Events')
for infile in file_list:
chain.Add(infile)
print('Number of events in chain ' + str(chain.GetEntries()))
tree = InputTree(chain)
print('Number of entries: ' + str(tree.GetEntries()))
isMC = True
if 'Data' in sample.name:
isMC = False
IsDim8 = False
if 'aQGC' in sample.name:
IsDim8 = True
dataEle = False
dataMu = False
if 'DataMu' in sample.name:
dataMu = True
if 'DataEle' in sample.name:
dataEle = True
username = str(os.environ.get('USER'))
inituser = str(os.environ.get('USER')[0])
outTreeFile = ROOT.TFile(sample.label + '_part' + str(part_idx) + '.root',
'RECREATE')
ptNBins = 100
ptMin = 0
ptMax = 1000.0
etaNBins = 60
etaMin = -3.0
etaMax = 3.0
ptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])
etabins = array.array('d', [0.0, 0.8, 1.6, 2.4])
nptbins = len(ptbins) - 1
netabins = len(etabins) - 1
h2_BTaggingEff_Denom_b = ROOT.TH2D('h2_BTaggingEff_Denom_b',
'MC bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_c = ROOT.TH2D('h2_BTaggingEff_Denom_c',
'MC cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_udsg = ROOT.TH2D('h2_BTaggingEff_Denom_udsg',
'MC ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_b = ROOT.TH2D('h2_BTaggingEff_Num_b',
'Tagged bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_c = ROOT.TH2D('h2_BTaggingEff_Num_c',
'Tagged cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_udsg = ROOT.TH2D('h2_BTaggingEff_Num_udsg',
'Tagged ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_b.Sumw2()
h2_BTaggingEff_Denom_c.Sumw2()
h2_BTaggingEff_Denom_udsg.Sumw2()
h2_BTaggingEff_Num_b.Sumw2()
h2_BTaggingEff_Num_c.Sumw2()
h2_BTaggingEff_Num_udsg.Sumw2()
for i in range(tree.GetEntries()):
if Debug:
if i > 100:
break
if not Debug and i % 5000 == 0:
print('Event #', i + 1, ' out of ', tree.GetEntries())
event = Event(tree, i)
electrons = Collection(event, 'Electron')
muons = Collection(event, 'Muon')
jets = Collection(event, 'Jet')
njets = len(jets)
fatjets = Collection(event, 'FatJet')
HLT = Object(event, 'HLT')
PV = Object(event, 'PV')
Flag = Object(event, 'Flag')
tightlep = None
tightlep_p4 = None
tightlep_p4t = None
tightlep_SF = None
tightlep_SFUp = None
tightlep_SFDown = None
recomet_p4t = None
PF_SF = None
PF_SFUp = None
PF_SFDown = None
PU_SF = None
PU_SFUp = None
PU_SFDown = None
year = sample.year
if isMC:
runPeriod = ''
else:
runPeriod = sample.runP
if not isMC:
if not Flag.eeBadScFilter:
continue
passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,
Flag)
if noTrigger:
continue
"""
GoodEle, ele_TightRegion = SelectLepton(electrons, False)
GoodMu, mu_TightRegion = SelectLepton(muons, True)
if GoodEle is None and GoodMu is None:
continue
ele_lepton_veto = -1
mu_lepton_veto = -1
if GoodEle != None:
ele_lepton_veto = LepVeto(GoodEle, electrons, muons)
if GoodMu != None:
mu_lepton_veto = LepVeto(GoodMu, electrons, muons)
SingleEle=False
SingleMu=False
ElMu=False
LeadLepFamily="not selected"
GoodLep = None
leptons = None
lepton_TightRegion = 0
if 'DataHT' not in sample.label:
if passEle and not passMu:
if GoodEle != None and ele_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
else:
continue
elif passMu and not passEle:
if GoodMu != None and mu_lepton_veto:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleEle = False
SingleMu = True
else:
continue
elif passMu and passEle:
ElMu=True
else:
continue
else:
if passHT:
ElMu = True
else:
continue
if ElMu:
if GoodMu==None and GoodEle!=None and ele_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
elif GoodMu!=None and mu_lepton_veto and GoodEle==None:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
elif GoodMu!=None and GoodEle!=None:
if ele_lepton_veto and not mu_lepton_veto:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
elif not ele_lepton_veto and mu_lepton_veto:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
elif ele_lepton_veto and mu_lepton_veto:
if GoodEle.pt > GoodMu.pt:
GoodLep = GoodEle
lepton_TightRegion = copy.deepcopy(ele_TightRegion)
SingleEle = True
SingleMu = False
else:
GoodLep = GoodMu
lepton_TightRegion = copy.deepcopy(mu_TightRegion)
SingleMu = True
SingleEle = False
else:
continue
else:
continue
vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)
if SingleEle==True:
if isMC:
HLT_effLumi = lumiFinder("Ele", vTrigEle, sample.year)
leptons = electrons
elif SingleMu==True:
if isMC:
HLT_effLumi = lumiFinder("Mu", vTrigMu, sample.year)
leptons = muons
elif not (SingleMu or SingleEle):
continue
if SingleEle and dataMu:
continue
if SingleMu and dataEle:
continue
if GoodLep==None or (lepton_TightRegion < 1):
if Debug:
print("exiting at lepton selection (without saving)")
continue
"""
goodJets = get_Jet(jets, 30)
bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')
if len(goodJets) < 2 or len(fatjets) < 2:
continue
for jet in goodJets:
if abs(jet.partonFlavour) == 5:
h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))
elif abs(jet.partonFlavour) == 4:
h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))
else:
h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))
if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))
# ----------------------------------------------------------------------
# Output: persist the raw numerator/denominator histograms and the
# derived TEfficiency maps, then close the file.
# ----------------------------------------------------------------------
outTreeFile.cd()
h2_BTaggingEff_Denom_b.Write()
h2_BTaggingEff_Denom_c.Write()
h2_BTaggingEff_Denom_udsg.Write()
h2_BTaggingEff_Num_b.Write()
h2_BTaggingEff_Num_c.Write()
h2_BTaggingEff_Num_udsg.Write()

# Per-flavour tagging efficiency built from (passed, total) histogram pairs.
h2_Eff_b = ROOT.TEfficiency("h2_BTaggingEff_b", "bjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, "")
h2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, "")
h2_Eff_c = ROOT.TEfficiency("h2_BTaggingEff_c", "cjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, "")
h2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, "")
h2_Eff_udsg = ROOT.TEfficiency("h2_BTaggingEff_udsg", "light jet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, "")
h2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, "")
h2_Eff_b.Write()
h2_Eff_c.Write()
h2_Eff_udsg.Write()

# Bug fix: close the output file so all objects are flushed to disk.
outTreeFile.Close()

endTime = datetime.datetime.now()
print('Ending running at ' + str(endTime))
<|reserved_special_token_1|>
#!/bin/env python3
import os
##print(os.environ)
##print("**********************************************************************")
##print("**********************************************************************")
##print("**********************************************************************")
##print(str(os.environ.get('PYTHONPATH')))
##print(str(os.environ.get('PYTHON3PATH')))
import sys
##print("*************** This is system version info ***************************")
##print(sys.version_info)
#import platform
##print("*************** This is python version info ***************************")
##print(platform.python_version())
import ROOT
##print("Succesfully imported ROOT")
import math
import datetime
import copy
from array import array
from skimtree_utils_ssWW_wFakes_old import *
# Pick the sample catalogue: UL ("ultra-legacy") samples vs. legacy ones,
# and the flat module layout used on remote (batch) nodes vs. the package
# layout used locally.  Debug mode (short event loop) is enabled locally.
if "_UL" not in sys.argv[1]:
    if sys.argv[4] == 'remote':
        from samples import *
        Debug = False
    else:
        from samples.samples import *
        Debug = True
else:
    if sys.argv[4] == 'remote':
        from samplesUL import *
        Debug = False
    else:
        from samples.samplesUL import *
        Debug = True

# argv contract: [1] sample key, [2] output part index, [3] "[f1,f2,...]"
# comma-separated input file list, [4] 'remote' or anything else for local.
sample = sample_dict[sys.argv[1]]
part_idx = sys.argv[2]
file_list = list(map(str, sys.argv[3].strip('[]').split(',')))
#print("file_list: ", file_list, "\nloop #1 over it")
#for infile in file_list:
#print(infile)
def AddOverflow(h):
    """Fold the overflow bins of a 2D histogram into its outermost in-range bins.

    The y-overflow row is added into the last y bin of each x column, the
    x-overflow column into the last x bin of each y row, and the top-right
    corner overflow into the (last x, last y) bin.  Each overflow bin is
    zeroed after being folded, so the function is idempotent and writing
    the histogram afterwards does not double-count the overflow.

    Bug fixes vs. the previous version: the corner overflow bin
    (nxbins+1, nybins+1) was silently dropped, and overflow bins were left
    populated (a second call would have double-counted them).
    """
    nxbins = h.GetXaxis().GetNbins()
    nybins = h.GetYaxis().GetNbins()
    x_ovf = nxbins + 1  # ROOT convention: bin nbins+1 is the overflow bin
    y_ovf = nybins + 1
    # Fold the y-overflow row into the last in-range y bin, column by column.
    for ix in range(1, nxbins + 1):
        h.SetBinContent(ix, nybins, h.GetBinContent(ix, nybins) + h.GetBinContent(ix, y_ovf))
        h.SetBinContent(ix, y_ovf, 0.0)
    # Fold the x-overflow column into the last in-range x bin, row by row.
    for iy in range(1, nybins + 1):
        h.SetBinContent(nxbins, iy, h.GetBinContent(nxbins, iy) + h.GetBinContent(x_ovf, iy))
        h.SetBinContent(x_ovf, iy, 0.0)
    # Fold the corner overflow into the outermost in-range bin.
    h.SetBinContent(nxbins, nybins, h.GetBinContent(nxbins, nybins) + h.GetBinContent(x_ovf, y_ovf))
    h.SetBinContent(x_ovf, y_ovf, 0.0)
    # NOTE(review): bin errors (Sumw2) are not combined, matching the
    # original behaviour — confirm whether error folding is needed.
# ----------------------------------------------------------------------
# Job setup: build the input chain, classify the sample, open the output
# file and book the b-tagging-efficiency histograms.
# ----------------------------------------------------------------------
startTime = datetime.datetime.now()
print("Starting running at " + str(startTime))
ROOT.gROOT.SetBatch()  # batch mode: no graphics windows, output goes to files
leadingjet_ptcut = 150.  # NOTE(review): unused in this script — confirm before removing
# Chain every input file's "Events" tree into one logical tree.
chain = ROOT.TChain('Events')
for infile in file_list:
    chain.Add(infile)
print("Number of events in chain " + str(chain.GetEntries()))
tree = InputTree(chain)
print("Number of entries: " +str(tree.GetEntries()))
# Sample-classification flags, derived from the sample name.
isMC = True
if ('Data' in sample.name):
    isMC = False
IsDim8 = False  # aQGC (dimension-8 EFT) signal sample
if 'aQGC' in sample.name:
    IsDim8 = True
dataEle = False
dataMu = False
if 'DataMu' in sample.name:
    dataMu = True
if 'DataEle' in sample.name:
    dataEle = True
username = str(os.environ.get('USER'))
inituser = str(os.environ.get('USER')[0])
# Output ROOT file for this part of the sample, created in the working dir.
outTreeFile = ROOT.TFile(sample.label+"_part"+str(part_idx)+".root", "RECREATE")
# Binning for the efficiency maps (pt in GeV, |eta|).  The uniform
# ptNBins/etaNBins values below are not used for the TH2Ds; the variable
# ptbins/etabins arrays are.
ptNBins = 100
ptMin = 0
ptMax = 1000.
etaNBins = 60
etaMin = -3.
etaMax = 3.
# NOTE(review): `array` must resolve to the array *module* here, yet the
# top of the file does `from array import array` (the class).  Presumably
# the star import from skimtree_utils re-binds `array` to the module —
# confirm, otherwise this line would raise AttributeError.
ptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])
etabins = array.array('d', [0.0, 0.8, 1.6, 2.4])
nptbins = len(ptbins)-1
netabins = len(etabins)-1
# Denominator: all MC jets of a given flavour; numerator: those passing
# the DeepFlavour medium b-tag (both filled in the event loop below).
h2_BTaggingEff_Denom_b = ROOT.TH2D("h2_BTaggingEff_Denom_b", "MC bjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_c = ROOT.TH2D("h2_BTaggingEff_Denom_c", "MC cjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Denom_udsg = ROOT.TH2D("h2_BTaggingEff_Denom_udsg", "MC ljet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_b = ROOT.TH2D("h2_BTaggingEff_Num_b", "Tagged bjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_c = ROOT.TH2D("h2_BTaggingEff_Num_c", "Tagged cjet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_BTaggingEff_Num_udsg = ROOT.TH2D("h2_BTaggingEff_Num_udsg", "Tagged ljet;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
# Track sum of squared weights so bin errors stay meaningful.
h2_BTaggingEff_Denom_b.Sumw2()
h2_BTaggingEff_Denom_c.Sumw2()
h2_BTaggingEff_Denom_udsg.Sumw2()
h2_BTaggingEff_Num_b.Sumw2()
h2_BTaggingEff_Num_c.Sumw2()
h2_BTaggingEff_Num_udsg.Sumw2()
# ----------------------------------------------------------------------
# Event loop: fill the b-tagging-efficiency numerator/denominator
# histograms, binned in (pt, |eta|), per true parton flavour.
# ----------------------------------------------------------------------
for i in range(tree.GetEntries()):
    if Debug:
        if i > 100:
            break
    if not Debug and i % 5000 == 0:
        print("Event #", i + 1, " out of ", tree.GetEntries())

    # Read the event and the NanoAOD collections used below.
    event = Event(tree, i)
    electrons = Collection(event, "Electron")
    muons = Collection(event, "Muon")
    jets = Collection(event, "Jet")
    njets = len(jets)
    fatjets = Collection(event, "FatJet")
    HLT = Object(event, "HLT")
    PV = Object(event, "PV")
    Flag = Object(event, 'Flag')

    year = sample.year
    if isMC:
        runPeriod = ''
    else:
        runPeriod = sample.runP

    # Data-only event-quality filter.
    if not isMC:
        if not Flag.eeBadScFilter:
            continue

    # Trigger decision (per year / run period).
    passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod, Flag)
    if noTrigger:
        continue

    # NOTE(review): a ~100-line lepton-selection block used to sit here but
    # was already disabled (it survived only as a no-op string literal and
    # a set of never-used placeholder locals); removed as dead code.

    # Jets with pt > 30 GeV; require at least 2 AK4 jets and 2 AK8 jets.
    goodJets = get_Jet(jets, 30)
    bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')  # currently unused downstream
    if len(goodJets) < 2 or len(fatjets) < 2:
        continue

    for jet in goodJets:
        # partonFlavour is MC truth information; this loop assumes MC
        # input — TODO confirm the script is never run on Data.
        if abs(jet.partonFlavour) == 5:
            h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))
            if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
                h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))
        elif abs(jet.partonFlavour) == 4:
            h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))
            if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
                h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))
        else:
            # Everything that is neither b nor c is counted as light (udsg).
            h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))
            if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:
                h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))
# ----------------------------------------------------------------------
# Output: persist the raw numerator/denominator histograms and the
# derived TEfficiency maps, then close the file.
# ----------------------------------------------------------------------
outTreeFile.cd()
h2_BTaggingEff_Denom_b.Write()
h2_BTaggingEff_Denom_c.Write()
h2_BTaggingEff_Denom_udsg.Write()
h2_BTaggingEff_Num_b.Write()
h2_BTaggingEff_Num_c.Write()
h2_BTaggingEff_Num_udsg.Write()

# Per-flavour tagging efficiency built from (passed, total) histogram pairs.
h2_Eff_b = ROOT.TEfficiency("h2_BTaggingEff_b", "bjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, "")
h2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, "")
h2_Eff_c = ROOT.TEfficiency("h2_BTaggingEff_c", "cjet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, "")
h2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, "")
h2_Eff_udsg = ROOT.TEfficiency("h2_BTaggingEff_udsg", "light jet efficiency;p_{T} [GeV];#eta", nptbins, ptbins, netabins, etabins)
h2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, "")
h2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, "")
h2_Eff_b.Write()
h2_Eff_c.Write()
h2_Eff_udsg.Write()

# Bug fix: close the output file so all objects are flushed to disk.
outTreeFile.Close()

endTime = datetime.datetime.now()
print("Ending running at " + str(endTime))
|
flexible
|
{
"blob_id": "b49696d6cac5fbf97172aa7cf16903d002262b5c",
"index": 1940,
"step-1": "<mask token>\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\n<mask token>\n",
"step-2": "<mask token>\nif not '_UL' in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelif sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\nelse:\n from samples.samplesUL import *\n Debug = True\n<mask token>\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\n<mask token>\nprint('Starting running at ' + str(startTime))\nROOT.gROOT.SetBatch()\n<mask token>\nfor infile in file_list:\n chain.Add(infile)\nprint('Number of events in chain ' + str(chain.GetEntries()))\n<mask token>\nprint('Number of entries: ' + str(tree.GetEntries()))\n<mask token>\nif 'Data' in sample.name:\n isMC = False\n<mask token>\nif 'aQGC' in sample.name:\n IsDim8 = True\n<mask token>\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\n<mask token>\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\nfor i in range(tree.GetEntries()):\n if Debug:\n if i > 100:\n break\n if not Debug and i % 5000 == 0:\n print('Event #', i + 1, ' out of ', tree.GetEntries())\n event = Event(tree, i)\n electrons = Collection(event, 'Electron')\n muons = Collection(event, 'Muon')\n jets = Collection(event, 'Jet')\n njets = len(jets)\n fatjets = 
Collection(event, 'FatJet')\n HLT = Object(event, 'HLT')\n PV = Object(event, 'PV')\n Flag = Object(event, 'Flag')\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n year = sample.year\n if isMC:\n runPeriod = ''\n else:\n runPeriod = sample.runP\n if not isMC:\n if not Flag.eeBadScFilter:\n continue\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,\n Flag)\n if noTrigger:\n continue\n \"\"\"\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = 
copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n \"\"\"\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n if len(goodJets) < 2 or len(fatjets) < 2:\n continue\n for jet in goodJets:\n if abs(jet.partonFlavour) == 5:\n h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif abs(jet.partonFlavour) == 4:\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 
'M')[0]) == 1:\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\n<mask token>\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, '')\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, '')\n<mask token>\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, '')\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, '')\n<mask token>\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, '')\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, '')\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\n<mask token>\nprint('Ending running at ' + str(endTime))\n",
"step-3": "<mask token>\nif not '_UL' in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelif sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\nelse:\n from samples.samplesUL import *\n Debug = True\nsample = sample_dict[sys.argv[1]]\npart_idx = sys.argv[2]\nfile_list = list(map(str, sys.argv[3].strip('[]').split(',')))\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\nstartTime = datetime.datetime.now()\nprint('Starting running at ' + str(startTime))\nROOT.gROOT.SetBatch()\nleadingjet_ptcut = 150.0\nchain = ROOT.TChain('Events')\nfor infile in file_list:\n chain.Add(infile)\nprint('Number of events in chain ' + str(chain.GetEntries()))\ntree = InputTree(chain)\nprint('Number of entries: ' + str(tree.GetEntries()))\nisMC = True\nif 'Data' in sample.name:\n isMC = False\nIsDim8 = False\nif 'aQGC' in sample.name:\n IsDim8 = True\ndataEle = False\ndataMu = False\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\nusername = str(os.environ.get('USER'))\ninituser = str(os.environ.get('USER')[0])\noutTreeFile = ROOT.TFile(sample.label + '_part' + str(part_idx) + '.root',\n 'RECREATE')\nptNBins = 100\nptMin = 0\nptMax = 1000.0\netaNBins = 60\netaMin = -3.0\netaMax = 3.0\nptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])\netabins = 
array.array('d', [0.0, 0.8, 1.6, 2.4])\nnptbins = len(ptbins) - 1\nnetabins = len(etabins) - 1\nh2_BTaggingEff_Denom_b = ROOT.TH2D('h2_BTaggingEff_Denom_b',\n 'MC bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_c = ROOT.TH2D('h2_BTaggingEff_Denom_c',\n 'MC cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_udsg = ROOT.TH2D('h2_BTaggingEff_Denom_udsg',\n 'MC ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_b = ROOT.TH2D('h2_BTaggingEff_Num_b',\n 'Tagged bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_c = ROOT.TH2D('h2_BTaggingEff_Num_c',\n 'Tagged cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_udsg = ROOT.TH2D('h2_BTaggingEff_Num_udsg',\n 'Tagged ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\nfor i in range(tree.GetEntries()):\n if Debug:\n if i > 100:\n break\n if not Debug and i % 5000 == 0:\n print('Event #', i + 1, ' out of ', tree.GetEntries())\n event = Event(tree, i)\n electrons = Collection(event, 'Electron')\n muons = Collection(event, 'Muon')\n jets = Collection(event, 'Jet')\n njets = len(jets)\n fatjets = Collection(event, 'FatJet')\n HLT = Object(event, 'HLT')\n PV = Object(event, 'PV')\n Flag = Object(event, 'Flag')\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n year = sample.year\n if isMC:\n runPeriod = ''\n else:\n runPeriod = sample.runP\n if not isMC:\n if not Flag.eeBadScFilter:\n continue\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,\n Flag)\n if 
noTrigger:\n continue\n \"\"\"\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu 
= False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n \"\"\"\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n if len(goodJets) < 2 or len(fatjets) < 2:\n continue\n for jet in goodJets:\n if abs(jet.partonFlavour) == 5:\n h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif abs(jet.partonFlavour) == 4:\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\nh2_Eff_b = ROOT.TEfficiency('h2_BTaggingEff_b',\n 'bjet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, '')\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, '')\nh2_Eff_c = ROOT.TEfficiency('h2_BTaggingEff_c',\n 'cjet efficiency;p_{T} [GeV];#eta', nptbins, 
ptbins, netabins, etabins)\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, '')\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, '')\nh2_Eff_udsg = ROOT.TEfficiency('h2_BTaggingEff_udsg',\n 'light jet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins\n )\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, '')\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, '')\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\nendTime = datetime.datetime.now()\nprint('Ending running at ' + str(endTime))\n",
"step-4": "import os\nimport sys\nimport ROOT\nimport math\nimport datetime\nimport copy\nfrom array import array\nfrom skimtree_utils_ssWW_wFakes_old import *\nif not '_UL' in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelif sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\nelse:\n from samples.samplesUL import *\n Debug = True\nsample = sample_dict[sys.argv[1]]\npart_idx = sys.argv[2]\nfile_list = list(map(str, sys.argv[3].strip('[]').split(',')))\n\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n idxx = 0.0\n idxy = nybins + 1\n for ix in range(nxbins):\n idxx = ix + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n idxx = nxbins + 1\n idxy = 0.0\n for iy in range(nybins):\n idxy = iy + 1\n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\nstartTime = datetime.datetime.now()\nprint('Starting running at ' + str(startTime))\nROOT.gROOT.SetBatch()\nleadingjet_ptcut = 150.0\nchain = ROOT.TChain('Events')\nfor infile in file_list:\n chain.Add(infile)\nprint('Number of events in chain ' + str(chain.GetEntries()))\ntree = InputTree(chain)\nprint('Number of entries: ' + str(tree.GetEntries()))\nisMC = True\nif 'Data' in sample.name:\n isMC = False\nIsDim8 = False\nif 'aQGC' in sample.name:\n IsDim8 = True\ndataEle = False\ndataMu = False\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\nusername = str(os.environ.get('USER'))\ninituser = str(os.environ.get('USER')[0])\noutTreeFile = ROOT.TFile(sample.label + '_part' + str(part_idx) + '.root',\n 'RECREATE')\nptNBins = 100\nptMin = 0\nptMax = 
1000.0\netaNBins = 60\netaMin = -3.0\netaMax = 3.0\nptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])\netabins = array.array('d', [0.0, 0.8, 1.6, 2.4])\nnptbins = len(ptbins) - 1\nnetabins = len(etabins) - 1\nh2_BTaggingEff_Denom_b = ROOT.TH2D('h2_BTaggingEff_Denom_b',\n 'MC bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_c = ROOT.TH2D('h2_BTaggingEff_Denom_c',\n 'MC cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_udsg = ROOT.TH2D('h2_BTaggingEff_Denom_udsg',\n 'MC ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_b = ROOT.TH2D('h2_BTaggingEff_Num_b',\n 'Tagged bjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_c = ROOT.TH2D('h2_BTaggingEff_Num_c',\n 'Tagged cjet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_udsg = ROOT.TH2D('h2_BTaggingEff_Num_udsg',\n 'Tagged ljet;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\nfor i in range(tree.GetEntries()):\n if Debug:\n if i > 100:\n break\n if not Debug and i % 5000 == 0:\n print('Event #', i + 1, ' out of ', tree.GetEntries())\n event = Event(tree, i)\n electrons = Collection(event, 'Electron')\n muons = Collection(event, 'Muon')\n jets = Collection(event, 'Jet')\n njets = len(jets)\n fatjets = Collection(event, 'FatJet')\n HLT = Object(event, 'HLT')\n PV = Object(event, 'PV')\n Flag = Object(event, 'Flag')\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n year = sample.year\n if isMC:\n runPeriod = ''\n else:\n runPeriod = sample.runP\n if not isMC:\n if 
not Flag.eeBadScFilter:\n continue\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod,\n Flag)\n if noTrigger:\n continue\n \"\"\"\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if 
GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n \"\"\"\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n if len(goodJets) < 2 or len(fatjets) < 2:\n continue\n for jet in goodJets:\n if abs(jet.partonFlavour) == 5:\n h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif abs(jet.partonFlavour) == 4:\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if len(bjet_filter([jet], 'DeepFlv', 'M')[0]) == 1:\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\nh2_Eff_b = ROOT.TEfficiency('h2_BTaggingEff_b',\n 'bjet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, 
'')\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, '')\nh2_Eff_c = ROOT.TEfficiency('h2_BTaggingEff_c',\n 'cjet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins)\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, '')\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, '')\nh2_Eff_udsg = ROOT.TEfficiency('h2_BTaggingEff_udsg',\n 'light jet efficiency;p_{T} [GeV];#eta', nptbins, ptbins, netabins, etabins\n )\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, '')\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, '')\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\nendTime = datetime.datetime.now()\nprint('Ending running at ' + str(endTime))\n",
"step-5": "#!/bin/env python3\nimport os\n##print(os.environ)\n##print(\"**********************************************************************\")\n##print(\"**********************************************************************\")\n##print(\"**********************************************************************\")\n##print(str(os.environ.get('PYTHONPATH')))\n##print(str(os.environ.get('PYTHON3PATH')))\nimport sys\n##print(\"*************** This is system version info ***************************\")\n##print(sys.version_info)\n#import platform\n##print(\"*************** This is python version info ***************************\")\n##print(platform.python_version())\nimport ROOT\n##print(\"Succesfully imported ROOT\")\nimport math\nimport datetime\nimport copy\nfrom array import array\nfrom skimtree_utils_ssWW_wFakes_old import *\n\nif not \"_UL\" in sys.argv[1]:\n if sys.argv[4] == 'remote':\n from samples import *\n Debug = False\n else:\n from samples.samples import *\n Debug = True\nelse:\n if sys.argv[4] == 'remote':\n from samplesUL import *\n Debug = False\n else:\n from samples.samplesUL import *\n Debug = True\n\nsample = sample_dict[sys.argv[1]]\npart_idx = sys.argv[2]\nfile_list = list(map(str, sys.argv[3].strip('[]').split(',')))\n#print(\"file_list: \", file_list, \"\\nloop #1 over it\")\n#for infile in file_list:\n #print(infile)\n\ndef AddOverflow(h):\n nxbins = h.GetXaxis().GetNbins()\n nybins = h.GetYaxis().GetNbins()\n\n idxx = 0.\n idxy = nybins + 1 \n for ix in range(nxbins):\n idxx = ix + 1 \n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(idxx, nybins)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(idxx, nybins, new_last_bincont)\n\n idxx = nxbins + 1 \n idxy = 0. 
\n for iy in range(nybins):\n idxy = iy + 1 \n ovf_bincont = h.GetBinContent(idxx, idxy)\n last_bincont = h.GetBinContent(nxbins, idxy)\n new_last_bincont = ovf_bincont + last_bincont\n h.SetBinContent(nxbins, idxy, new_last_bincont)\n\n\nstartTime = datetime.datetime.now()\nprint(\"Starting running at \" + str(startTime))\n\nROOT.gROOT.SetBatch()\n\nleadingjet_ptcut = 150.\n\nchain = ROOT.TChain('Events')\n#print(chain)\n#print(\"loop #2 over file_list\")\nfor infile in file_list: \n #print(\"Adding %s to the chain\" %(infile))\n chain.Add(infile)\n\nprint(\"Number of events in chain \" + str(chain.GetEntries()))\n#print(\"Number of events in tree from chain \" + str((chain.GetTree()).GetEntries()))\n#print(\"Type of tree from chain \" + str(type(chain.GetTree())))\n#treechain = (ROOT.TTree)(chain.GetTree())\ntree = InputTree(chain)\nprint(\"Number of entries: \" +str(tree.GetEntries()))\n#print(\"tree: \", tree)\n\nisMC = True\nif ('Data' in sample.name):\n isMC = False\n\n#MCReco = MCReco * isMC\n\nIsDim8 = False\nif 'aQGC' in sample.name:\n IsDim8 = True\n\ndataEle = False\ndataMu = False\nif 'DataMu' in sample.name:\n dataMu = True\nif 'DataEle' in sample.name:\n dataEle = True\n\nusername = str(os.environ.get('USER'))\ninituser = str(os.environ.get('USER')[0])\n#folder = 'vbtag'\n#if not os.path.exists(\"/eos/user/\" + inituser + \"/\" + username + \"/VBS/nosynch/\" + folder + \"/\" + sample.label):\n #os.makedirs(\"/eos/user/\" + inituser + \"/\" + username +\"/VBS/nosynch/\" + folder + \"/\" + sample.label)\n#outpath = \"/eos/user/\" + inituser + \"/\" + username +\"/VBS/nosynch/\" + folder + \"/\" + sample.label + \"/\"\n#++++++++++++++++++++++++++++++++++\n#++ branching the new trees ++\n#++++++++++++++++++++++++++++++++++\n#print(outpath + sample.label+\"_part\"+str(part_idx)+\".root\")\noutTreeFile = ROOT.TFile(sample.label+\"_part\"+str(part_idx)+\".root\", \"RECREATE\") #some name of the output file\n\n#++++++++++++++++++++++++++++++++++\n#++ All 
category ++\n#++++++++++++++++++++++++++++++++++\n\n#++++++++++++++++++++++++++++++++++\n#++ Efficiency studies ++\n#++++++++++++++++++++++++++++++++++\nptNBins = 100\nptMin = 0\nptMax = 1000.\netaNBins = 60\netaMin = -3.\netaMax = 3.\nptbins = array.array('d', [30, 50, 80, 140, 200, 300, 600, 1000])\netabins = array.array('d', [0.0, 0.8, 1.6, 2.4])\nnptbins = len(ptbins)-1\nnetabins = len(etabins)-1\nh2_BTaggingEff_Denom_b = ROOT.TH2D(\"h2_BTaggingEff_Denom_b\", \"MC bjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_c = ROOT.TH2D(\"h2_BTaggingEff_Denom_c\", \"MC cjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Denom_udsg = ROOT.TH2D(\"h2_BTaggingEff_Denom_udsg\", \"MC ljet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_b = ROOT.TH2D(\"h2_BTaggingEff_Num_b\", \"Tagged bjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_c = ROOT.TH2D(\"h2_BTaggingEff_Num_c\", \"Tagged cjet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_BTaggingEff_Num_udsg = ROOT.TH2D(\"h2_BTaggingEff_Num_udsg\", \"Tagged ljet;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\n\nh2_BTaggingEff_Denom_b.Sumw2()\nh2_BTaggingEff_Denom_c.Sumw2()\nh2_BTaggingEff_Denom_udsg.Sumw2()\nh2_BTaggingEff_Num_b.Sumw2()\nh2_BTaggingEff_Num_c.Sumw2()\nh2_BTaggingEff_Num_udsg.Sumw2()\n\n#++++++++++++++++++++++++++++++++++\n#++ looping over the events ++\n#++++++++++++++++++++++++++++++++++\nfor i in range(tree.GetEntries()):\n #++++++++++++++++++++++++++++++++++\n #++ taking objects ++\n #++++++++++++++++++++++++++++++++++\n if Debug:\n if i > 100:\n break\n if not Debug and i%5000 == 0:\n print(\"Event #\", i+1, \" out of \", tree.GetEntries())\n event = Event(tree,i)\n electrons = Collection(event, \"Electron\")\n muons = Collection(event, \"Muon\")\n jets = Collection(event, \"Jet\")\n njets = len(jets)\n fatjets = Collection(event, \"FatJet\")\n HLT = Object(event, \"HLT\")\n 
PV = Object(event, \"PV\")\n Flag = Object(event, 'Flag')\n\n #++++++++++++++++++++++++++++++++++\n #++ defining variables ++\n #++++++++++++++++++++++++++++++++++\n tightlep = None\n tightlep_p4 = None\n tightlep_p4t = None\n tightlep_SF = None\n tightlep_SFUp = None\n tightlep_SFDown = None\n recomet_p4t = None\n PF_SF = None\n PF_SFUp = None\n PF_SFDown = None\n PU_SF = None\n PU_SFUp = None\n PU_SFDown = None\n #++++++++++++++++++++++++++++++++++\n #++ starting the analysis ++\n #++++++++++++++++++++++++++++++++++\n #VetoMu = get_LooseMu(muons)\n #goodMu = get_Mu(muons)\n #VetoEle = get_LooseEle(electrons)\n #goodEle = get_Ele(electrons)\n year = sample.year\n if(isMC):\n runPeriod = ''\n else:\n runPeriod = sample.runP\n\n if not isMC:\n if not Flag.eeBadScFilter:\n continue\n\n #print \"------ \", i\n passMu, passEle, passHT, noTrigger = trig_map(HLT, PV, year, runPeriod, Flag)\n\n if noTrigger:\n continue\n\n '''\n GoodEle, ele_TightRegion = SelectLepton(electrons, False) \n GoodMu, mu_TightRegion = SelectLepton(muons, True) \n \n if GoodEle is None and GoodMu is None:\n continue\n\n ele_lepton_veto = -1\n mu_lepton_veto = -1\n\n if GoodEle != None:\n ele_lepton_veto = LepVeto(GoodEle, electrons, muons)\n if GoodMu != None:\n mu_lepton_veto = LepVeto(GoodMu, electrons, muons)\n\n SingleEle=False\n SingleMu=False\n ElMu=False\n\n LeadLepFamily=\"not selected\"\n \n GoodLep = None\n leptons = None\n\n lepton_TightRegion = 0\n\n if 'DataHT' not in sample.label:\n if passEle and not passMu:\n if GoodEle != None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n continue\n\n elif passMu and not passEle:\n if GoodMu != None and mu_lepton_veto:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleEle = False\n SingleMu = True\n else:\n continue\n\n elif passMu and passEle:\n ElMu=True\n \n else:\n continue\n\n\n else:\n if passHT:\n ElMu = True\n 
else:\n continue\n\n if ElMu:\n if GoodMu==None and GoodEle!=None and ele_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n\n elif GoodMu!=None and mu_lepton_veto and GoodEle==None:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n elif GoodMu!=None and GoodEle!=None:\n if ele_lepton_veto and not mu_lepton_veto:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n elif not ele_lepton_veto and mu_lepton_veto: \n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n\n elif ele_lepton_veto and mu_lepton_veto:\n if GoodEle.pt > GoodMu.pt:\n GoodLep = GoodEle\n lepton_TightRegion = copy.deepcopy(ele_TightRegion)\n SingleEle = True\n SingleMu = False\n else:\n GoodLep = GoodMu\n lepton_TightRegion = copy.deepcopy(mu_TightRegion)\n SingleMu = True\n SingleEle = False\n \n else:\n continue\n\n else:\n continue\n\n vTrigEle, vTrigMu, vTrigHT = trig_finder(HLT, sample.year, sample.label)\n \n if SingleEle==True:\n if isMC: \n HLT_effLumi = lumiFinder(\"Ele\", vTrigEle, sample.year)\n leptons = electrons\n elif SingleMu==True:\n if isMC:\n HLT_effLumi = lumiFinder(\"Mu\", vTrigMu, sample.year)\n leptons = muons\n\n elif not (SingleMu or SingleEle):\n continue\n\n if SingleEle and dataMu:\n continue\n if SingleMu and dataEle:\n continue\n \n if GoodLep==None or (lepton_TightRegion < 1):\n if Debug:\n print(\"exiting at lepton selection (without saving)\")\n continue\n '''\n\n ######################################\n ## Selecting only jets with pt>30 ##\n ######################################\n goodJets = get_Jet(jets, 30)\n bjets, nobjets = bjet_filter(goodJets, 'DeepFlv', 'M')\n\n if (len(goodJets) < 2 or len(fatjets) < 2):\n continue\n\n for jet in goodJets:\n if(abs(jet.partonFlavour) == 5):\n 
h2_BTaggingEff_Denom_b.Fill(jet.pt, abs(jet.eta))\n if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):\n h2_BTaggingEff_Num_b.Fill(jet.pt, abs(jet.eta))\n elif(abs(jet.partonFlavour) == 4):\n h2_BTaggingEff_Denom_c.Fill(jet.pt, abs(jet.eta))\n if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):\n h2_BTaggingEff_Num_c.Fill(jet.pt, abs(jet.eta))\n else:\n h2_BTaggingEff_Denom_udsg.Fill(jet.pt, abs(jet.eta))\n if(len(bjet_filter([jet], 'DeepFlv', 'M')[0])==1):\n h2_BTaggingEff_Num_udsg.Fill(jet.pt, abs(jet.eta))\n\noutTreeFile.cd()\nh2_BTaggingEff_Denom_b.Write()\nh2_BTaggingEff_Denom_c.Write()\nh2_BTaggingEff_Denom_udsg.Write()\nh2_BTaggingEff_Num_b.Write()\nh2_BTaggingEff_Num_c.Write()\nh2_BTaggingEff_Num_udsg.Write()\n\nh2_Eff_b = ROOT.TEfficiency(\"h2_BTaggingEff_b\", \"bjet efficiency;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_Eff_b.SetTotalHistogram(h2_BTaggingEff_Denom_b, \"\")\nh2_Eff_b.SetPassedHistogram(h2_BTaggingEff_Num_b, \"\")\n\nh2_Eff_c = ROOT.TEfficiency(\"h2_BTaggingEff_c\", \"cjet efficiency;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_Eff_c.SetTotalHistogram(h2_BTaggingEff_Denom_c, \"\")\nh2_Eff_c.SetPassedHistogram(h2_BTaggingEff_Num_c, \"\")\n\nh2_Eff_udsg = ROOT.TEfficiency(\"h2_BTaggingEff_udsg\", \"light jet efficiency;p_{T} [GeV];#eta\", nptbins, ptbins, netabins, etabins)\nh2_Eff_udsg.SetTotalHistogram(h2_BTaggingEff_Denom_udsg, \"\")\nh2_Eff_udsg.SetPassedHistogram(h2_BTaggingEff_Num_udsg, \"\")\n\nh2_Eff_b.Write()\nh2_Eff_c.Write()\nh2_Eff_udsg.Write()\n\nendTime = datetime.datetime.now()\nprint(\"Ending running at \" + str(endTime))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#-------------------------------------------------------------------------------
# rtlconverter.py
#
# PyCoRAM RTL Converter
#
# Copyright (C) 2013, Shinya Takamaeda-Yamazaki
# License: Apache 2.0
#-------------------------------------------------------------------------------
import sys
import os
import subprocess
import copy
import collections
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))) )
import utils.version
if sys.version_info[0] >= 3:
from rtlconverter.convertvisitor import InstanceConvertVisitor
from rtlconverter.convertvisitor import InstanceReplaceVisitor
else:
from convertvisitor import InstanceConvertVisitor
from convertvisitor import InstanceReplaceVisitor
import pyverilog.utils.signaltype as signaltype
from pyverilog.utils.scope import ScopeLabel, ScopeChain
import pyverilog.vparser.ast as vast
from pyverilog.vparser.parser import VerilogCodeParser
from pyverilog.dataflow.modulevisitor import ModuleVisitor
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
class RtlConverter(object):
def __init__(self, filelist, topmodule='userlogic', include=None,
define=None, single_clock=False):
self.filelist = filelist
self.topmodule = topmodule
self.include = include
self.define = define
self.single_clock = single_clock
self.top_parameters = collections.OrderedDict()
self.top_ioports = collections.OrderedDict()
self.coram_object = collections.OrderedDict()
    def getTopParameters(self):
        """Return the top module's parameters (OrderedDict, filled by generate())."""
        return self.top_parameters
    def getTopIOPorts(self):
        """Return the top module's I/O ports (OrderedDict, filled by generate())."""
        return self.top_ioports
    def getCoramObject(self):
        """Return the detected CoRAM objects, keyed by mode (filled by generate())."""
        return self.coram_object
def dumpCoramObject(self):
coram_object = self.getCoramObject()
print("----------------------------------------")
print("CoRAM Objects in User-defined RTL")
for mode, coram_items in coram_object.items():
print(" CoRAM %s" % mode)
for threadname, idx, subid, addrwidth, datawidth in sorted(coram_items, key=lambda x:x[1]):
print(" %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)" %
(mode, idx, ( '' if subid is None else ''.join( ('[', str(subid), ']') ) ),
threadname, str(addrwidth), str(datawidth)))
def generate(self):
preprocess_define = []
if self.single_clock:
preprocess_define.append('CORAM_SINGLE_CLOCK')
if self.define:
preprocess_define.extend(self.define)
code_parser = VerilogCodeParser(self.filelist,
preprocess_include=self.include,
preprocess_define=preprocess_define)
ast = code_parser.parse()
module_visitor = ModuleVisitor()
module_visitor.visit(ast)
modulenames = module_visitor.get_modulenames()
moduleinfotable = module_visitor.get_moduleinfotable()
instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable, self.topmodule)
instanceconvert_visitor.start_visit()
replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()
replaced_instports = instanceconvert_visitor.getReplacedInstPorts()
replaced_items = instanceconvert_visitor.getReplacedItems()
new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()
instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,
replaced_instports,
replaced_items,
new_moduleinfotable)
ret = instancereplace_visitor.getAST()
# gather user-defined io-ports on top-module and parameters to connect external
frametable = instanceconvert_visitor.getFrameTable()
top_ioports = []
for i in moduleinfotable.getIOPorts(self.topmodule):
if signaltype.isClock(i) or signaltype.isReset(i): continue
top_ioports.append(i)
top_scope = ScopeChain( [ScopeLabel(self.topmodule, 'module')] )
top_sigs = frametable.getSignals(top_scope)
top_params = frametable.getConsts(top_scope)
for sk, sv in top_sigs.items():
if len(sk) > 2: continue
signame = sk[1].scopename
for svv in sv:
if (signame in top_ioports and
not (signaltype.isClock(signame) or signaltype.isReset(signame)) and
isinstance(svv, vast.Input) or isinstance(svv, vast.Output) or isinstance(svv, vast.Inout)):
port = svv
msb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.msb, top_scope))
lsb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.lsb, top_scope))
width = int(msb_val.value) - int(lsb_val.value) + 1
self.top_ioports[signame] = (port, width)
break
for ck, cv in top_params.items():
if len(ck) > 2: continue
signame = ck[1].scopename
param = cv[0]
if isinstance(param, vast.Genvar): continue
self.top_parameters[signame] = param
self.coram_object = instanceconvert_visitor.getCoramObject()
return ret
def main():
    """Command-line driver: parse options, convert the RTL, write the output file."""
    from optparse import OptionParser

    INFO = "PyCoRAM RTL Converter"
    VERSION = utils.version.VERSION
    USAGE = "Usage: python rtlconverter.py -t TOPMODULE file ..."

    def showVersion():
        # Print the banner and terminate; used for -v and for empty input.
        print(INFO)
        print(VERSION)
        print(USAGE)
        sys.exit()

    optparser = OptionParser()
    optparser.add_option("-v", "--version", action="store_true", dest="showversion",
                         default=False, help="Show the version")
    optparser.add_option("-t", "--top", dest="topmodule",
                         default="userlogic", help="Top module, Default=userlogic")
    optparser.add_option("-o", "--output", dest="outputfile",
                         default="out.v", help="Output file name, Default=out.v")
    optparser.add_option("-I", "--include", dest="include", action="append",
                         default=[], help="Include path")
    optparser.add_option("-D", dest="define", action="append",
                         default=[], help="Macro Definition")
    optparser.add_option("--singleclock", action="store_true", dest="single_clock",
                         default=False, help="Use single clock mode")
    options, input_files = optparser.parse_args()

    if options.showversion:
        showVersion()

    # Refuse to run if any named source file is missing.
    for path in input_files:
        if not os.path.exists(path):
            raise IOError("file not found: " + path)

    if not input_files:
        showVersion()

    converter = RtlConverter(input_files, options.topmodule,
                             include=options.include,
                             define=options.define,
                             single_clock=options.single_clock)
    converted_ast = converter.generate()
    converter.dumpCoramObject()

    # Render the converted AST back to Verilog source text.
    codegen = ASTCodeGenerator()
    rtl_code = codegen.visit(converted_ast)

    with open(options.outputfile, 'w') as out:
        out.write(rtl_code)
|
normal
|
{
"blob_id": "55ffcf5e6120cc07da461e30979dd8a36a599bee",
"index": 8353,
"step-1": "<mask token>\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = 
instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = 
instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\ndef main():\n from optparse import OptionParser\n INFO = 'PyCoRAM RTL Converter'\n VERSION = utils.version.VERSION\n USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n optparser = OptionParser()\n optparser.add_option('-v', '--version', action='store_true', dest=\n 'showversion', default=False, help='Show the version')\n optparser.add_option('-t', '--top', dest='topmodule', 
default=\n 'userlogic', help='Top module, Default=userlogic')\n optparser.add_option('-o', '--output', dest='outputfile', default=\n 'out.v', help='Output file name, Default=out.v')\n optparser.add_option('-I', '--include', dest='include', action='append',\n default=[], help='Include path')\n optparser.add_option('-D', dest='define', action='append', default=[],\n help='Macro Definition')\n optparser.add_option('--singleclock', action='store_true', dest=\n 'single_clock', default=False, help='Use single clock mode')\n options, args = optparser.parse_args()\n filelist = args\n if options.showversion:\n showVersion()\n for f in filelist:\n if not os.path.exists(f):\n raise IOError('file not found: ' + f)\n if len(filelist) == 0:\n showVersion()\n converter = RtlConverter(filelist, options.topmodule, include=options.\n include, define=options.define, single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n<mask token>\nif sys.version_info[0] >= 3:\n from rtlconverter.convertvisitor import InstanceConvertVisitor\n from rtlconverter.convertvisitor import InstanceReplaceVisitor\nelse:\n from convertvisitor import InstanceConvertVisitor\n from convertvisitor import InstanceReplaceVisitor\n<mask token>\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = 
module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\ndef main():\n from optparse import OptionParser\n INFO = 'PyCoRAM RTL Converter'\n 
VERSION = utils.version.VERSION\n USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n optparser = OptionParser()\n optparser.add_option('-v', '--version', action='store_true', dest=\n 'showversion', default=False, help='Show the version')\n optparser.add_option('-t', '--top', dest='topmodule', default=\n 'userlogic', help='Top module, Default=userlogic')\n optparser.add_option('-o', '--output', dest='outputfile', default=\n 'out.v', help='Output file name, Default=out.v')\n optparser.add_option('-I', '--include', dest='include', action='append',\n default=[], help='Include path')\n optparser.add_option('-D', dest='define', action='append', default=[],\n help='Macro Definition')\n optparser.add_option('--singleclock', action='store_true', dest=\n 'single_clock', default=False, help='Use single clock mode')\n options, args = optparser.parse_args()\n filelist = args\n if options.showversion:\n showVersion()\n for f in filelist:\n if not os.path.exists(f):\n raise IOError('file not found: ' + f)\n if len(filelist) == 0:\n showVersion()\n converter = RtlConverter(filelist, options.topmodule, include=options.\n include, define=options.define, single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport os\nimport subprocess\nimport copy\nimport collections\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport utils.version\nif sys.version_info[0] >= 3:\n from rtlconverter.convertvisitor import InstanceConvertVisitor\n from rtlconverter.convertvisitor import InstanceReplaceVisitor\nelse:\n from convertvisitor import InstanceConvertVisitor\n from convertvisitor import InstanceReplaceVisitor\nimport pyverilog.utils.signaltype as signaltype\nfrom pyverilog.utils.scope import ScopeLabel, ScopeChain\nimport pyverilog.vparser.ast as vast\nfrom pyverilog.vparser.parser import VerilogCodeParser\nfrom pyverilog.dataflow.modulevisitor import ModuleVisitor\nfrom pyverilog.ast_code_generator.codegen import ASTCodeGenerator\n\n\nclass RtlConverter(object):\n\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n\n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print('----------------------------------------')\n print('CoRAM Objects in User-defined RTL')\n for mode, coram_items in coram_object.items():\n print(' CoRAM %s' % mode)\n for threadname, idx, subid, addrwidth, datawidth in sorted(\n coram_items, key=lambda x: x[1]):\n print(' %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)' %\n (mode, idx, '' if subid is None else ''.join(('[', str(\n subid), ']')), threadname, str(addrwidth), str(datawidth)))\n\n def generate(self):\n preprocess_define = []\n if self.single_clock:\n 
preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n code_parser = VerilogCodeParser(self.filelist, preprocess_include=\n self.include, preprocess_define=preprocess_define)\n ast = code_parser.parse()\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable,\n self.topmodule)\n instanceconvert_visitor.start_visit()\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems()\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance,\n replaced_instports, replaced_items, new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i):\n continue\n top_ioports.append(i)\n top_scope = ScopeChain([ScopeLabel(self.topmodule, 'module')])\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n for sk, sv in top_sigs.items():\n if len(sk) > 2:\n continue\n signame = sk[1].scopename\n for svv in sv:\n if signame in top_ioports and not (signaltype.isClock(\n signame) or signaltype.isReset(signame)) and isinstance(svv\n , vast.Input) or isinstance(svv, vast.Output\n ) or isinstance(svv, vast.Inout):\n port = svv\n msb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.msb,\n top_scope))\n lsb_val = instanceconvert_visitor.optimize(\n instanceconvert_visitor.getTree(port.width.lsb,\n top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] 
= port, width\n break\n for ck, cv in top_params.items():\n if len(ck) > 2:\n continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar):\n continue\n self.top_parameters[signame] = param\n self.coram_object = instanceconvert_visitor.getCoramObject()\n return ret\n\n\ndef main():\n from optparse import OptionParser\n INFO = 'PyCoRAM RTL Converter'\n VERSION = utils.version.VERSION\n USAGE = 'Usage: python rtlconverter.py -t TOPMODULE file ...'\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n optparser = OptionParser()\n optparser.add_option('-v', '--version', action='store_true', dest=\n 'showversion', default=False, help='Show the version')\n optparser.add_option('-t', '--top', dest='topmodule', default=\n 'userlogic', help='Top module, Default=userlogic')\n optparser.add_option('-o', '--output', dest='outputfile', default=\n 'out.v', help='Output file name, Default=out.v')\n optparser.add_option('-I', '--include', dest='include', action='append',\n default=[], help='Include path')\n optparser.add_option('-D', dest='define', action='append', default=[],\n help='Macro Definition')\n optparser.add_option('--singleclock', action='store_true', dest=\n 'single_clock', default=False, help='Use single clock mode')\n options, args = optparser.parse_args()\n filelist = args\n if options.showversion:\n showVersion()\n for f in filelist:\n if not os.path.exists(f):\n raise IOError('file not found: ' + f)\n if len(filelist) == 0:\n showVersion()\n converter = RtlConverter(filelist, options.topmodule, include=options.\n include, define=options.define, single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#-------------------------------------------------------------------------------\n# rtlconverter.py\n# \n# PyCoRAM RTL Converter\n#\n# Copyright (C) 2013, Shinya Takamaeda-Yamazaki\n# License: Apache 2.0\n#-------------------------------------------------------------------------------\nimport sys\nimport os\nimport subprocess\nimport copy\nimport collections\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))) )\n\nimport utils.version\n\nif sys.version_info[0] >= 3:\n from rtlconverter.convertvisitor import InstanceConvertVisitor\n from rtlconverter.convertvisitor import InstanceReplaceVisitor\nelse:\n from convertvisitor import InstanceConvertVisitor\n from convertvisitor import InstanceReplaceVisitor\n\nimport pyverilog.utils.signaltype as signaltype\nfrom pyverilog.utils.scope import ScopeLabel, ScopeChain\nimport pyverilog.vparser.ast as vast\nfrom pyverilog.vparser.parser import VerilogCodeParser\nfrom pyverilog.dataflow.modulevisitor import ModuleVisitor\nfrom pyverilog.ast_code_generator.codegen import ASTCodeGenerator\n\nclass RtlConverter(object):\n def __init__(self, filelist, topmodule='userlogic', include=None,\n define=None, single_clock=False):\n self.filelist = filelist\n self.topmodule = topmodule\n self.include = include\n self.define = define\n self.single_clock = single_clock\n\n self.top_parameters = collections.OrderedDict()\n self.top_ioports = collections.OrderedDict()\n self.coram_object = collections.OrderedDict()\n\n def getTopParameters(self):\n return self.top_parameters\n \n def getTopIOPorts(self):\n return self.top_ioports\n\n def getCoramObject(self):\n return self.coram_object\n\n def dumpCoramObject(self):\n coram_object = self.getCoramObject()\n print(\"----------------------------------------\")\n print(\"CoRAM Objects in User-defined RTL\")\n for mode, coram_items in coram_object.items():\n print(\" CoRAM %s\" % mode)\n for threadname, idx, subid, addrwidth, datawidth in 
sorted(coram_items, key=lambda x:x[1]):\n print(\" %s(ID:%d%s Thread:%s AddrWidth:%s DataWidth:%s)\" %\n (mode, idx, ( '' if subid is None else ''.join( ('[', str(subid), ']') ) ),\n threadname, str(addrwidth), str(datawidth)))\n \n def generate(self):\n preprocess_define = []\n if self.single_clock:\n preprocess_define.append('CORAM_SINGLE_CLOCK')\n if self.define:\n preprocess_define.extend(self.define)\n\n code_parser = VerilogCodeParser(self.filelist,\n preprocess_include=self.include,\n preprocess_define=preprocess_define)\n ast = code_parser.parse()\n\n module_visitor = ModuleVisitor()\n module_visitor.visit(ast)\n modulenames = module_visitor.get_modulenames()\n moduleinfotable = module_visitor.get_moduleinfotable()\n\n instanceconvert_visitor = InstanceConvertVisitor(moduleinfotable, self.topmodule)\n instanceconvert_visitor.start_visit()\n\n replaced_instance = instanceconvert_visitor.getMergedReplacedInstance()\n replaced_instports = instanceconvert_visitor.getReplacedInstPorts()\n replaced_items = instanceconvert_visitor.getReplacedItems() \n\n new_moduleinfotable = instanceconvert_visitor.get_new_moduleinfotable()\n instancereplace_visitor = InstanceReplaceVisitor(replaced_instance, \n replaced_instports,\n replaced_items,\n new_moduleinfotable)\n ret = instancereplace_visitor.getAST()\n\n # gather user-defined io-ports on top-module and parameters to connect external\n frametable = instanceconvert_visitor.getFrameTable()\n top_ioports = []\n for i in moduleinfotable.getIOPorts(self.topmodule):\n if signaltype.isClock(i) or signaltype.isReset(i): continue\n top_ioports.append(i)\n\n top_scope = ScopeChain( [ScopeLabel(self.topmodule, 'module')] )\n top_sigs = frametable.getSignals(top_scope)\n top_params = frametable.getConsts(top_scope)\n\n for sk, sv in top_sigs.items():\n if len(sk) > 2: continue\n signame = sk[1].scopename\n for svv in sv:\n if (signame in top_ioports and \n not (signaltype.isClock(signame) or signaltype.isReset(signame)) and\n 
isinstance(svv, vast.Input) or isinstance(svv, vast.Output) or isinstance(svv, vast.Inout)):\n port = svv\n msb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.msb, top_scope))\n lsb_val = instanceconvert_visitor.optimize(instanceconvert_visitor.getTree(port.width.lsb, top_scope))\n width = int(msb_val.value) - int(lsb_val.value) + 1\n self.top_ioports[signame] = (port, width)\n break\n\n for ck, cv in top_params.items():\n if len(ck) > 2: continue\n signame = ck[1].scopename\n param = cv[0]\n if isinstance(param, vast.Genvar): continue\n self.top_parameters[signame] = param\n\n self.coram_object = instanceconvert_visitor.getCoramObject()\n\n return ret\n\ndef main():\n from optparse import OptionParser\n INFO = \"PyCoRAM RTL Converter\"\n VERSION = utils.version.VERSION\n USAGE = \"Usage: python rtlconverter.py -t TOPMODULE file ...\"\n\n def showVersion():\n print(INFO)\n print(VERSION)\n print(USAGE)\n sys.exit()\n \n optparser = OptionParser()\n optparser.add_option(\"-v\",\"--version\",action=\"store_true\",dest=\"showversion\",\n default=False,help=\"Show the version\")\n optparser.add_option(\"-t\",\"--top\",dest=\"topmodule\",\n default=\"userlogic\",help=\"Top module, Default=userlogic\")\n optparser.add_option(\"-o\",\"--output\",dest=\"outputfile\",\n default=\"out.v\",help=\"Output file name, Default=out.v\")\n optparser.add_option(\"-I\",\"--include\",dest=\"include\",action=\"append\",\n default=[],help=\"Include path\")\n optparser.add_option(\"-D\",dest=\"define\",action=\"append\",\n default=[],help=\"Macro Definition\")\n optparser.add_option(\"--singleclock\",action=\"store_true\",dest=\"single_clock\",\n default=False,help=\"Use single clock mode\")\n (options, args) = optparser.parse_args()\n\n filelist = args\n if options.showversion:\n showVersion()\n\n for f in filelist:\n if not os.path.exists(f): raise IOError(\"file not found: \" + f)\n\n if len(filelist) == 0:\n showVersion()\n\n converter = 
RtlConverter(filelist, options.topmodule,\n include=options.include, \n define=options.define,\n single_clock=options.single_clock)\n ast = converter.generate()\n converter.dumpCoramObject()\n \n asttocode = ASTCodeGenerator()\n code = asttocode.visit(ast)\n\n f = open(options.outputfile, 'w')\n f.write(code)\n f.close()\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
# coding: utf-8
# Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api.api_ce import DeviceControllerApi
class DeviceControllerApi(DeviceControllerApi):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Delegate to the generated CE base class; api_client carries the
        # shared HTTP client/configuration used by all endpoint wrappers.
        super(DeviceControllerApi, self).__init__(api_client)
def claim_device_using_post(self, device_name, **kwargs): # noqa: E501
"""claimDevice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.claim_device_using_post(device_name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str device_name: deviceName (required)
:param ClaimRequest claim_request: claimRequest
:param str sub_customer_id: subCustomerId
:return: DeferredResultResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.claim_device_using_post_with_http_info(device_name, **kwargs) # noqa: E501
else:
(data) = self.claim_device_using_post_with_http_info(device_name, **kwargs) # noqa: E501
return data
def claim_device_using_post_with_http_info(self, device_name, **kwargs):  # noqa: E501
    """claimDevice  # noqa: E501

    Build, validate and dispatch ``POST /api/customer/device/{deviceName}/claim``
    and return the raw response information.  Synchronous by default; pass
    ``async_req=True`` to receive the request thread instead.

    >>> thread = api_pe.claim_device_using_post_with_http_info(device_name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str device_name: deviceName (required)
    :param ClaimRequest claim_request: claimRequest
    :param str sub_customer_id: subCustomerId
    :return: DeferredResultResponseEntity
        If the method is called asynchronously, returns the request thread.
    """
    # Names this endpoint understands plus the generic transport options.
    # NOTE(review): `all_params` is assembled but never checked against —
    # unknown **kwargs are silently merged into `params` below
    # (swagger-codegen artifact).
    all_params = ['device_name', 'claim_request', 'sub_customer_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the explicit arguments via locals(), then flatten **kwargs
    # into the same dict so every option is looked up uniformly by name.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'device_name' is set
    if ('device_name' not in params or
            params['device_name'] is None):
        raise ValueError("Missing the required parameter `device_name` when calling `claim_device_using_post`")  # noqa: E501

    collection_formats = {}  # no collection-typed parameters on this call

    # Substituted into the {deviceName} segment of the URL template.
    path_params = {}
    if 'device_name' in params:
        path_params['deviceName'] = params['device_name']  # noqa: E501

    query_params = []
    if 'sub_customer_id' in params:
        query_params.append(('subCustomerId', params['sub_customer_id']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # Optional JSON body: the ClaimRequest payload, if one was supplied.
    body_params = None
    if 'claim_request' in params:
        body_params = params['claim_request']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # NOTE(review): the `{?subCustomerId}` suffix is RFC 6570-style and is
    # presumably expanded/stripped by api_client.call_api — confirm there.
    return self.api_client.call_api(
        '/api/customer/device/{deviceName}/claim{?subCustomerId}', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='DeferredResultResponseEntity',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_customer_devices_using_get(self, customer_id, page_size, page, **kwargs):  # noqa: E501
    """Fetch one page of a customer's devices (``getCustomerDevices``).

    Synchronous by default.  Pass ``async_req=True`` to get the request
    thread back instead; its ``get()`` yields the result.

    :param async_req bool
    :param str customer_id: customerId (required)
    :param str page_size: pageSize (required)
    :param str page: page (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Wrapper callers want only the deserialized page, not the full
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand the request thread straight back.
        return self.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, **kwargs)  # noqa: E501
    page_data = self.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, **kwargs)  # noqa: E501
    return page_data
def get_customer_devices_using_get_with_http_info(self, customer_id, page_size, page, **kwargs):  # noqa: E501
    """getCustomerDevices  # noqa: E501

    Build, validate and dispatch ``GET /api/customer/{customerId}/devices``
    with paging/filter options, returning the raw response information.
    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str customer_id: customerId (required)
    :param str page_size: pageSize (required)
    :param str page: page (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Accepted option names (assembled but never enforced — unknown
    # **kwargs are merged below without complaint; generator artifact).
    all_params = ['customer_id', 'page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge positional arguments (captured via locals()) and **kwargs into
    # one dict keyed by parameter name.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'customer_id' is set
    if ('customer_id' not in params or
            params['customer_id'] is None):
        raise ValueError("Missing the required parameter `customer_id` when calling `get_customer_devices_using_get`")  # noqa: E501
    # verify the required parameter 'page_size' is set
    if ('page_size' not in params or
            params['page_size'] is None):
        raise ValueError("Missing the required parameter `page_size` when calling `get_customer_devices_using_get`")  # noqa: E501
    # verify the required parameter 'page' is set
    if ('page' not in params or
            params['page'] is None):
        raise ValueError("Missing the required parameter `page` when calling `get_customer_devices_using_get`")  # noqa: E501

    collection_formats = {}

    # {customerId} path segment.
    path_params = {}
    if 'customer_id' in params:
        path_params['customerId'] = params['customer_id']  # noqa: E501

    # Optional filters first, then the required paging values; the list of
    # tuples keeps the query-string order stable.
    query_params = []
    if 'type' in params:
        query_params.append(('type', params['type']))  # noqa: E501
    if 'text_search' in params:
        query_params.append(('textSearch', params['text_search']))  # noqa: E501
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None  # GET request — no body payload
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # The `{?...}` query suffix is RFC 6570-style — presumably handled by
    # api_client.call_api.
    return self.api_client.call_api(
        '/api/customer/{customerId}/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDataDevice',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_devices_by_entity_group_id_using_get(self, entity_group_id, page_size, page, **kwargs):  # noqa: E501
    """Fetch one page of devices in an entity group (``getDevicesByEntityGroupId``).

    Synchronous by default.  Pass ``async_req=True`` to get the request
    thread back instead; its ``get()`` yields the result.

    :param async_req bool
    :param str entity_group_id: entityGroupId (required)
    :param str page_size: Page size (required)
    :param str page: Page (required)
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Wrapper callers want only the deserialized page.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand the request thread straight back.
        return self.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs)  # noqa: E501
    page_data = self.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs)  # noqa: E501
    return page_data
def get_devices_by_entity_group_id_using_get_with_http_info(self, entity_group_id, page_size, page, **kwargs):  # noqa: E501
    """getDevicesByEntityGroupId  # noqa: E501

    Build, validate and dispatch ``GET /api/entityGroup/{entityGroupId}/devices``
    with paging/filter options, returning the raw response information.
    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str entity_group_id: entityGroupId (required)
    :param str page_size: Page size (required)
    :param str page: Page (required)
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Accepted option names (assembled but never enforced — unknown
    # **kwargs are merged below without complaint; generator artifact).
    all_params = ['entity_group_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge positional arguments (captured via locals()) and **kwargs into
    # one dict keyed by parameter name.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'entity_group_id' is set
    if ('entity_group_id' not in params or
            params['entity_group_id'] is None):
        raise ValueError("Missing the required parameter `entity_group_id` when calling `get_devices_by_entity_group_id_using_get`")  # noqa: E501
    # verify the required parameter 'page_size' is set
    if ('page_size' not in params or
            params['page_size'] is None):
        raise ValueError("Missing the required parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`")  # noqa: E501
    # verify the required parameter 'page' is set
    if ('page' not in params or
            params['page'] is None):
        raise ValueError("Missing the required parameter `page` when calling `get_devices_by_entity_group_id_using_get`")  # noqa: E501
    # NOTE(review): the docstring declares page_size/page as str, but these
    # comparisons require numbers — a str value would raise TypeError on
    # Python 3 here.  Callers presumably pass ints; confirm.
    if 'page_size' in params and params['page_size'] < 1.0:  # noqa: E501
        raise ValueError("Invalid value for parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `1.0`")  # noqa: E501
    if 'page' in params and params['page'] < 0.0:  # noqa: E501
        raise ValueError("Invalid value for parameter `page` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `0.0`")  # noqa: E501

    collection_formats = {}

    # {entityGroupId} path segment.
    path_params = {}
    if 'entity_group_id' in params:
        path_params['entityGroupId'] = params['entity_group_id']  # noqa: E501

    # Optional filters first, then the required paging values; the list of
    # tuples keeps the query-string order stable.
    query_params = []
    if 'text_search' in params:
        query_params.append(('textSearch', params['text_search']))  # noqa: E501
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None  # GET request — no body payload
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # The `{?...}` query suffix is RFC 6570-style — presumably handled by
    # api_client.call_api.
    return self.api_client.call_api(
        '/api/entityGroup/{entityGroupId}/devices{?textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDataDevice',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_tenant_devices_using_get(self, page_size, page, **kwargs):  # noqa: E501
    """Fetch one page of the tenant's devices (``getTenantDevices``).

    Synchronous by default.  Pass ``async_req=True`` to get the request
    thread back instead; its ``get()`` yields the result.

    :param async_req bool
    :param str page_size: pageSize (required)
    :param str page: page (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Wrapper callers want only the deserialized page.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand the request thread straight back.
        return self.get_tenant_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
    page_data = self.get_tenant_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
    return page_data
def get_tenant_devices_using_get_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
    """getTenantDevices  # noqa: E501

    Build, validate and dispatch ``GET /api/tenant/devices`` with
    paging/filter options, returning the raw response information.
    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str page_size: pageSize (required)
    :param str page: page (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Accepted option names (assembled but never enforced — unknown
    # **kwargs are merged below without complaint; generator artifact).
    all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge positional arguments (captured via locals()) and **kwargs into
    # one dict keyed by parameter name.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'page_size' is set
    if ('page_size' not in params or
            params['page_size'] is None):
        raise ValueError("Missing the required parameter `page_size` when calling `get_tenant_devices_using_get`")  # noqa: E501
    # verify the required parameter 'page' is set
    if ('page' not in params or
            params['page'] is None):
        raise ValueError("Missing the required parameter `page` when calling `get_tenant_devices_using_get`")  # noqa: E501

    collection_formats = {}

    path_params = {}  # tenant is implied by the auth token — no path args

    # Optional filters first, then the required paging values; the list of
    # tuples keeps the query-string order stable.
    query_params = []
    if 'type' in params:
        query_params.append(('type', params['type']))  # noqa: E501
    if 'text_search' in params:
        query_params.append(('textSearch', params['text_search']))  # noqa: E501
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None  # GET request — no body payload
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # The `{?...}` query suffix is RFC 6570-style — presumably handled by
    # api_client.call_api.
    return self.api_client.call_api(
        '/api/tenant/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDataDevice',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_user_devices_using_get(self, page_size, page, **kwargs):  # noqa: E501
    """Fetch one page of the current user's devices (``getUserDevices``).

    Synchronous by default.  Pass ``async_req=True`` to get the request
    thread back instead; its ``get()`` yields the result.

    :param async_req bool
    :param str page_size: pageSize (required)
    :param str page: page (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Wrapper callers want only the deserialized page.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand the request thread straight back.
        return self.get_user_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
    page_data = self.get_user_devices_using_get_with_http_info(page_size, page, **kwargs)  # noqa: E501
    return page_data
def get_user_devices_using_get_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
    """getUserDevices  # noqa: E501

    Build, validate and dispatch ``GET /api/user/devices`` with
    paging/filter options, returning the raw response information.
    Synchronous by default; pass ``async_req=True`` for the request thread.

    :param async_req bool
    :param str page_size: pageSize (required)
    :param str page: page (required)
    :param str type: type
    :param str text_search: textSearch
    :param str sort_property: sortProperty
    :param str sort_order: sortOrder
    :return: PageDataDevice
        If the method is called asynchronously, returns the request thread.
    """
    # Accepted option names (assembled but never enforced — unknown
    # **kwargs are merged below without complaint; generator artifact).
    all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Merge positional arguments (captured via locals()) and **kwargs into
    # one dict keyed by parameter name.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'page_size' is set
    if ('page_size' not in params or
            params['page_size'] is None):
        raise ValueError("Missing the required parameter `page_size` when calling `get_user_devices_using_get`")  # noqa: E501
    # verify the required parameter 'page' is set
    if ('page' not in params or
            params['page'] is None):
        raise ValueError("Missing the required parameter `page` when calling `get_user_devices_using_get`")  # noqa: E501

    collection_formats = {}

    path_params = {}  # user is implied by the auth token — no path args

    # Optional filters first, then the required paging values; the list of
    # tuples keeps the query-string order stable.
    query_params = []
    if 'type' in params:
        query_params.append(('type', params['type']))  # noqa: E501
    if 'text_search' in params:
        query_params.append(('textSearch', params['text_search']))  # noqa: E501
    if 'sort_property' in params:
        query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
    if 'sort_order' in params:
        query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
    if 'page_size' in params:
        query_params.append(('pageSize', params['page_size']))  # noqa: E501
    if 'page' in params:
        query_params.append(('page', params['page']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None  # GET request — no body payload
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['*/*'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['X-Authorization']  # noqa: E501

    # The `{?...}` query suffix is RFC 6570-style — presumably handled by
    # api_client.call_api.
    return self.api_client.call_api(
        '/api/user/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PageDataDevice',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def save_device_using_post(self, device, **kwargs):  # noqa: E501
    """Create or update a device (``saveDevice`` endpoint).

    Synchronous by default.  Pass ``async_req=True`` to get the request
    thread back instead; its ``get()`` yields the result.

    :param async_req bool
    :param Device device: device (required)
    :param str access_token: accessToken
    :param str entity_group_id: entityGroupId
    :return: Device
        If the method is called asynchronously, returns the request thread.
    """
    # Wrapper callers want only the deserialized Device payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async mode: hand the request thread straight back.
        return self.save_device_using_post_with_http_info(device, **kwargs)  # noqa: E501
    saved = self.save_device_using_post_with_http_info(device, **kwargs)  # noqa: E501
    return saved
def save_device_using_post_with_http_info(self, device, **kwargs): # noqa: E501
"""saveDevice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api_pe.save_device_using_post_with_http_info(device, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Device device: device (required)
:param str access_token: accessToken
:param str entity_group_id: entityGroupId
:return: Device
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['device', 'access_token', 'entity_group_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
params[key] = val
del params['kwargs']
# verify the required parameter 'device' is set
if ('device' not in params or
params['device'] is None):
raise ValueError("Missing the required parameter `device` when calling `save_device_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('accessToken', params['access_token'])) # noqa: E501
if 'entity_group_id' in params:
query_params.append(('entityGroupId', params['entity_group_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'device' in params:
body_params = params['device']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/device{?accessToken,entityGroupId}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Device', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
normal
|
{
"blob_id": "9b30075183cf9611307afa74aa45979872e7e9d5",
"index": 8132,
"step-1": "<mask token>\n\n\nclass DeviceControllerApi(DeviceControllerApi):\n <mask token>\n <mask token>\n\n def claim_device_using_post(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n else:\n data = self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n return data\n\n def claim_device_using_post_with_http_info(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post_with_http_info(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device_name', 'claim_request', 'sub_customer_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device_name' not in params or params['device_name'] is None:\n raise ValueError(\n 'Missing the required parameter `device_name` when calling `claim_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n if 'device_name' in params:\n path_params['deviceName'] = params['device_name']\n query_params = []\n if 'sub_customer_id' in params:\n query_params.append(('subCustomerId', params['sub_customer_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'claim_request' in params:\n body_params = params['claim_request']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/device/{deviceName}/claim{?subCustomerId}',\n 'POST', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='DeferredResultResponseEntity', auth_settings=\n auth_settings, async_req=params.get('async_req'),\n 
_return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n <mask token>\n\n def get_customer_devices_using_get_with_http_info(self, customer_id,\n page_size, page, **kwargs):\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['customer_id', 'page_size', 'page', 'type',\n 'text_search', 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'customer_id' not in params or params['customer_id'] is None:\n raise ValueError(\n 'Missing the required parameter `customer_id` when calling `get_customer_devices_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_customer_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_customer_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n if 
'customer_id' in params:\n path_params['customerId'] = params['customer_id']\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/{customerId}/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_devices_by_entity_group_id_using_get(self, entity_group_id,\n page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n else:\n data = (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n return data\n\n def get_devices_by_entity_group_id_using_get_with_http_info(self,\n entity_group_id, page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['entity_group_id', 'page_size', 'page', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'entity_group_id' not in params or params['entity_group_id'\n ] is None:\n raise ValueError(\n 'Missing the required parameter `entity_group_id` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' in params and params['page_size'] < 1.0:\n raise ValueError(\n 'Invalid value for parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `1.0`'\n )\n if 'page' in params and params['page'] < 0.0:\n raise ValueError(\n 'Invalid value for parameter `page` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `0.0`'\n 
)\n collection_formats = {}\n path_params = {}\n if 'entity_group_id' in params:\n path_params['entityGroupId'] = params['entity_group_id']\n query_params = []\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/entityGroup/{entityGroupId}/devices{?textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_tenant_devices_using_get(self, page_size, page, **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n else:\n data = self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n return data\n\n def get_tenant_devices_using_get_with_http_info(self, page_size, page,\n **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['page_size', 'page', 'type', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required 
parameter `page_size` when calling `get_tenant_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_tenant_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/tenant/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n <mask token>\n <mask token>\n\n def save_device_using_post(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.save_device_using_post_with_http_info(device, **kwargs)\n else:\n data = self.save_device_using_post_with_http_info(device, **kwargs)\n return data\n\n def save_device_using_post_with_http_info(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post_with_http_info(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device', 'access_token', 'entity_group_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device' not in params or params['device'] is None:\n raise ValueError(\n 'Missing the required parameter `device` when calling `save_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'access_token' in params:\n query_params.append(('accessToken', params['access_token']))\n if 'entity_group_id' in params:\n query_params.append(('entityGroupId', 
params['entity_group_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'device' in params:\n body_params = params['device']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/device{?accessToken,entityGroupId}', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='Device',\n auth_settings=auth_settings, async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-2": "<mask token>\n\n\nclass DeviceControllerApi(DeviceControllerApi):\n <mask token>\n <mask token>\n\n def claim_device_using_post(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n else:\n data = self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n return data\n\n def claim_device_using_post_with_http_info(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post_with_http_info(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device_name', 'claim_request', 'sub_customer_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device_name' not in params or params['device_name'] is None:\n raise ValueError(\n 'Missing the required parameter `device_name` when calling `claim_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n if 'device_name' in params:\n path_params['deviceName'] = params['device_name']\n query_params = []\n if 'sub_customer_id' in params:\n query_params.append(('subCustomerId', params['sub_customer_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'claim_request' in params:\n body_params = params['claim_request']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/device/{deviceName}/claim{?subCustomerId}',\n 'POST', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='DeferredResultResponseEntity', auth_settings=\n auth_settings, async_req=params.get('async_req'),\n 
_return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_customer_devices_using_get(self, customer_id, page_size, page,\n **kwargs):\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_customer_devices_using_get_with_http_info(\n customer_id, page_size, page, **kwargs)\n else:\n data = self.get_customer_devices_using_get_with_http_info(\n customer_id, page_size, page, **kwargs)\n return data\n\n def get_customer_devices_using_get_with_http_info(self, customer_id,\n page_size, page, **kwargs):\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['customer_id', 'page_size', 'page', 'type',\n 'text_search', 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'customer_id' not in params or params['customer_id'] is None:\n raise ValueError(\n 'Missing the required parameter `customer_id` when calling `get_customer_devices_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_customer_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_customer_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n if 'customer_id' in params:\n path_params['customerId'] = params['customer_id']\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', 
params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/{customerId}/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_devices_by_entity_group_id_using_get(self, entity_group_id,\n page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n else:\n data = (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n return data\n\n def get_devices_by_entity_group_id_using_get_with_http_info(self,\n entity_group_id, page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['entity_group_id', 'page_size', 'page', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'entity_group_id' not in params or params['entity_group_id'\n ] is None:\n raise ValueError(\n 'Missing the required parameter `entity_group_id` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' in params and params['page_size'] < 1.0:\n raise ValueError(\n 'Invalid value for parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `1.0`'\n )\n if 'page' in params and params['page'] < 0.0:\n raise ValueError(\n 'Invalid value for parameter `page` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `0.0`'\n 
)\n collection_formats = {}\n path_params = {}\n if 'entity_group_id' in params:\n path_params['entityGroupId'] = params['entity_group_id']\n query_params = []\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/entityGroup/{entityGroupId}/devices{?textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_tenant_devices_using_get(self, page_size, page, **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n else:\n data = self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n return data\n\n def get_tenant_devices_using_get_with_http_info(self, page_size, page,\n **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['page_size', 'page', 'type', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required 
parameter `page_size` when calling `get_tenant_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_tenant_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/tenant/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n <mask token>\n <mask token>\n\n def save_device_using_post(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.save_device_using_post_with_http_info(device, **kwargs)\n else:\n data = self.save_device_using_post_with_http_info(device, **kwargs)\n return data\n\n def save_device_using_post_with_http_info(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post_with_http_info(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device', 'access_token', 'entity_group_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device' not in params or params['device'] is None:\n raise ValueError(\n 'Missing the required parameter `device` when calling `save_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'access_token' in params:\n query_params.append(('accessToken', params['access_token']))\n if 'entity_group_id' in params:\n query_params.append(('entityGroupId', 
params['entity_group_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'device' in params:\n body_params = params['device']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/device{?accessToken,entityGroupId}', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='Device',\n auth_settings=auth_settings, async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-3": "<mask token>\n\n\nclass DeviceControllerApi(DeviceControllerApi):\n <mask token>\n\n def __init__(self, api_client=None):\n super(DeviceControllerApi, self).__init__(api_client)\n\n def claim_device_using_post(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n else:\n data = self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n return data\n\n def claim_device_using_post_with_http_info(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post_with_http_info(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device_name', 'claim_request', 'sub_customer_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device_name' not in params or params['device_name'] is None:\n raise ValueError(\n 'Missing the required parameter `device_name` when calling `claim_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n if 'device_name' in params:\n path_params['deviceName'] = params['device_name']\n query_params = []\n if 'sub_customer_id' in params:\n query_params.append(('subCustomerId', params['sub_customer_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'claim_request' in params:\n body_params = params['claim_request']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/device/{deviceName}/claim{?subCustomerId}',\n 'POST', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='DeferredResultResponseEntity', auth_settings=\n auth_settings, async_req=params.get('async_req'),\n 
_return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_customer_devices_using_get(self, customer_id, page_size, page,\n **kwargs):\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_customer_devices_using_get_with_http_info(\n customer_id, page_size, page, **kwargs)\n else:\n data = self.get_customer_devices_using_get_with_http_info(\n customer_id, page_size, page, **kwargs)\n return data\n\n def get_customer_devices_using_get_with_http_info(self, customer_id,\n page_size, page, **kwargs):\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['customer_id', 'page_size', 'page', 'type',\n 'text_search', 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'customer_id' not in params or params['customer_id'] is None:\n raise ValueError(\n 'Missing the required parameter `customer_id` when calling `get_customer_devices_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_customer_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_customer_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n if 'customer_id' in params:\n path_params['customerId'] = params['customer_id']\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', 
params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/{customerId}/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_devices_by_entity_group_id_using_get(self, entity_group_id,\n page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n else:\n data = (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n return data\n\n def get_devices_by_entity_group_id_using_get_with_http_info(self,\n entity_group_id, page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['entity_group_id', 'page_size', 'page', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'entity_group_id' not in params or params['entity_group_id'\n ] is None:\n raise ValueError(\n 'Missing the required parameter `entity_group_id` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' in params and params['page_size'] < 1.0:\n raise ValueError(\n 'Invalid value for parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `1.0`'\n )\n if 'page' in params and params['page'] < 0.0:\n raise ValueError(\n 'Invalid value for parameter `page` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `0.0`'\n 
)\n collection_formats = {}\n path_params = {}\n if 'entity_group_id' in params:\n path_params['entityGroupId'] = params['entity_group_id']\n query_params = []\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/entityGroup/{entityGroupId}/devices{?textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_tenant_devices_using_get(self, page_size, page, **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n else:\n data = self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n return data\n\n def get_tenant_devices_using_get_with_http_info(self, page_size, page,\n **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['page_size', 'page', 'type', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required 
parameter `page_size` when calling `get_tenant_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_tenant_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/tenant/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n <mask token>\n\n def get_user_devices_using_get_with_http_info(self, page_size, page, **\n kwargs):\n \"\"\"getUserDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_user_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['page_size', 'page', 'type', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_user_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_user_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n 
header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/user/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def save_device_using_post(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.save_device_using_post_with_http_info(device, **kwargs)\n else:\n data = self.save_device_using_post_with_http_info(device, **kwargs)\n return data\n\n def save_device_using_post_with_http_info(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post_with_http_info(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device', 'access_token', 'entity_group_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device' not in params or params['device'] is None:\n raise ValueError(\n 'Missing the required parameter `device` when calling `save_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'access_token' in params:\n query_params.append(('accessToken', params['access_token']))\n if 'entity_group_id' in params:\n query_params.append(('entityGroupId', params['entity_group_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'device' in params:\n body_params = params['device']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/device{?accessToken,entityGroupId}', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='Device',\n auth_settings=auth_settings, async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n 
_request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-4": "<mask token>\n\n\nclass DeviceControllerApi(DeviceControllerApi):\n <mask token>\n\n def __init__(self, api_client=None):\n super(DeviceControllerApi, self).__init__(api_client)\n\n def claim_device_using_post(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n else:\n data = self.claim_device_using_post_with_http_info(device_name,\n **kwargs)\n return data\n\n def claim_device_using_post_with_http_info(self, device_name, **kwargs):\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post_with_http_info(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device_name', 'claim_request', 'sub_customer_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device_name' not in params or params['device_name'] is None:\n raise ValueError(\n 'Missing the required parameter `device_name` when calling `claim_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n if 'device_name' in params:\n path_params['deviceName'] = params['device_name']\n query_params = []\n if 'sub_customer_id' in params:\n query_params.append(('subCustomerId', params['sub_customer_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'claim_request' in params:\n body_params = params['claim_request']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/device/{deviceName}/claim{?subCustomerId}',\n 'POST', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='DeferredResultResponseEntity', auth_settings=\n auth_settings, async_req=params.get('async_req'),\n 
_return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_customer_devices_using_get(self, customer_id, page_size, page,\n **kwargs):\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_customer_devices_using_get_with_http_info(\n customer_id, page_size, page, **kwargs)\n else:\n data = self.get_customer_devices_using_get_with_http_info(\n customer_id, page_size, page, **kwargs)\n return data\n\n def get_customer_devices_using_get_with_http_info(self, customer_id,\n page_size, page, **kwargs):\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['customer_id', 'page_size', 'page', 'type',\n 'text_search', 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'customer_id' not in params or params['customer_id'] is None:\n raise ValueError(\n 'Missing the required parameter `customer_id` when calling `get_customer_devices_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_customer_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_customer_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n if 'customer_id' in params:\n path_params['customerId'] = params['customer_id']\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', 
params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/customer/{customerId}/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_devices_by_entity_group_id_using_get(self, entity_group_id,\n page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n else:\n data = (self.\n get_devices_by_entity_group_id_using_get_with_http_info(\n entity_group_id, page_size, page, **kwargs))\n return data\n\n def get_devices_by_entity_group_id_using_get_with_http_info(self,\n entity_group_id, page_size, page, **kwargs):\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['entity_group_id', 'page_size', 'page', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'entity_group_id' not in params or params['entity_group_id'\n ] is None:\n raise ValueError(\n 'Missing the required parameter `entity_group_id` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_devices_by_entity_group_id_using_get`'\n )\n if 'page_size' in params and params['page_size'] < 1.0:\n raise ValueError(\n 'Invalid value for parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `1.0`'\n )\n if 'page' in params and params['page'] < 0.0:\n raise ValueError(\n 'Invalid value for parameter `page` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `0.0`'\n 
)\n collection_formats = {}\n path_params = {}\n if 'entity_group_id' in params:\n path_params['entityGroupId'] = params['entity_group_id']\n query_params = []\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/entityGroup/{entityGroupId}/devices{?textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_tenant_devices_using_get(self, page_size, page, **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n else:\n data = self.get_tenant_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n return data\n\n def get_tenant_devices_using_get_with_http_info(self, page_size, page,\n **kwargs):\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['page_size', 'page', 'type', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required 
parameter `page_size` when calling `get_tenant_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_tenant_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/tenant/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def get_user_devices_using_get(self, page_size, page, **kwargs):\n \"\"\"getUserDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_user_devices_using_get(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_user_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n else:\n data = self.get_user_devices_using_get_with_http_info(page_size,\n page, **kwargs)\n return data\n\n def get_user_devices_using_get_with_http_info(self, page_size, page, **\n kwargs):\n \"\"\"getUserDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_user_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['page_size', 'page', 'type', 'text_search',\n 'sort_property', 'sort_order']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'page_size' not in params or params['page_size'] is None:\n raise ValueError(\n 'Missing the required parameter 
`page_size` when calling `get_user_devices_using_get`'\n )\n if 'page' not in params or params['page'] is None:\n raise ValueError(\n 'Missing the required parameter `page` when calling `get_user_devices_using_get`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type']))\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search']))\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property']))\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order']))\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size']))\n if 'page' in params:\n query_params.append(('page', params['page']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/user/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}'\n , 'GET', path_params, query_params, header_params, body=\n body_params, post_params=form_params, files=local_var_files,\n response_type='PageDataDevice', auth_settings=auth_settings,\n async_req=params.get('async_req'), _return_http_data_only=\n params.get('_return_http_data_only'), _preload_content=params.\n get('_preload_content', True), _request_timeout=params.get(\n '_request_timeout'), collection_formats=collection_formats)\n\n def save_device_using_post(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.save_device_using_post_with_http_info(device, **kwargs)\n else:\n data = self.save_device_using_post_with_http_info(device, **kwargs)\n return data\n\n def save_device_using_post_with_http_info(self, device, **kwargs):\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post_with_http_info(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n all_params = ['device', 'access_token', 'entity_group_id']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n if 'device' not in params or params['device'] is None:\n raise ValueError(\n 'Missing the required parameter `device` when calling `save_device_using_post`'\n )\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'access_token' in params:\n query_params.append(('accessToken', params['access_token']))\n if 'entity_group_id' in params:\n query_params.append(('entityGroupId', 
params['entity_group_id']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n if 'device' in params:\n body_params = params['device']\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n header_params['Content-Type'\n ] = self.api_client.select_header_content_type(['application/json']\n )\n auth_settings = ['X-Authorization']\n return self.api_client.call_api(\n '/api/device{?accessToken,entityGroupId}', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='Device',\n auth_settings=auth_settings, async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-5": "# coding: utf-8\n# Copyright 2020. ThingsBoard\n# #\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# #\n# http://www.apache.org/licenses/LICENSE-2.0\n# #\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom tb_rest_client.api.api_ce import DeviceControllerApi\n\n\nclass DeviceControllerApi(DeviceControllerApi):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n super(DeviceControllerApi, self).__init__(api_client)\n\n def claim_device_using_post(self, device_name, **kwargs): # noqa: E501\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.claim_device_using_post_with_http_info(device_name, **kwargs) # noqa: E501\n else:\n (data) = self.claim_device_using_post_with_http_info(device_name, **kwargs) # noqa: E501\n return data\n\n def claim_device_using_post_with_http_info(self, device_name, **kwargs): # noqa: E501\n \"\"\"claimDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.claim_device_using_post_with_http_info(device_name, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str device_name: deviceName (required)\n :param ClaimRequest claim_request: claimRequest\n :param str sub_customer_id: subCustomerId\n :return: DeferredResultResponseEntity\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['device_name', 'claim_request', 'sub_customer_id'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n \n params[key] = val\n del params['kwargs']\n # verify the required parameter 'device_name' is set\n if ('device_name' not in params or\n params['device_name'] is None):\n raise ValueError(\"Missing the required parameter `device_name` when calling `claim_device_using_post`\") # noqa: 
E501\n\n collection_formats = {}\n\n path_params = {}\n if 'device_name' in params:\n path_params['deviceName'] = params['device_name'] # noqa: E501\n\n query_params = []\n if 'sub_customer_id' in params:\n query_params.append(('subCustomerId', params['sub_customer_id'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'claim_request' in params:\n body_params = params['claim_request']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['X-Authorization'] # noqa: E501\n\n return self.api_client.call_api(\n '/api/customer/device/{deviceName}/claim{?subCustomerId}', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='DeferredResultResponseEntity', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_customer_devices_using_get(self, customer_id, page_size, page, **kwargs): # noqa: E501\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501\n else:\n (data) = self.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, **kwargs) # noqa: E501\n return data\n\n def get_customer_devices_using_get_with_http_info(self, customer_id, page_size, page, **kwargs): # noqa: E501\n \"\"\"getCustomerDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_customer_devices_using_get_with_http_info(customer_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str customer_id: customerId (required)\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['customer_id', 'page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n \n params[key] = val\n del params['kwargs']\n # verify the required parameter 'customer_id' is set\n if ('customer_id' not in params or\n params['customer_id'] is None):\n raise ValueError(\"Missing the required parameter `customer_id` when calling `get_customer_devices_using_get`\") # noqa: E501\n # verify the required parameter 'page_size' is set\n if ('page_size' not in params or\n params['page_size'] is None):\n raise ValueError(\"Missing the required parameter `page_size` when calling `get_customer_devices_using_get`\") # noqa: E501\n # verify the required parameter 'page' is set\n if ('page' not in params or\n params['page'] is None):\n raise ValueError(\"Missing the required parameter `page` when calling `get_customer_devices_using_get`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'customer_id' in params:\n path_params['customerId'] = params['customer_id'] # noqa: E501\n\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type'])) # noqa: E501\n if 'text_search' 
in params:\n query_params.append(('textSearch', params['text_search'])) # noqa: E501\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property'])) # noqa: E501\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order'])) # noqa: E501\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['X-Authorization'] # noqa: E501\n\n return self.api_client.call_api(\n '/api/customer/{customerId}/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PageDataDevice', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_devices_by_entity_group_id_using_get(self, entity_group_id, page_size, page, **kwargs): # noqa: E501\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501\n else:\n (data) = self.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, **kwargs) # noqa: E501\n return data\n\n def get_devices_by_entity_group_id_using_get_with_http_info(self, entity_group_id, page_size, page, **kwargs): # noqa: E501\n \"\"\"getDevicesByEntityGroupId # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_devices_by_entity_group_id_using_get_with_http_info(entity_group_id, page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str entity_group_id: entityGroupId (required)\n :param str page_size: Page size (required)\n :param str page: Page (required)\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['entity_group_id', 'page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n \n params[key] = val\n del params['kwargs']\n # verify the required parameter 'entity_group_id' is set\n if ('entity_group_id' not in params or\n params['entity_group_id'] is None):\n raise ValueError(\"Missing the required parameter `entity_group_id` when calling `get_devices_by_entity_group_id_using_get`\") # noqa: E501\n # verify the required parameter 'page_size' is set\n if ('page_size' not in params or\n params['page_size'] is None):\n raise ValueError(\"Missing the required parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`\") # noqa: E501\n # verify the required parameter 'page' is set\n if ('page' not in params or\n params['page'] is None):\n raise ValueError(\"Missing the required parameter `page` when calling `get_devices_by_entity_group_id_using_get`\") # noqa: E501\n\n if 'page_size' in params and params['page_size'] < 1.0: # noqa: E501\n raise ValueError(\"Invalid value for parameter `page_size` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal 
to `1.0`\") # noqa: E501\n if 'page' in params and params['page'] < 0.0: # noqa: E501\n raise ValueError(\"Invalid value for parameter `page` when calling `get_devices_by_entity_group_id_using_get`, must be a value greater than or equal to `0.0`\") # noqa: E501\n collection_formats = {}\n\n path_params = {}\n if 'entity_group_id' in params:\n path_params['entityGroupId'] = params['entity_group_id'] # noqa: E501\n\n query_params = []\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search'])) # noqa: E501\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property'])) # noqa: E501\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order'])) # noqa: E501\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['X-Authorization'] # noqa: E501\n\n return self.api_client.call_api(\n '/api/entityGroup/{entityGroupId}/devices{?textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PageDataDevice', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def 
get_tenant_devices_using_get(self, page_size, page, **kwargs): # noqa: E501\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_tenant_devices_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501\n else:\n (data) = self.get_tenant_devices_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501\n return data\n\n def get_tenant_devices_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501\n \"\"\"getTenantDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_tenant_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n \n params[key] = val\n del params['kwargs']\n # verify the required parameter 'page_size' is set\n if ('page_size' not in params or\n params['page_size'] is None):\n raise ValueError(\"Missing the required parameter `page_size` when calling `get_tenant_devices_using_get`\") # noqa: E501\n # verify the required parameter 'page' is set\n if ('page' not in params or\n params['page'] is None):\n raise ValueError(\"Missing the required parameter `page` when calling `get_tenant_devices_using_get`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type'])) # noqa: E501\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search'])) # noqa: E501\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property'])) # noqa: E501\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order'])) # noqa: E501\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size'])) # noqa: E501\n if 'page' in params:\n 
query_params.append(('page', params['page'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['X-Authorization'] # noqa: E501\n\n return self.api_client.call_api(\n '/api/tenant/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PageDataDevice', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_user_devices_using_get(self, page_size, page, **kwargs): # noqa: E501\n \"\"\"getUserDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_user_devices_using_get(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_user_devices_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501\n else:\n (data) = self.get_user_devices_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501\n return data\n\n def get_user_devices_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501\n \"\"\"getUserDevices # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.get_user_devices_using_get_with_http_info(page_size, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param str page_size: pageSize (required)\n :param str page: page (required)\n :param str type: type\n :param str text_search: textSearch\n :param str sort_property: sortProperty\n :param str sort_order: sortOrder\n :return: PageDataDevice\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['page_size', 'page', 'type', 'text_search', 'sort_property', 'sort_order'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n \n params[key] = val\n del params['kwargs']\n # verify the required parameter 'page_size' is set\n if ('page_size' not in params or\n params['page_size'] is None):\n raise ValueError(\"Missing the required parameter `page_size` when calling `get_user_devices_using_get`\") # noqa: E501\n # verify the required parameter 'page' is set\n if ('page' not in params or\n params['page'] is None):\n raise ValueError(\"Missing the required parameter `page` when calling `get_user_devices_using_get`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'type' in params:\n query_params.append(('type', params['type'])) # noqa: E501\n if 'text_search' in params:\n query_params.append(('textSearch', params['text_search'])) # noqa: E501\n if 'sort_property' in params:\n query_params.append(('sortProperty', params['sort_property'])) # noqa: E501\n if 'sort_order' in params:\n query_params.append(('sortOrder', params['sort_order'])) # noqa: E501\n if 'page_size' in params:\n query_params.append(('pageSize', params['page_size'])) # noqa: E501\n if 'page' in params:\n 
query_params.append(('page', params['page'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['X-Authorization'] # noqa: E501\n\n return self.api_client.call_api(\n '/api/user/devices{?type,textSearch,sortProperty,sortOrder,pageSize,page}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='PageDataDevice', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def save_device_using_post(self, device, **kwargs): # noqa: E501\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.save_device_using_post_with_http_info(device, **kwargs) # noqa: E501\n else:\n (data) = self.save_device_using_post_with_http_info(device, **kwargs) # noqa: E501\n return data\n\n def save_device_using_post_with_http_info(self, device, **kwargs): # noqa: E501\n \"\"\"saveDevice # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api_pe.save_device_using_post_with_http_info(device, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param Device device: device (required)\n :param str access_token: accessToken\n :param str entity_group_id: entityGroupId\n :return: Device\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['device', 'access_token', 'entity_group_id'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n \n params[key] = val\n del params['kwargs']\n # verify the required parameter 'device' is set\n if ('device' not in params or\n params['device'] is None):\n raise ValueError(\"Missing the required parameter `device` when calling `save_device_using_post`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'access_token' in params:\n 
query_params.append(('accessToken', params['access_token'])) # noqa: E501\n if 'entity_group_id' in params:\n query_params.append(('entityGroupId', params['entity_group_id'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'device' in params:\n body_params = params['device']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['X-Authorization'] # noqa: E501\n\n return self.api_client.call_api(\n '/api/device{?accessToken,entityGroupId}', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Device', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-ids": [
10,
11,
13,
14,
17
]
}
|
[
10,
11,
13,
14,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(my_list)
print(lista_impares)
print('')
<|reserved_special_token_0|>
print(my_list)
print(lista_pares)
<|reserved_special_token_1|>
my_list = [1, 4, 5, 6, 9, 13, 19, 21]
lista_impares = [num for num in my_list if num % 2 != 0]
print(my_list)
print(lista_impares)
print('')
lista_pares = list(filter(lambda x: x % 2 == 0, my_list))
print(my_list)
print(lista_pares)
<|reserved_special_token_1|>
# Obtener en otra lista unicamente números impares:
my_list = [1, 4, 5, 6, 9, 13, 19, 21]
# Vamos a hacer una list comprehension:
lista_impares = [num for num in my_list if num % 2 != 0]
print(my_list)
print(lista_impares)
print('')
# Vamos a usar filter:
lista_pares = list(filter(lambda x: x % 2 == 0 , my_list))
print(my_list)
print(lista_pares)
|
flexible
|
{
"blob_id": "e1913c80375e4871119182d0267e9f228818624f",
"index": 4309,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(my_list)\nprint(lista_impares)\nprint('')\n<mask token>\nprint(my_list)\nprint(lista_pares)\n",
"step-3": "my_list = [1, 4, 5, 6, 9, 13, 19, 21]\nlista_impares = [num for num in my_list if num % 2 != 0]\nprint(my_list)\nprint(lista_impares)\nprint('')\nlista_pares = list(filter(lambda x: x % 2 == 0, my_list))\nprint(my_list)\nprint(lista_pares)\n",
"step-4": "# Obtener en otra lista unicamente números impares:\n\nmy_list = [1, 4, 5, 6, 9, 13, 19, 21]\n\n# Vamos a hacer una list comprehension:\nlista_impares = [num for num in my_list if num % 2 != 0]\nprint(my_list)\nprint(lista_impares)\nprint('')\n\n\n# Vamos a usar filter:\nlista_pares = list(filter(lambda x: x % 2 == 0 , my_list))\nprint(my_list)\nprint(lista_pares)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register(
'file','',VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'File path for storing output')
options.parseArguments()
file_path = options.file
#print file_path
process = cms.Process("RawAnalyzer")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # -1 means run on all events
#default is HcalTBSource but you can change to PoolSource if you like
#process.source = cms.Source("HcalTBSource",
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
#'root://eoscms//eos/cms/store/data/Run2015B/SingleMuon/RAW/v1/000/251/162/00000/0050EEC0-AD25-E511-9A32-02163E011962.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223708.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223495.root' #HO pedestal
# '/store/group/comm_hcal/LS1/USC_223495.root' #HO pedestal, local
# '/store/group/comm_hcal/LS1/USC_222759.root'
# '/store/group/comm_hcal/LS1/USC_223775.root'
# '/store/group/comm_hcal/LS1/USC_224285.root' #not sure, takend 31/7/2014
# '/store/group/comm_hcal/LS1/USC_224625.root'
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/20C23681-852B-E511-9FBC-02163E01413E.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/369E8A59-802B-E511-B85E-02163E01259F.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/488F97C1-8F2B-E511-86B8-02163E0144D2.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/FAE69354-7E2B-E511-80D7-02163E0125C8.root'
)
)
process.analyzer = cms.EDAnalyzer('RawAnalyzer',
debugit = cms.untracked.bool(False),
outputFile = cms.untracked.string(file_path),
badevlist = cms.vint32(
153647285, 152905909, 153143477, 153217205, 151718625, 153024693, 150641153, 151460577,
152364043, 152889525, 153151669, 151148928, 153471157, 149944833, 151407329, 152529024,
150403585, 151124352, 152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313, 152910005, 153348277,
154002162, 149846529, 150489601, 150526465, 151370465, 152959157, 153262261, 153916146,
150202881, 152750261, 153004213),
modval = cms.untracked.int32(112)
)
process.TFileService = cms.Service("TFileService",fileName = cms.string("RawAnalyzer.root") )
process.MessageLogger.cerr.FwkReport.reportEvery = 2000 #type out ever <n> events
process.p = cms.Path(process.analyzer)
|
normal
|
{
"blob_id": "6aff61ce5cef537e6b1b19e382d8bf80e3a61693",
"index": 1423,
"step-1": "<mask token>\n",
"step-2": "<mask token>\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\n<mask token>\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\n<mask token>\n",
"step-3": "<mask token>\noptions = VarParsing.VarParsing()\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\nprocess = cms.Process('RawAnalyzer')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nprocess.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n 'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n ))\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool\n (False), outputFile=cms.untracked.string(file_path), badevlist=cms.\n vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,\n 150641153, 151460577, 152364043, 152889525, 153151669, 151148928, \n 153471157, 149944833, 151407329, 152529024, 150403585, 151124352, \n 152368139, 152451200, 152950965, 153135285, 154125042, 154268402, \n 152261643, 150718977, 152737973, 153409717, 153800866, 151321313, \n 152910005, 153348277, 154002162, 149846529, 150489601, 150526465, \n 151370465, 152959157, 153262261, 153916146, 150202881, 152750261, \n 153004213), modval=cms.untracked.int32(112))\nprocess.TFileService = cms.Service('TFileService', fileName=cms.string(\n 'RawAnalyzer.root'))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000\nprocess.p = cms.Path(process.analyzer)\n",
"step-4": "import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\noptions = VarParsing.VarParsing()\noptions.register('file', '', VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string, 'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\nprocess = cms.Process('RawAnalyzer')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\nprocess.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(\n 'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n ))\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool\n (False), outputFile=cms.untracked.string(file_path), badevlist=cms.\n vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,\n 150641153, 151460577, 152364043, 152889525, 153151669, 151148928, \n 153471157, 149944833, 151407329, 152529024, 150403585, 151124352, \n 152368139, 152451200, 152950965, 153135285, 154125042, 154268402, \n 152261643, 150718977, 152737973, 153409717, 153800866, 151321313, \n 152910005, 153348277, 154002162, 149846529, 150489601, 150526465, \n 151370465, 152959157, 153262261, 153916146, 150202881, 152750261, \n 153004213), modval=cms.untracked.int32(112))\nprocess.TFileService = cms.Service('TFileService', fileName=cms.string(\n 'RawAnalyzer.root'))\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000\nprocess.p = cms.Path(process.analyzer)\n",
"step-5": "import FWCore.ParameterSet.Config as cms\nimport FWCore.ParameterSet.VarParsing as VarParsing\noptions = VarParsing.VarParsing()\noptions.register(\n\t'file','',VarParsing.VarParsing.multiplicity.singleton,\n\tVarParsing.VarParsing.varType.string,\n\t'File path for storing output')\noptions.parseArguments()\nfile_path = options.file\n#print file_path\n\nprocess = cms.Process(\"RawAnalyzer\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # -1 means run on all events\n\n#default is HcalTBSource but you can change to PoolSource if you like\n#process.source = cms.Source(\"HcalTBSource\",\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' with the source file you want to use\n fileNames = cms.untracked.vstring(\n#'root://eoscms//eos/cms/store/data/Run2015B/SingleMuon/RAW/v1/000/251/162/00000/0050EEC0-AD25-E511-9A32-02163E011962.root'\n# 'file:/afs/cern.ch/user/d/drew/USC_223708.root'\n# 'file:/afs/cern.ch/user/d/drew/USC_223495.root' #HO pedestal\n# '/store/group/comm_hcal/LS1/USC_223495.root' #HO pedestal, local\n# '/store/group/comm_hcal/LS1/USC_222759.root'\n# '/store/group/comm_hcal/LS1/USC_223775.root'\n#\t '/store/group/comm_hcal/LS1/USC_224285.root' #not sure, takend 31/7/2014\n# '/store/group/comm_hcal/LS1/USC_224625.root'\n'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/20C23681-852B-E511-9FBC-02163E01413E.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/369E8A59-802B-E511-B85E-02163E01259F.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/488F97C1-8F2B-E511-86B8-02163E0144D2.root'\n#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/FAE69354-7E2B-E511-80D7-02163E0125C8.root'\n 
)\n)\n\nprocess.analyzer = cms.EDAnalyzer('RawAnalyzer',\n\tdebugit = cms.untracked.bool(False),\n\toutputFile = cms.untracked.string(file_path),\n\tbadevlist = cms.vint32(\n\t153647285,\t152905909,\t153143477,\t153217205,\t151718625,\t153024693,\t150641153,\t151460577,\n\t152364043,\t152889525,\t153151669,\t151148928,\t153471157,\t149944833,\t151407329,\t152529024,\n\t150403585,\t151124352,\t152368139,\t152451200,\t152950965,\t153135285,\t154125042,\t154268402,\n\t152261643,\t150718977,\t152737973,\t153409717,\t153800866,\t151321313,\t152910005,\t153348277,\n\t154002162,\t149846529,\t150489601,\t150526465,\t151370465,\t152959157,\t153262261,\t153916146,\n\t150202881,\t152750261, 153004213),\n\tmodval = cms.untracked.int32(112)\n)\nprocess.TFileService = cms.Service(\"TFileService\",fileName = cms.string(\"RawAnalyzer.root\") )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 2000 #type out ever <n> events\nprocess.p = cms.Path(process.analyzer)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in sys.stdin:
edges = [int(x) for x in line.split('x')]
edges.sort()
ribbon = sum(x * 2 for x in edges[:2])
l, w, h = edges
bow = l * w * h
total += bow + ribbon
print(total)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
total = 0
for line in sys.stdin:
edges = [int(x) for x in line.split('x')]
edges.sort()
ribbon = sum(x * 2 for x in edges[:2])
l, w, h = edges
bow = l * w * h
total += bow + ribbon
print(total)
<|reserved_special_token_1|>
import sys
total = 0
for line in sys.stdin:
edges = [int(x) for x in line.split('x')]
edges.sort()
ribbon = sum(x * 2 for x in edges[:2])
l, w, h = edges
bow = l * w * h
total += bow + ribbon
print(total)
<|reserved_special_token_1|>
#!/usr/bin/env python
import sys
total = 0
for line in sys.stdin:
edges = [int(x) for x in line.split("x")]
edges.sort()
ribbon = sum(x * 2 for x in edges[:2])
l, w, h = edges
bow = l * w * h
total += bow + ribbon
print(total)
|
flexible
|
{
"blob_id": "ed85cb61f4bc8bf758dafb10ffbabf87fb4521d0",
"index": 9281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-3": "<mask token>\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-4": "import sys\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-5": "#!/usr/bin/env python\n\nimport sys\n\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split(\"x\")]\n\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n\n l, w, h = edges\n bow = l * w * h\n\n total += bow + ribbon\n\nprint(total)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import psycopg2
from .configuration import ConfigurationException
DB_CONNECT_STRING = "host='{host}' dbname='{dbname}' user='{user}' password='{passwd}'"
class DBItemCompany:
def __init__(self, _id, tweeter, category, categoryUrl, provenScore, ranking, location, url, categoryId):
self.id = _id
self.twitterAccount = tweeter
self.category = category
self.categoryUrl = categoryUrl
self.provenScore = provenScore
self.ranking = ranking
self.location = location
self.url = url
self.categoryId = categoryId
@property
def invalidScore(self):
return self.provenScore is None or self.provenScore < 1
@property
def twitter(self):
return '@' + self.twitterAccount
class DBException(Exception):
"""
Represents a generic exception thrown by the Database Manager
"""
pass
class DBManager:
def __init__(self, cfg):
self.cfg = cfg
self.__companies = {}
self.__loggedIn = False
self.connection = None
self.cursor = None
def __del__(self):
try:
self.connection.close()
except psycopg2.Error:
pass
def __logInDb(self):
try:
dbSettings = self.cfg.databaseSettings
self.connection = psycopg2.connect(DB_CONNECT_STRING.format(
host=dbSettings[0], dbname=dbSettings[1],
user=dbSettings[2], passwd=dbSettings[3]
))
self.cursor = self.connection.cursor()
self.__loggedIn = True
return True
except (psycopg2.OperationalError, ConfigurationException):
return False
def __getDomainName(self, schema):
try:
self.cursor.execute("SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'".format(schemaname=schema))
return 'http://' + self.cursor.fetchone()[0]
except psycopg2.DatabaseError:
raise DBException('Failed to extract domain name from database')
def __buildCategoryUrl(self, catId, schemaName):
return '{domain}/vendors/?find=category-{categoryId}'.format(domain=self.__getDomainName(schemaName), categoryId=catId)
def __buildProfileUrl(self, catSlug, profSlug, schemaName):
return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain=self.__getDomainName(schemaName),
categorySlug=catSlug,
profileSlug=profSlug)
def __buildProfileUrlWOCategory(self, profSlug, schemaName):
return '{domain}/vendors/{profileSlug}'.format(domain=self.__getDomainName(schemaName), profileSlug=profSlug)
def __getCompaniesData(self, schema):
"""
Load Companies list from database
"""
try:
self.cursor.execute("""SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE
twitter <> ''""".format(schema=schema))
data = self.cursor.fetchall()
companies = []
for entry in data:
self.cursor.execute('SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'.format(schema=schema, vendor=entry[0]))
cities = self.cursor.fetchall()
if cities is None:
continue
city = ''
for cityId in cities:
self.cursor.execute('SELECT city FROM {schema}.locations_location WHERE id = {city}'.format(schema=schema, city=cityId[0]))
cityName = self.cursor.fetchone()
if cityName is not None:
city += cityName[0]
self.cursor.execute('SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND "primary" is true'.format(schema=schema, vendor=entry[0]))
customKind = self.cursor.fetchone()
if customKind is None:
catId = rank = None
else:
catId, rank = customKind
if catId is not None:
self.cursor.execute('SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'.format(schema=schema, cat=catId))
catData = self.cursor.fetchone()
else:
catData = None
companies.append(DBItemCompany(
_id = entry[0],
tweeter = entry[1],
category = catData[0] if catData is not None else None,
categoryUrl = self.__buildCategoryUrl(catId, schema) if catId is not None else None,
provenScore = entry[2],
ranking = rank,
location = city,
url = self.__buildProfileUrl(catData[1], entry[3], schema) if catData is not None else self.__buildProfileUrlWOCategory(entry[3], schema),
categoryId = catId
))
self.__companies[schema] = companies
except psycopg2.DatabaseError as err:
raise DBException(err.args[0])
def domainUrl(self, schema):
return self.__getDomainName(schema)
def refreshData(self, schemas):
if not self.__loggedIn:
if not self.__logInDb():
return False
for schema in schemas:
self.__getCompaniesData(schema)
return True
@property
def companies(self):
return self.__companies
    @property
    def isConnected(self):
        """True once a database session has been successfully established."""
        return self.__loggedIn
|
normal
|
{
"blob_id": "31b87a3ceca1f48665ecc9754d5f87bb9b7bbf13",
"index": 7579,
"step-1": "<mask token>\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n =self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for 
entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-2": "<mask token>\n\n\nclass DBItemCompany:\n\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore,\n ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n <mask token>\n <mask token>\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n =self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return 
'{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n 
return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-3": "<mask token>\n\n\nclass DBItemCompany:\n\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore,\n ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n <mask token>\n\n @property\n def twitter(self):\n return '@' + self.twitterAccount\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n =self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def 
__buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n 
raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-4": "<mask token>\n\n\nclass DBItemCompany:\n\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore,\n ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n\n @property\n def invalidScore(self):\n return self.provenScore is None or self.provenScore < 1\n\n @property\n def twitter(self):\n return '@' + self.twitterAccount\n\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\n\nclass DBManager:\n\n def __init__(self, cfg):\n self.cfg = cfg\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1], user=dbSettings[2\n ], passwd=dbSettings[3]))\n self.cursor = self.connection.cursor()\n self.__loggedIn = True\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\n \"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\"\n .format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain\n =self.__getDomainName(schemaName), categoryId=catId)\n\n def __buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain\n 
=self.__getDomainName(schemaName), categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.\n __getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\n \"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\"\n .format(schema=schema))\n data = self.cursor.fetchall()\n companies = []\n for entry in data:\n self.cursor.execute(\n 'SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'\n .format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n if cities is None:\n continue\n city = ''\n for cityId in cities:\n self.cursor.execute(\n 'SELECT city FROM {schema}.locations_location WHERE id = {city}'\n .format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n if cityName is not None:\n city += cityName[0]\n self.cursor.execute(\n 'SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'\n .format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n if catId is not None:\n self.cursor.execute(\n 'SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'\n .format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n companies.append(DBItemCompany(_id=entry[0], tweeter=entry[\n 1], category=catData[0] if catData is not None else\n None, categoryUrl=self.__buildCategoryUrl(catId, schema\n ) if catId is not None else None, provenScore=entry[2],\n ranking=rank, location=city, url=self.__buildProfileUrl\n (catData[1], entry[3], schema) if catData is not None else\n self.__buildProfileUrlWOCategory(entry[3], schema),\n 
categoryId=catId))\n self.__companies[schema] = companies\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n for schema in schemas:\n self.__getCompaniesData(schema)\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-5": "import psycopg2\n\nfrom .configuration import ConfigurationException\n\nDB_CONNECT_STRING = \"host='{host}' dbname='{dbname}' user='{user}' password='{passwd}'\"\n\nclass DBItemCompany:\n def __init__(self, _id, tweeter, category, categoryUrl, provenScore, ranking, location, url, categoryId):\n self.id = _id\n self.twitterAccount = tweeter\n self.category = category\n self.categoryUrl = categoryUrl\n self.provenScore = provenScore\n self.ranking = ranking\n self.location = location\n self.url = url\n self.categoryId = categoryId\n\n @property\n def invalidScore(self):\n return self.provenScore is None or self.provenScore < 1\n\n @property\n def twitter(self):\n return '@' + self.twitterAccount\n\nclass DBException(Exception):\n \"\"\"\n Represents a generic exception thrown by the Database Manager\n \"\"\"\n pass\n\nclass DBManager:\n def __init__(self, cfg):\n self.cfg = cfg\n\n self.__companies = {}\n self.__loggedIn = False\n self.connection = None\n self.cursor = None\n\n def __del__(self):\n try:\n self.connection.close()\n except psycopg2.Error:\n pass\n\n def __logInDb(self):\n try:\n dbSettings = self.cfg.databaseSettings\n\n self.connection = psycopg2.connect(DB_CONNECT_STRING.format(\n host=dbSettings[0], dbname=dbSettings[1],\n user=dbSettings[2], passwd=dbSettings[3]\n ))\n self.cursor = self.connection.cursor()\n\n self.__loggedIn = True\n\n return True\n except (psycopg2.OperationalError, ConfigurationException):\n return False\n\n def __getDomainName(self, schema):\n try:\n self.cursor.execute(\"SELECT domain_url FROM customers_customer WHERE schema_name='{schemaname}'\".format(schemaname=schema))\n return 'http://' + self.cursor.fetchone()[0]\n except psycopg2.DatabaseError:\n raise DBException('Failed to extract domain name from database')\n\n def __buildCategoryUrl(self, catId, schemaName):\n return '{domain}/vendors/?find=category-{categoryId}'.format(domain=self.__getDomainName(schemaName), categoryId=catId)\n\n def 
__buildProfileUrl(self, catSlug, profSlug, schemaName):\n return '{domain}/vendors/{categorySlug}/{profileSlug}'.format(domain=self.__getDomainName(schemaName),\n categorySlug=catSlug,\n profileSlug=profSlug)\n\n def __buildProfileUrlWOCategory(self, profSlug, schemaName):\n return '{domain}/vendors/{profileSlug}'.format(domain=self.__getDomainName(schemaName), profileSlug=profSlug)\n\n def __getCompaniesData(self, schema):\n \"\"\"\n Load Companies list from database\n \"\"\"\n try:\n self.cursor.execute(\"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\".format(schema=schema))\n data = self.cursor.fetchall()\n\n companies = []\n for entry in data:\n self.cursor.execute('SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'.format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n\n if cities is None:\n continue\n\n city = ''\n\n for cityId in cities:\n self.cursor.execute('SELECT city FROM {schema}.locations_location WHERE id = {city}'.format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n\n if cityName is not None:\n city += cityName[0]\n\n self.cursor.execute('SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'.format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n\n if catId is not None:\n self.cursor.execute('SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'.format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n\n companies.append(DBItemCompany(\n _id = entry[0],\n tweeter = entry[1],\n category = catData[0] if catData is not None else None,\n categoryUrl = self.__buildCategoryUrl(catId, schema) if catId is not None else None,\n provenScore = entry[2],\n ranking = rank,\n location = city,\n url = 
self.__buildProfileUrl(catData[1], entry[3], schema) if catData is not None else self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId = catId\n ))\n\n self.__companies[schema] = companies\n\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])\n\n def domainUrl(self, schema):\n return self.__getDomainName(schema)\n\n def refreshData(self, schemas):\n if not self.__loggedIn:\n if not self.__logInDb():\n return False\n\n for schema in schemas:\n self.__getCompaniesData(schema)\n\n return True\n\n @property\n def companies(self):\n return self.__companies\n\n @property\n def isConnected(self):\n return self.__loggedIn\n",
"step-ids": [
15,
17,
18,
19,
22
]
}
|
[
15,
17,
18,
19,
22
] |
# config {stack, buffer, label}
def get_features_da(config, sent_dict):
    """Extract transition-parser features for one parser configuration.

    Args:
        config: configuration ``(stack, buffer, ...)`` where ``config[0]`` is
            the stack and ``config[1]`` the buffer, both holding token indices
            into ``sent_dict``'s columns.  Per the original implementation's
            comment, the buffer is stored in descending order, so its front
            token is the *last* element.
        sent_dict: CoNLL-style columns for the sentence; must provide
            ``'FORM'``, ``'LEMMA'`` and ``'CPOSTAG'`` sequences indexable by
            token index.

    Returns:
        list[str]: feature strings such as ``'TOP_STK_TOKEN_cat'``; each is a
        fixed prefix plus the lower-cased column value.
    """
    stack, buf = config[0], config[1]
    features = []

    def add(prefix, column, index):
        # All features share the "<PREFIX><lower-cased value>" shape.
        features.append(prefix + str(sent_dict[column][index].lower()))

    if len(stack) > 0:
        top = stack[-1]
        add('TOP_STK_TOKEN_', 'FORM', top)
        add('TOP_STK_LEMMA_', 'LEMMA', top)
        add('TOP_STK_CPOSTAG_', 'CPOSTAG', top)
    if len(buf) > 0:
        front = buf[-1]
        # NOTE(review): 'TOP_BUFFER_TOKEN' has no trailing underscore in the
        # original; kept byte-identical so feature names remain compatible
        # with any previously trained model.
        add('TOP_BUFFER_TOKEN', 'FORM', front)
        add('TOP_BUFFER_LEMMA_', 'LEMMA', front)
        add('TOP_BUFFER_CPOSTAG_', 'CPOSTAG', front)
    if len(stack) > 1:
        add('TWO_STK_CPOSTAG_', 'CPOSTAG', stack[-2])
    if len(buf) > 1:
        add('TWO_BUFFER_TOKEN_', 'FORM', buf[-2])
        add('TWO_BUFFER_CPOSTAG_', 'CPOSTAG', buf[-2])
    if len(buf) > 2:
        add('THREE_BUFFER_CPOSTAG_', 'CPOSTAG', buf[-3])
    if len(buf) > 3:
        add('FOUR_BUFFER_CPOSTAG_', 'CPOSTAG', buf[-4])
    return features
|
normal
|
{
"blob_id": "e0ce8a8ad9c842b013bbb1ea1c585b6c4c2a68f5",
"index": 2868,
"step-1": "<mask token>\n",
"step-2": "def get_features_da(config, sent_dict):\n features = []\n if len(config[0]) > 0:\n top = config[0][-1]\n top_stk_token_feature = 'TOP_STK_TOKEN_' + str(sent_dict['FORM'][\n top].lower())\n features.append(top_stk_token_feature)\n top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower())\n features.append(top_stk_lemma)\n top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top\n ].lower())\n features.append(top_stk_cpostag)\n if len(config[1]) > 0:\n top_buffer = config[1][-1]\n top_buffer_token_feature = 'TOP_BUFFER_TOKEN' + str(sent_dict[\n 'FORM'][top_buffer].lower())\n features.append(top_buffer_token_feature)\n top_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][\n top_buffer].lower())\n features.append(top_buffer_lemma)\n top_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][top_buffer].lower())\n features.append(top_buffer_cpostag)\n if len(config[0]) > 1:\n two = config[0][-2]\n two_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two\n ].lower())\n features.append(two_stk_cpostag)\n if len(config[1]) > 1:\n two_buffer = config[1][-2]\n two_buffer_token = 'TWO_BUFFER_TOKEN_' + str(sent_dict['FORM'][\n two_buffer].lower())\n features.append(two_buffer_token)\n two_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][two_buffer].lower())\n features.append(two_buffer_cpostag)\n if len(config[1]) > 2:\n three_buffer = config[1][-3]\n three_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][three_buffer].lower())\n features.append(three_buffer_cpostag)\n if len(config[1]) > 3:\n four_buffer = config[1][-4]\n four_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict[\n 'CPOSTAG'][four_buffer].lower())\n features.append(four_buffer_cpostag)\n return features\n",
"step-3": "# config {stack,buffer,label}\ndef get_features_da(config,sent_dict):\n features = []\n\n # TODO Improve Features\n \n if len(config[0]) > 0:\n # Top of stack.\n top = config[0][-1] \n \n top_stk_token_feature = 'TOP_STK_TOKEN_'+str(sent_dict['FORM'][top].lower())\n features.append(top_stk_token_feature)\n\t\n top_stk_lemma = 'TOP_STK_LEMMA_' + str(sent_dict['LEMMA'][top].lower()) # not converting to lower has helped to increase the f1 score slightly\n features.append(top_stk_lemma)\n\n top_stk_cpostag = 'TOP_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][top].lower())\n features.append(top_stk_cpostag)\n\t\n \n if len(config[1]) > 0:\n \ttop_buffer = config[1][-1] # top of buffer, since it is in descending order\n\n \ttop_buffer_token_feature = 'TOP_BUFFER_TOKEN'+str(sent_dict['FORM'][top_buffer].lower())\n \tfeatures.append(top_buffer_token_feature)\n\n \ttop_buffer_lemma = 'TOP_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][top_buffer].lower())\n \tfeatures.append(top_buffer_lemma)\n\n \ttop_buffer_cpostag = 'TOP_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][top_buffer].lower())\n \tfeatures.append(top_buffer_cpostag)\n\t\n\n if len(config[0]) > 1:\n \ttwo = config[0][-2] # 2nd from top in stack\n \t\n \t# two_stk_token = 'two_stk_token_'+str(sent_dict['FORM'][two].lower())\n \t# features.append(two_stk_token)\n\n \t# two_stk_lemma = 'TWO_STK_LEMMA_' + str(sent_dict['LEMMA'][two].lower())\n \t# features.append(two_stk_lemma)\n\n \ttwo_stk_cpostag = 'TWO_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][two].lower())\n \tfeatures.append(two_stk_cpostag)\n\n if len(config[1]) > 1:\n \ttwo_buffer = config[1][-2] # 2nd from top in buffer\n\n \ttwo_buffer_token = 'TWO_BUFFER_TOKEN_'+str(sent_dict['FORM'][two_buffer].lower())\n \tfeatures.append(two_buffer_token)\n\n \t# two_buffer_lemma = 'TWO_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][two_buffer])\n \t# features.append(two_buffer_lemma)\n\n \ttwo_buffer_cpostag = 'TWO_BUFFER_CPOSTAG_' + 
str(sent_dict['CPOSTAG'][two_buffer].lower())\n \tfeatures.append(two_buffer_cpostag)\n\t\n\n # if len(config[0]) > 2:\n # \tthree = config[0][-3] # 3rd from top in stack\n\n # \tthree_stk_lemma = 'THREE_STACK_LEMMA_' + str(sent_dict['LEMMA'][three])\n # \tfeatures.append(three_stk_lemma)\n\n # \tthree_stk_cpostag = 'THREE_STACK_CPOSTAG_' + str(sent_dict['CPOSTAG'][three].lower())\n # \tfeatures.append(three_stk_cpostag)\n\n if len(config[1]) > 2:\n \tthree_buffer = config[1][-3] # 3rd from top in buffer\n\n \t# three_buffer_lemma = 'THREE_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][three_buffer].lower())\n \t# features.append(three_buffer_lemma)\n\n \tthree_buffer_cpostag = 'THREE_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][three_buffer].lower())\n \tfeatures.append(three_buffer_cpostag)\n\n # if len(config[0]) > 3:\n # \tfour = config[0][-4] # 4th from top in stack\n\n # \tfour_stk_lemma = 'FOUR_STK_LEMMA_' + str(sent_dict['LEMMA'][four].lower())\n # \tfeatures.append(four_stk_lemma)\n\n # \tfour_stk_cpostag = 'FOUR_STK_CPOSTAG_' + str(sent_dict['CPOSTAG'][four].lower())\n # \tfeatures.append(four_stk_cpostag)\n\n if len(config[1]) > 3:\n \tfour_buffer = config[1][-4] # 4th from top in buffer\n\n \t# four_buffer_lemma = 'FOUR_BUFFER_LEMMA_' + str(sent_dict['LEMMA'][four_buffer].lower())\n \t# features.append(four_buffer_lemma)\n\n \tfour_buffer_cpostag = 'FOUR_BUFFER_CPOSTAG_' + str(sent_dict['CPOSTAG'][four_buffer].lower())\n \tfeatures.append(four_buffer_cpostag)\n\n\n return features\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from gym_mag.envs.mag_control_env import MagControlEnv
|
flexible
|
{
"blob_id": "dd7896e3beb5e33282b38efe0a4fc650e629b185",
"index": 5081,
"step-1": "<mask token>\n",
"step-2": "from gym_mag.envs.mag_control_env import MagControlEnv\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def get_files1(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
for file in glob.glob(os.path.join(dirname, '*')):
if os.stat(file).st_size >= size_in_kb * ONE_KB:
yield file
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_files(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
return (filename for _, _, files in os.walk(dirname) for filename in
files if int(filename) >= size_in_kb * ONE_KB)
def get_files1(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
for file in glob.glob(os.path.join(dirname, '*')):
if os.stat(file).st_size >= size_in_kb * ONE_KB:
yield file
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ONE_KB = 1024
def get_files(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
return (filename for _, _, files in os.walk(dirname) for filename in
files if int(filename) >= size_in_kb * ONE_KB)
def get_files1(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
for file in glob.glob(os.path.join(dirname, '*')):
if os.stat(file).st_size >= size_in_kb * ONE_KB:
yield file
<|reserved_special_token_1|>
import os
import glob
ONE_KB = 1024
def get_files(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
return (filename for _, _, files in os.walk(dirname) for filename in
files if int(filename) >= size_in_kb * ONE_KB)
def get_files1(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
for file in glob.glob(os.path.join(dirname, '*')):
if os.stat(file).st_size >= size_in_kb * ONE_KB:
yield file
<|reserved_special_token_1|>
import os
import glob
ONE_KB = 1024
def get_files(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
return (
filename
for _, _, files in os.walk(dirname)
for filename in files
if int(filename) >= size_in_kb * ONE_KB
)
# Pybites solution
def get_files1(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
for file in glob.glob(os.path.join(dirname, "*")):
if os.stat(file).st_size >= size_in_kb * ONE_KB:
yield file
|
flexible
|
{
"blob_id": "0dec0f04cfe891eea74ef45484fa7433e3429dcd",
"index": 7570,
"step-1": "<mask token>\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-2": "<mask token>\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (filename for _, _, files in os.walk(dirname) for filename in\n files if int(filename) >= size_in_kb * ONE_KB)\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-3": "<mask token>\nONE_KB = 1024\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (filename for _, _, files in os.walk(dirname) for filename in\n files if int(filename) >= size_in_kb * ONE_KB)\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-4": "import os\nimport glob\nONE_KB = 1024\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (filename for _, _, files in os.walk(dirname) for filename in\n files if int(filename) >= size_in_kb * ONE_KB)\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-5": "import os\nimport glob\n\nONE_KB = 1024\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (\n filename\n for _, _, files in os.walk(dirname)\n for filename in files\n if int(filename) >= size_in_kb * ONE_KB\n )\n\n\n# Pybites solution\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, \"*\")):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
"""
Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the
center
:param data: The numpy array to extract from
:param center: The point around which to extract
:param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is
center)
:return: The extracted area
"""
imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)
imin = np.clip(center - half_size, 0, data.shape).astype(np.int)
subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]
max_missing = (center + half_size + 1 - imax).astype(np.int)
min_missing = (imin - (center - half_size)).astype(np.int)
return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(
3)], mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,
float]], center_crop: bool=True):
"""
Get a cropped view of a 3d numpy array (does not modify the input)
:param data: The numpy array to crop
:param crop: The percentage to crop in each dimension
:param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from
(0, 0, 0)
:return: The cropped view
"""
if type(crop) == float or type(crop) == int:
if crop > 0.99999:
return data
icropx = 1 - crop
icropy = 1 - crop
icropz = 1 - crop
else:
icropx = 1 - crop[0]
icropy = 1 - crop[1]
icropz = 1 - crop[2]
w, h, l = data.shape
if center_crop:
view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *
icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *
icropz)]
else:
view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *
(1 - icropz))]
return view
<|reserved_special_token_0|>
def threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:
"""
Compute the Otsu threshold for a numpy array, without taking into account empty areas
:param image: The volume to compute the threshold for
:param nbins: The number of bins used
:param ignore: The value to ignore
:return: The Otsu threshold
"""
from skimage.filters.thresholding import histogram
if image.min() == image.max():
raise ValueError(
'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'
.format(image.min()))
img_flat = image.ravel()
img_flat = img_flat[img_flat != ignore]
hist, bin_centers = histogram(img_flat, nbins)
hist = hist.astype(float)
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
"""
Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the
center
:param data: The numpy array to extract from
:param center: The point around which to extract
:param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is
center)
:return: The extracted area
"""
imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)
imin = np.clip(center - half_size, 0, data.shape).astype(np.int)
subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]
max_missing = (center + half_size + 1 - imax).astype(np.int)
min_missing = (imin - (center - half_size)).astype(np.int)
return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(
3)], mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,
float]], center_crop: bool=True):
"""
Get a cropped view of a 3d numpy array (does not modify the input)
:param data: The numpy array to crop
:param crop: The percentage to crop in each dimension
:param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from
(0, 0, 0)
:return: The cropped view
"""
if type(crop) == float or type(crop) == int:
if crop > 0.99999:
return data
icropx = 1 - crop
icropy = 1 - crop
icropz = 1 - crop
else:
icropx = 1 - crop[0]
icropy = 1 - crop[1]
icropz = 1 - crop[2]
w, h, l = data.shape
if center_crop:
view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *
icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *
icropz)]
else:
view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *
(1 - icropz))]
return view
def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:
float=1.0) ->None:
"""
Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes
:param vol_a: The first volume to plot (red)
:param vol_b: The second volume to plot (green)
:param axis: The axis along which both volumes will be reduced
:param pixel_size: The size of a pixel, relative to the spacing of the the volumes
"""
from scipy.ndimage.interpolation import zoom
import matplotlib.pyplot as plt
vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size
), axis=axis)
vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size
), axis=axis)
b_channel = np.zeros_like(vol_a_zoomed)
max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())
min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())
vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)
vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)
plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))
plt.show()
<|reserved_special_token_0|>
def threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:
"""
Compute the Otsu threshold for a numpy array, without taking into account empty areas
:param image: The volume to compute the threshold for
:param nbins: The number of bins used
:param ignore: The value to ignore
:return: The Otsu threshold
"""
from skimage.filters.thresholding import histogram
if image.min() == image.max():
raise ValueError(
'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'
.format(image.min()))
img_flat = image.ravel()
img_flat = img_flat[img_flat != ignore]
hist, bin_centers = histogram(img_flat, nbins)
hist = hist.astype(float)
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
"""
Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the
center
:param data: The numpy array to extract from
:param center: The point around which to extract
:param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is
center)
:return: The extracted area
"""
imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)
imin = np.clip(center - half_size, 0, data.shape).astype(np.int)
subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]
max_missing = (center + half_size + 1 - imax).astype(np.int)
min_missing = (imin - (center - half_size)).astype(np.int)
return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(
3)], mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,
float]], center_crop: bool=True):
"""
Get a cropped view of a 3d numpy array (does not modify the input)
:param data: The numpy array to crop
:param crop: The percentage to crop in each dimension
:param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from
(0, 0, 0)
:return: The cropped view
"""
if type(crop) == float or type(crop) == int:
if crop > 0.99999:
return data
icropx = 1 - crop
icropy = 1 - crop
icropz = 1 - crop
else:
icropx = 1 - crop[0]
icropy = 1 - crop[1]
icropz = 1 - crop[2]
w, h, l = data.shape
if center_crop:
view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *
icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *
icropz)]
else:
view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *
(1 - icropz))]
return view
def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:
float=1.0) ->None:
"""
Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes
:param vol_a: The first volume to plot (red)
:param vol_b: The second volume to plot (green)
:param axis: The axis along which both volumes will be reduced
:param pixel_size: The size of a pixel, relative to the spacing of the the volumes
"""
from scipy.ndimage.interpolation import zoom
import matplotlib.pyplot as plt
vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size
), axis=axis)
vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size
), axis=axis)
b_channel = np.zeros_like(vol_a_zoomed)
max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())
min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())
vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)
vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)
plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))
plt.show()
def show_ipv(data: np.ndarray):
"""
Show a 3d visualization of 3d numpy array
:param data: The numpy array to show
:return: The ipyvolume figure
"""
import ipyvolume as ipv
return ipv.quickvolshow(data)
def threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:
"""
Compute the Otsu threshold for a numpy array, without taking into account empty areas
:param image: The volume to compute the threshold for
:param nbins: The number of bins used
:param ignore: The value to ignore
:return: The Otsu threshold
"""
from skimage.filters.thresholding import histogram
if image.min() == image.max():
raise ValueError(
'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'
.format(image.min()))
img_flat = image.ravel()
img_flat = img_flat[img_flat != ignore]
hist, bin_centers = histogram(img_flat, nbins)
hist = hist.astype(float)
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
<|reserved_special_token_1|>
from typing import Union, Tuple
import numpy as np
from dispim import Volume
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
"""
Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the
center
:param data: The numpy array to extract from
:param center: The point around which to extract
:param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is
center)
:return: The extracted area
"""
imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)
imin = np.clip(center - half_size, 0, data.shape).astype(np.int)
subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]
max_missing = (center + half_size + 1 - imax).astype(np.int)
min_missing = (imin - (center - half_size)).astype(np.int)
return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(
3)], mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,
float]], center_crop: bool=True):
"""
Get a cropped view of a 3d numpy array (does not modify the input)
:param data: The numpy array to crop
:param crop: The percentage to crop in each dimension
:param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from
(0, 0, 0)
:return: The cropped view
"""
if type(crop) == float or type(crop) == int:
if crop > 0.99999:
return data
icropx = 1 - crop
icropy = 1 - crop
icropz = 1 - crop
else:
icropx = 1 - crop[0]
icropy = 1 - crop[1]
icropz = 1 - crop[2]
w, h, l = data.shape
if center_crop:
view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *
icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *
icropz)]
else:
view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *
(1 - icropz))]
return view
def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:
float=1.0) ->None:
"""
Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes
:param vol_a: The first volume to plot (red)
:param vol_b: The second volume to plot (green)
:param axis: The axis along which both volumes will be reduced
:param pixel_size: The size of a pixel, relative to the spacing of the the volumes
"""
from scipy.ndimage.interpolation import zoom
import matplotlib.pyplot as plt
vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size
), axis=axis)
vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size
), axis=axis)
b_channel = np.zeros_like(vol_a_zoomed)
max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())
min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())
vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)
vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)
plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))
plt.show()
def show_ipv(data: np.ndarray):
"""
Show a 3d visualization of 3d numpy array
:param data: The numpy array to show
:return: The ipyvolume figure
"""
import ipyvolume as ipv
return ipv.quickvolshow(data)
def threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:
"""
Compute the Otsu threshold for a numpy array, without taking into account empty areas
:param image: The volume to compute the threshold for
:param nbins: The number of bins used
:param ignore: The value to ignore
:return: The Otsu threshold
"""
from skimage.filters.thresholding import histogram
if image.min() == image.max():
raise ValueError(
'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'
.format(image.min()))
img_flat = image.ravel()
img_flat = img_flat[img_flat != ignore]
hist, bin_centers = histogram(img_flat, nbins)
hist = hist.astype(float)
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
<|reserved_special_token_1|>
from typing import Union, Tuple
import numpy as np
from dispim import Volume
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
"""
Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the
center
:param data: The numpy array to extract from
:param center: The point around which to extract
:param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is
center)
:return: The extracted area
"""
# FIXME: Doesn't always return the expected shape
imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)
imin = np.clip(center - half_size, 0, data.shape).astype(np.int)
subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]
max_missing = ((center + half_size + 1) - imax).astype(np.int)
min_missing = (imin - (center - half_size)).astype(np.int)
return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)], mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]], center_crop: bool = True):
"""
Get a cropped view of a 3d numpy array (does not modify the input)
:param data: The numpy array to crop
:param crop: The percentage to crop in each dimension
:param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from
(0, 0, 0)
:return: The cropped view
"""
if type(crop) == float or type(crop) == int:
if crop > 0.99999:
return data
icropx = 1 - crop
icropy = 1 - crop
icropz = 1 - crop
else:
icropx = 1 - crop[0]
icropy = 1 - crop[1]
icropz = 1 - crop[2]
w, h, l = data.shape
if center_crop:
view = data[int(w / 2 * icropx):int(-w / 2 * icropx),
int(h / 2 * icropy):int(-h / 2 * icropy),
int(l / 2 * icropz):int(-l / 2 * icropz)]
else:
view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l * (1 - icropz))]
return view
def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:
"""
Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes
:param vol_a: The first volume to plot (red)
:param vol_b: The second volume to plot (green)
:param axis: The axis along which both volumes will be reduced
:param pixel_size: The size of a pixel, relative to the spacing of the the volumes
"""
from scipy.ndimage.interpolation import zoom
import matplotlib.pyplot as plt
vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)
vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)
b_channel = np.zeros_like(vol_a_zoomed)
max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())
min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())
vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)
vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)
plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))
plt.show()
def show_ipv(data: np.ndarray):
"""
Show a 3d visualization of 3d numpy array
:param data: The numpy array to show
:return: The ipyvolume figure
"""
import ipyvolume as ipv
return ipv.quickvolshow(data)
def threshold_otsu(image: np.ndarray, nbins: int = 256, ignore: int = 0) -> float:
"""
Compute the Otsu threshold for a numpy array, without taking into account empty areas
:param image: The volume to compute the threshold for
:param nbins: The number of bins used
:param ignore: The value to ignore
:return: The Otsu threshold
"""
from skimage.filters.thresholding import histogram
# Check if the image is multi-colored or not
if image.min() == image.max():
raise ValueError("threshold_otsu is expected to work with images "
"having more than one color. The input image seems "
"to have just one color {0}.".format(image.min()))
img_flat = image.ravel()
img_flat = img_flat[img_flat != ignore]
hist, bin_centers = histogram(img_flat, nbins)
hist = hist.astype(float)
# class probabilities for all possible thresholds
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of `weight1`/`mean1` should pair with zero values in
# `weight2`/`mean2`, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
|
flexible
|
{
"blob_id": "26f486131bdf514cd8e41f75d414fe647eaf1140",
"index": 9243,
"step-1": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\n<mask token>\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n 
\"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-2": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n 
\"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\n<mask token>\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. 
The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-3": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n 
\"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. 
The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-4": "from typing import Union, Tuple\nimport numpy as np\nfrom dispim import Volume\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: 
Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. 
The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-5": "from typing import Union, Tuple\n\nimport numpy as np\n\nfrom dispim import Volume\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n # FIXME: Doesn't always return the expected shape\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n\n max_missing = ((center + half_size + 1) - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]], center_crop: bool = True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n\n w, h, l = data.shape\n\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx),\n int(h / 2 * icropy):int(-h / 2 * icropy),\n int(l / 2 * icropz):int(-l / 2 * icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l * (1 - 
icropz))]\n\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int = 256, ignore: int = 0) -> float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n # Check if the image is multi-colored or not\n if image.min() == image.max():\n raise ValueError(\"threshold_otsu is expected to work with images \"\n \"having more than one color. 
The input image seems \"\n \"to have just one color {0}.\".format(image.min()))\n\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n\n # class probabilities for all possible thresholds\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n # class means for all possible thresholds\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n\n # Clip ends to align class 1 and class 2 variables:\n # The last value of `weight1`/`mean1` should pair with zero values in\n # `weight2`/`mean2`, which do not exist.\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
<|reserved_special_token_0|>
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\\w]*', '[\\w]?'))
def test_empty_regex(regex):
with pytest.raises(IntegrityError):
Page.objects.create(url=regex)
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pytestmark = pytest.mark.django_db
MYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\\w]*', '[\\w]?'))
def test_empty_regex(regex):
with pytest.raises(IntegrityError):
Page.objects.create(url=regex)
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
<|reserved_special_token_1|>
from __future__ import absolute_import, unicode_literals
from django.db import DataError, IntegrityError, connection
import pytest
from .models import Page
pytestmark = pytest.mark.django_db
MYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\w]*', '[\w]?'))
def test_empty_regex(regex):
with pytest.raises(IntegrityError):
Page.objects.create(url=regex)
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
|
flexible
|
{
"blob_id": "96065e7e61b63f915561f117d71092e4bfb9a5da",
"index": 1149,
"step-1": "<mask token>\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-2": "<mask token>\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\n<mask token>\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-3": "<mask token>\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\n@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\\\\w]*', '[\\\\w]?'))\ndef test_empty_regex(regex):\n with pytest.raises(IntegrityError):\n Page.objects.create(url=regex)\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-4": "<mask token>\npytestmark = pytest.mark.django_db\nMYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\n@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\\\\w]*', '[\\\\w]?'))\ndef test_empty_regex(regex):\n with pytest.raises(IntegrityError):\n Page.objects.create(url=regex)\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-5": "from __future__ import absolute_import, unicode_literals\n\nfrom django.db import DataError, IntegrityError, connection\n\nimport pytest\n\nfrom .models import Page\n\npytestmark = pytest.mark.django_db\n\nMYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'\n\n\ndef test_match():\n Page.objects.create(url='^/[A-Z]*/$')\n assert Page.objects.filter(url__match='/PATH/')\n assert not Page.objects.filter(url__match='/path/')\n\n\ndef test_imatch():\n Page.objects.create(url='^/[a-z]*/$')\n assert Page.objects.filter(url__imatch='/path/')\n assert Page.objects.filter(url__imatch='/PATH/')\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\n@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\\w]*', '[\\w]?'))\ndef test_empty_regex(regex):\n with pytest.raises(IntegrityError):\n Page.objects.create(url=regex)\n\n\n@pytest.mark.skipif('connection.vendor == \"mysql\"', reason=MYSQL_REASON)\ndef test_invalid_regex():\n exception = IntegrityError if connection.vendor == 'sqlite' else DataError\n with pytest.raises(exception):\n Page.objects.create(url='(?P<match>.*)')\n",
"step-ids": [
1,
3,
4,
5,
7
]
}
|
[
1,
3,
4,
5,
7
] |
/Users/AbbyPennington/anaconda/lib/python3.5/os.py
|
normal
|
{
"blob_id": "8c4006ed8f4b1744f0316a61d95458b227653fee",
"index": 5887,
"step-1": "/Users/AbbyPennington/anaconda/lib/python3.5/os.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpotAdmin(LeafletGeoAdmin):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpotAdmin(LeafletGeoAdmin):
pass
admin.site.register(Spot, SpotAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Spot
from leaflet.admin import LeafletGeoAdmin
class SpotAdmin(LeafletGeoAdmin):
pass
admin.site.register(Spot, SpotAdmin)
|
flexible
|
{
"blob_id": "7633944366c6655306bc41087b19a474e9c414b5",
"index": 7688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpotAdmin(LeafletGeoAdmin):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SpotAdmin(LeafletGeoAdmin):\n pass\n\n\nadmin.site.register(Spot, SpotAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import Spot\nfrom leaflet.admin import LeafletGeoAdmin\n\n\nclass SpotAdmin(LeafletGeoAdmin):\n pass\n\n\nadmin.site.register(Spot, SpotAdmin)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import xml.etree.ElementTree as ET
class Stage:
def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):
self.costumes = costumes
self.sounds = sounds
self.variables = variables
self.blocks = blocks
self.scripts = scripts
self.sprites = sprites
class Sprite:
def __init__(self, name: str, index: str, xCoord: int, yCoord: int,
heading: int, scale: float, volume: int, pan: int, rotation: int,
draggable: bool, hidden: bool, costumes: str, color: (float, float,
float), pen: str, id: int):
self.name = name
self.index = index
self.coords = xCoord, yCoord
self.heading = heading
self.scale = scale
self.volume = volume
self.pan = pan
self.rotation = rotation
self.draggable = draggable
self.hidden = hidden
self.costumes = costumes
self.color = color
self.pen = pen
self.id = id
|
normal
|
{
"blob_id": "575768c200ad81f878c132d68569c84f497091f2",
"index": 8137,
"step-1": "<mask token>\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-2": "<mask token>\n\n\nclass Stage:\n <mask token>\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-3": "<mask token>\n\n\nclass Stage:\n\n def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):\n self.costumes = costumes\n self.sounds = sounds\n self.variables = variables\n self.blocks = blocks\n self.scripts = scripts\n self.sprites = sprites\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-4": "import xml.etree.ElementTree as ET\n\n\nclass Stage:\n\n def __init__(self, costumes, sounds, variables, blocks, scripts, sprites):\n self.costumes = costumes\n self.sounds = sounds\n self.variables = variables\n self.blocks = blocks\n self.scripts = scripts\n self.sprites = sprites\n\n\nclass Sprite:\n\n def __init__(self, name: str, index: str, xCoord: int, yCoord: int,\n heading: int, scale: float, volume: int, pan: int, rotation: int,\n draggable: bool, hidden: bool, costumes: str, color: (float, float,\n float), pen: str, id: int):\n self.name = name\n self.index = index\n self.coords = xCoord, yCoord\n self.heading = heading\n self.scale = scale\n self.volume = volume\n self.pan = pan\n self.rotation = rotation\n self.draggable = draggable\n self.hidden = hidden\n self.costumes = costumes\n self.color = color\n self.pen = pen\n self.id = id\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.