code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Add gumpy path
sys.path.append('../shared')
from gumpy import signal
import numpy as np
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=2, bp_high=60, notch=False,
                    hp_filter=False, bp_filter=False, artifact_removal=False, normalize=False):
    """Run the enabled preprocessing stages over ``data`` in a fixed order.

    Stage order is: notch -> high-pass -> band-pass -> normalize ->
    artifact removal. Each stage is applied only when its flag is True;
    with all flags at their defaults the input is returned unchanged.
    """
    # (flag, transform) pairs in application order; lambdas keep the
    # helper lookups lazy so disabled stages are never resolved.
    pipeline = (
        (notch, lambda d: notch_filter(d, ac_freq, sample_rate)),
        (hp_filter, lambda d: highpass_filter(d, hp_freq)),
        (bp_filter, lambda d: bandpass_filter(d, bp_low, bp_high, sample_rate)),
        (normalize, lambda d: normalize_data(d, 'mean_std')),
        (artifact_removal, lambda d: remove_artifacts(d)),
    )
    for enabled, stage in pipeline:
        if enabled:
            data = stage(data)
    return data
def notch_filter(data, ac_freq, sample_rate):
    """Suppress powerline interference at ``ac_freq`` Hz.

    The notch frequency is normalized by the Nyquist frequency, as
    gumpy's notch filter expects.
    """
    nyquist = sample_rate / 2
    return signal.notch(data, ac_freq / nyquist)
def highpass_filter(data, hp_freq):
    """Apply gumpy's Butterworth high-pass filter with cutoff ``hp_freq``."""
    filtered = signal.butter_highpass(data, hp_freq)
    return filtered
def bandpass_filter(data, bp_low, bp_high, sample_rate):
    """Band-pass ``data`` between ``bp_low`` and ``bp_high`` Hz.

    Uses a 5th-order Butterworth filter at the given sampling rate.
    """
    result = signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=sample_rate)
    return result
def normalize_data(data, strategy):
    """Normalize ``data`` using the named gumpy strategy (e.g. 'mean_std')."""
    normalized = signal.normalize(data, strategy)
    return normalized
def remove_artifacts(data):
    """Clean artifacts from ``data`` via gumpy's ICA-style removal.

    The signal is reshaped to a single column for the gumpy API, and the
    cleaned result is squeezed back to the original 1-D layout.
    """
    as_column = data.reshape((-1, 1))
    cleaned = signal.artifact_removal(as_column)[0]
    return np.squeeze(cleaned)
|
normal
|
{
"blob_id": "5f1cbe1019f218d2aad616ea8bbe760ea760534c",
"index": 9359,
"step-1": "<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\n<mask token>\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\n<mask token>\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-2": "<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-3": "<mask token>\nsys.path.append('../shared')\n<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-4": "import sys\nsys.path.append('../shared')\nfrom gumpy import signal\nimport numpy as np\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\n# Add gumpy path\nsys.path.append('../shared')\nfrom gumpy import signal\nimport numpy as np\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=2, bp_high=60, notch=False,\n hp_filter=False, bp_filter=False, artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class MaskShadowGANModel(BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def generate_dataset(self):
"""
Add ops for dataset loaders to graph
"""
if self.training:
dataset = UnpairedDataset(self.opt, self.training)
datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',
cacheB='./dataB.tfcache')
dataA_iter = datasetA.make_initializable_iterator()
dataB_iter = datasetB.make_initializable_iterator()
return dataA_iter, dataB_iter, dataA_iter.get_next(
), dataB_iter.get_next()
else:
dataset = SingleDataset(self.opt, self.training)
datasetA = dataset.generate()
dataA_iter = datasetA.make_initializable_iterator()
return dataA_iter, dataA_iter.get_next()
def build(self):
"""
Build TensorFlow graph for MaskShadowGAN model.
"""
self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.
weight_init_type, init_gain=self.opt.weight_init_gain, training
=self.training, name='G')
if self.training:
self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.
weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='F')
self.D_A = Discriminator(channels=self.opt.channels, ndf=self.
opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='D_A')
self.D_B = Discriminator(channels=self.opt.channels, ndf=self.
opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='D_B')
fakeB = self.G(self.realA)
fakeA = self.F(self.realB, self.rand_mask)
reconstructedA = self.F(fakeB, self.last_mask)
reconstructedB = self.G(fakeA)
identA = self.G(self.realB)
identB = self.F(self.realA, self.mask_non_shadow)
tf.summary.image('A/original', batch_convert_2_int(self.realA))
tf.summary.image('B/original', batch_convert_2_int(self.realB))
tf.summary.image('A/generated', batch_convert_2_int(fakeA))
tf.summary.image('B/generated', batch_convert_2_int(fakeB))
tf.summary.image('A/reconstructed', batch_convert_2_int(
reconstructedA))
tf.summary.image('B/reconstructed', batch_convert_2_int(
reconstructedB))
Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,
reconstructedA, reconstructedB, identA, identB)
optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)
return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
else:
fakeB = self.G(self.realA)
return fakeB
def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,
identB):
"""
Compute the losses for the generators and discriminators.
"""
G_loss = self.__G_loss(self.D_B, fakeB)
F_loss = self.__G_loss(self.D_A, fakeA)
cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
ident_loss = self.__identity_loss(identA, identB)
Gen_loss = G_loss + F_loss + cc_loss + ident_loss
D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)
return Gen_loss, D_A_loss, D_B_loss
def __D_loss(self, D, real, fake):
"""
Compute the discriminator loss.
(MSE Loss):
L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
"""
loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +
tf.reduce_mean(tf.square(D(fake))))
return loss
def __G_loss(self, D, fake):
"""
Compute the generator loss.
(MSE Loss):
L_gen = Expectation of (D(G(A)) - 1)^2
"""
loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
return loss
def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
"""
Compute the cycle consistenty loss.
L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
lamb * [Expectation of L1_norm(G(F(B)) - B)]
"""
loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.
realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -
self.realB))
return loss
def __identity_loss(self, identA, identB):
"""
Compute the identity loss.
L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +
lamB * [Expectation of L1_norm(G(B) - B)]]
"""
loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.
abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.
abs(identA - self.realB)))
return loss
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MaskShadowGANModel(BaseModel):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def generate_dataset(self):
"""
Add ops for dataset loaders to graph
"""
if self.training:
dataset = UnpairedDataset(self.opt, self.training)
datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',
cacheB='./dataB.tfcache')
dataA_iter = datasetA.make_initializable_iterator()
dataB_iter = datasetB.make_initializable_iterator()
return dataA_iter, dataB_iter, dataA_iter.get_next(
), dataB_iter.get_next()
else:
dataset = SingleDataset(self.opt, self.training)
datasetA = dataset.generate()
dataA_iter = datasetA.make_initializable_iterator()
return dataA_iter, dataA_iter.get_next()
def build(self):
"""
Build TensorFlow graph for MaskShadowGAN model.
"""
self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.
weight_init_type, init_gain=self.opt.weight_init_gain, training
=self.training, name='G')
if self.training:
self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.
weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='F')
self.D_A = Discriminator(channels=self.opt.channels, ndf=self.
opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='D_A')
self.D_B = Discriminator(channels=self.opt.channels, ndf=self.
opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='D_B')
fakeB = self.G(self.realA)
fakeA = self.F(self.realB, self.rand_mask)
reconstructedA = self.F(fakeB, self.last_mask)
reconstructedB = self.G(fakeA)
identA = self.G(self.realB)
identB = self.F(self.realA, self.mask_non_shadow)
tf.summary.image('A/original', batch_convert_2_int(self.realA))
tf.summary.image('B/original', batch_convert_2_int(self.realB))
tf.summary.image('A/generated', batch_convert_2_int(fakeA))
tf.summary.image('B/generated', batch_convert_2_int(fakeB))
tf.summary.image('A/reconstructed', batch_convert_2_int(
reconstructedA))
tf.summary.image('B/reconstructed', batch_convert_2_int(
reconstructedB))
Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,
reconstructedA, reconstructedB, identA, identB)
optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)
return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
else:
fakeB = self.G(self.realA)
return fakeB
def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,
identB):
"""
Compute the losses for the generators and discriminators.
"""
G_loss = self.__G_loss(self.D_B, fakeB)
F_loss = self.__G_loss(self.D_A, fakeA)
cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
ident_loss = self.__identity_loss(identA, identB)
Gen_loss = G_loss + F_loss + cc_loss + ident_loss
D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)
return Gen_loss, D_A_loss, D_B_loss
def __D_loss(self, D, real, fake):
"""
Compute the discriminator loss.
(MSE Loss):
L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
"""
loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +
tf.reduce_mean(tf.square(D(fake))))
return loss
def __G_loss(self, D, fake):
"""
Compute the generator loss.
(MSE Loss):
L_gen = Expectation of (D(G(A)) - 1)^2
"""
loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
return loss
def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
"""
Compute the cycle consistenty loss.
L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
lamb * [Expectation of L1_norm(G(F(B)) - B)]
"""
loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.
realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -
self.realB))
return loss
def __identity_loss(self, identA, identB):
"""
Compute the identity loss.
L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +
lamB * [Expectation of L1_norm(G(B) - B)]]
"""
loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.
abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.
abs(identA - self.realB)))
return loss
def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
"""
Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py
"""
def make_optimizer(loss, variables, name='Adam'):
""" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)
and a linearly decaying rate that goes to zero over the next 100k steps
"""
global_step = tf.Variable(0, trainable=False, name='global_step')
starter_learning_rate = self.opt.lr
end_learning_rate = 0.0
start_decay_step = self.opt.niter
decay_steps = self.opt.niter_decay
beta1 = self.opt.beta1
learning_rate = tf.where(tf.greater_equal(global_step,
start_decay_step), tf.train.polynomial_decay(
starter_learning_rate, global_step - start_decay_step,
decay_steps, end_learning_rate, power=1.0),
starter_learning_rate)
learning_step = tf.train.AdamOptimizer(learning_rate, beta1=
beta1, name=name).minimize(loss, global_step=global_step,
var_list=variables)
return learning_step
Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.
variables, name='Adam_Gen')
D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=
'Adam_D_A')
D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=
'Adam_D_B')
with tf.control_dependencies([Gen_optimizer, D_A_optimizer,
D_B_optimizer]):
return tf.no_op(name='optimizers')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MaskShadowGANModel(BaseModel):
"""
Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.
A: shadow images domain
B: shadow free images domain
Paper: https://arxiv.org/pdf/1903.10683.pdf
"""
def __init__(self, opt, training):
BaseModel.__init__(self, opt, training)
self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.
batch_size, self.opt.crop_size, self.opt.crop_size, 1])
self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.
batch_size, self.opt.crop_size, self.opt.crop_size, 1])
self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size,
self.opt.crop_size, self.opt.crop_size, 1])
def generate_dataset(self):
"""
Add ops for dataset loaders to graph
"""
if self.training:
dataset = UnpairedDataset(self.opt, self.training)
datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',
cacheB='./dataB.tfcache')
dataA_iter = datasetA.make_initializable_iterator()
dataB_iter = datasetB.make_initializable_iterator()
return dataA_iter, dataB_iter, dataA_iter.get_next(
), dataB_iter.get_next()
else:
dataset = SingleDataset(self.opt, self.training)
datasetA = dataset.generate()
dataA_iter = datasetA.make_initializable_iterator()
return dataA_iter, dataA_iter.get_next()
def build(self):
"""
Build TensorFlow graph for MaskShadowGAN model.
"""
self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.
weight_init_type, init_gain=self.opt.weight_init_gain, training
=self.training, name='G')
if self.training:
self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.
weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='F')
self.D_A = Discriminator(channels=self.opt.channels, ndf=self.
opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='D_A')
self.D_B = Discriminator(channels=self.opt.channels, ndf=self.
opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='D_B')
fakeB = self.G(self.realA)
fakeA = self.F(self.realB, self.rand_mask)
reconstructedA = self.F(fakeB, self.last_mask)
reconstructedB = self.G(fakeA)
identA = self.G(self.realB)
identB = self.F(self.realA, self.mask_non_shadow)
tf.summary.image('A/original', batch_convert_2_int(self.realA))
tf.summary.image('B/original', batch_convert_2_int(self.realB))
tf.summary.image('A/generated', batch_convert_2_int(fakeA))
tf.summary.image('B/generated', batch_convert_2_int(fakeB))
tf.summary.image('A/reconstructed', batch_convert_2_int(
reconstructedA))
tf.summary.image('B/reconstructed', batch_convert_2_int(
reconstructedB))
Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,
reconstructedA, reconstructedB, identA, identB)
optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)
return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
else:
fakeB = self.G(self.realA)
return fakeB
def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,
identB):
"""
Compute the losses for the generators and discriminators.
"""
G_loss = self.__G_loss(self.D_B, fakeB)
F_loss = self.__G_loss(self.D_A, fakeA)
cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
ident_loss = self.__identity_loss(identA, identB)
Gen_loss = G_loss + F_loss + cc_loss + ident_loss
D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)
return Gen_loss, D_A_loss, D_B_loss
def __D_loss(self, D, real, fake):
"""
Compute the discriminator loss.
(MSE Loss):
L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
"""
loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +
tf.reduce_mean(tf.square(D(fake))))
return loss
def __G_loss(self, D, fake):
"""
Compute the generator loss.
(MSE Loss):
L_gen = Expectation of (D(G(A)) - 1)^2
"""
loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
return loss
def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
"""
Compute the cycle consistenty loss.
L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
lamb * [Expectation of L1_norm(G(F(B)) - B)]
"""
loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.
realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -
self.realB))
return loss
def __identity_loss(self, identA, identB):
"""
Compute the identity loss.
L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +
lamB * [Expectation of L1_norm(G(B) - B)]]
"""
loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.
abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.
abs(identA - self.realB)))
return loss
def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
"""
Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py
"""
def make_optimizer(loss, variables, name='Adam'):
""" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)
and a linearly decaying rate that goes to zero over the next 100k steps
"""
global_step = tf.Variable(0, trainable=False, name='global_step')
starter_learning_rate = self.opt.lr
end_learning_rate = 0.0
start_decay_step = self.opt.niter
decay_steps = self.opt.niter_decay
beta1 = self.opt.beta1
learning_rate = tf.where(tf.greater_equal(global_step,
start_decay_step), tf.train.polynomial_decay(
starter_learning_rate, global_step - start_decay_step,
decay_steps, end_learning_rate, power=1.0),
starter_learning_rate)
learning_step = tf.train.AdamOptimizer(learning_rate, beta1=
beta1, name=name).minimize(loss, global_step=global_step,
var_list=variables)
return learning_step
Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.
variables, name='Adam_Gen')
D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=
'Adam_D_A')
D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=
'Adam_D_B')
with tf.control_dependencies([Gen_optimizer, D_A_optimizer,
D_B_optimizer]):
return tf.no_op(name='optimizers')
<|reserved_special_token_1|>
import tensorflow as tf
from models.base_model import BaseModel
from utils.im_utils import batch_convert_2_int
from datasets.single_dataset import SingleDataset
from datasets.unpaired_dataset import UnpairedDataset
from models.generators.maskshadowgan_generators import Generator
from models.discriminators.maskshadowgan_discriminators import Discriminator
class MaskShadowGANModel(BaseModel):
    """
    Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.
    A: shadow images domain
    B: shadow free images domain
    Paper: https://arxiv.org/pdf/1903.10683.pdf
    """

    def __init__(self, opt, training):
        """Create the input placeholders/constants for the graph.

        Args:
            opt: options object; this class reads batch_size, crop_size,
                channels, ngf, ndf, layer_norm_type, weight_init_type,
                weight_init_gain, lamA, lamB, lambda_ident, lr, beta1,
                niter, niter_decay from it.
            training: bool, selects the training graph vs. the test graph.
        """
        BaseModel.__init__(self, opt, training)
        # Real image batches for both domains (NHWC float32).
        self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
            self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
            self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        # Generated images are fed back in through these placeholders for
        # the discriminator losses (see __loss) — presumably from an image
        # history pool in the training loop; TODO(review): confirm.
        self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
            self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,
            self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        # Single-channel shadow masks fed to generator F (B->A).
        self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.
            batch_size, self.opt.crop_size, self.opt.crop_size, 1])
        self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.
            batch_size, self.opt.crop_size, self.opt.crop_size, 1])
        # Constant all -1.0 mask used for the identity mapping of a
        # shadow-free input through F.
        self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size,
            self.opt.crop_size, self.opt.crop_size, 1])

    def generate_dataset(self):
        """
        Add ops for dataset loaders to graph.

        Returns:
            Training: (dataA_iter, dataB_iter, nextA_op, nextB_op).
            Testing: (dataA_iter, nextA_op) — only the shadow domain A
            is needed at test time.
        """
        if self.training:
            dataset = UnpairedDataset(self.opt, self.training)
            datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',
                cacheB='./dataB.tfcache')
            dataA_iter = datasetA.make_initializable_iterator()
            dataB_iter = datasetB.make_initializable_iterator()
            return dataA_iter, dataB_iter, dataA_iter.get_next(
                ), dataB_iter.get_next()
        else:
            dataset = SingleDataset(self.opt, self.training)
            datasetA = dataset.generate()
            dataA_iter = datasetA.make_initializable_iterator()
            return dataA_iter, dataA_iter.get_next()

    def build(self):
        """
        Build TensorFlow graph for MaskShadowGAN model.

        Returns:
            Training: (fakeA, fakeB, optimizers, Gen_loss, D_A_loss,
            D_B_loss). Testing: fakeB (the shadow-removed image).
        """
        # Generator G maps shadow images (A) to shadow-free images (B).
        self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
            norm_type=self.opt.layer_norm_type, init_type=self.opt.
            weight_init_type, init_gain=self.opt.weight_init_gain, training
            =self.training, name='G')
        if self.training:
            # Generator F maps (shadow-free image, mask) back to A.
            self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
                norm_type=self.opt.layer_norm_type, init_type=self.opt.
                weight_init_type, init_gain=self.opt.weight_init_gain,
                training=self.training, name='F')
            # One discriminator per domain.
            self.D_A = Discriminator(channels=self.opt.channels, ndf=self.
                opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
                .opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                training=self.training, name='D_A')
            self.D_B = Discriminator(channels=self.opt.channels, ndf=self.
                opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self
                .opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                training=self.training, name='D_B')
            # Translations: A->B uses no mask; B->A takes a shadow mask.
            fakeB = self.G(self.realA)
            fakeA = self.F(self.realB, self.rand_mask)
            # Cycle reconstructions (F(G(A)) and G(F(B))).
            reconstructedA = self.F(fakeB, self.last_mask)
            reconstructedB = self.G(fakeA)
            # Identity mappings; F gets the constant non-shadow mask.
            identA = self.G(self.realB)
            identB = self.F(self.realA, self.mask_non_shadow)
            tf.summary.image('A/original', batch_convert_2_int(self.realA))
            tf.summary.image('B/original', batch_convert_2_int(self.realB))
            tf.summary.image('A/generated', batch_convert_2_int(fakeA))
            tf.summary.image('B/generated', batch_convert_2_int(fakeB))
            tf.summary.image('A/reconstructed', batch_convert_2_int(
                reconstructedA))
            tf.summary.image('B/reconstructed', batch_convert_2_int(
                reconstructedB))
            Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,
                reconstructedA, reconstructedB, identA, identB)
            optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)
            return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
        else:
            # Test-time graph: only the shadow-removal direction A->B.
            fakeB = self.G(self.realA)
            return fakeB

    def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,
        identB):
        """
        Compute the losses for the generators and discriminators.

        Note: the generator losses use the freshly generated fakeA/fakeB
        tensors, while the discriminator losses use the self.fakeA /
        self.fakeB placeholders, which are fed externally.
        """
        G_loss = self.__G_loss(self.D_B, fakeB)
        F_loss = self.__G_loss(self.D_A, fakeA)
        cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
        ident_loss = self.__identity_loss(identA, identB)
        # Both generators are optimized jointly on the summed objective.
        Gen_loss = G_loss + F_loss + cc_loss + ident_loss
        D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
        D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)
        return Gen_loss, D_A_loss, D_B_loss

    def __D_loss(self, D, real, fake):
        """
        Compute the discriminator loss.
        (MSE Loss):
            L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
        """
        loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +
            tf.reduce_mean(tf.square(D(fake))))
        return loss

    def __G_loss(self, D, fake):
        """
        Compute the generator loss.
        (MSE Loss):
            L_gen = Expectation of (D(G(A)) - 1)^2
        """
        loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
        return loss

    def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
        """
        Compute the cycle consistency loss (L1).
        L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
                lamB * [Expectation of L1_norm(G(F(B)) - B)]
        """
        loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.
            realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -
            self.realB))
        return loss

    def __identity_loss(self, identA, identB):
        """
        Compute the identity loss (L1).
        L_idt = lambda_ident * [lamA * [Expectation of L1_norm(F(A) - A)] +
                                lamB * [Expectation of L1_norm(G(B) - B)]]
        """
        # identB = F(realA, non-shadow mask) is compared to realA, and
        # identA = G(realB) is compared to realB.
        loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.
            abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.
            abs(identA - self.realB)))
        return loss

    def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
        """
        Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
        https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py

        Returns a single no-op grouping the three Adam update steps.
        """
        def make_optimizer(loss, variables, name='Adam'):
            """ Adam optimizer with a constant learning rate for the first
            opt.niter steps and a linear (polynomial power=1) decay to zero
            over the following opt.niter_decay steps. (The 0.0002 / 100k
            figures in the upstream CycleGAN comment are just its defaults;
            the actual values come from opt.lr / opt.niter / opt.niter_decay.)
            """
            # Each optimizer keeps its own global_step counter.
            global_step = tf.Variable(0, trainable=False, name='global_step')
            starter_learning_rate = self.opt.lr
            end_learning_rate = 0.0
            start_decay_step = self.opt.niter
            decay_steps = self.opt.niter_decay
            beta1 = self.opt.beta1
            # Constant LR until start_decay_step, then linear decay to 0.
            learning_rate = tf.where(tf.greater_equal(global_step,
                start_decay_step), tf.train.polynomial_decay(
                starter_learning_rate, global_step - start_decay_step,
                decay_steps, end_learning_rate, power=1.0),
                starter_learning_rate)
            learning_step = tf.train.AdamOptimizer(learning_rate, beta1=
                beta1, name=name).minimize(loss, global_step=global_step,
                var_list=variables)
            return learning_step
        # Both generators share one optimizer; each discriminator has its own.
        Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.
            variables, name='Adam_Gen')
        D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=
            'Adam_D_A')
        D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=
            'Adam_D_B')
        with tf.control_dependencies([Gen_optimizer, D_A_optimizer,
            D_B_optimizer]):
            return tf.no_op(name='optimizers')
<|reserved_special_token_1|>
import tensorflow as tf
from models.base_model import BaseModel
from utils.im_utils import batch_convert_2_int
from datasets.single_dataset import SingleDataset
from datasets.unpaired_dataset import UnpairedDataset
from models.generators.maskshadowgan_generators import Generator
from models.discriminators.maskshadowgan_discriminators import Discriminator
class MaskShadowGANModel(BaseModel):
    """
    Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.

    A: shadow images domain
    B: shadow free images domain

    Two generators are trained adversarially against two discriminators
    (CycleGAN-style, with mask guidance on the B->A direction):
        G : A -> B   shadow removal
        F : B -> A   shadow synthesis, conditioned on a 1-channel shadow mask
        D_A / D_B    judge realness of images in domain A / B

    Paper: https://arxiv.org/pdf/1903.10683.pdf
    """
    def __init__(self, opt, training):
        """
        Build the input placeholders for the model.

        Args:
            opt: options namespace; this class reads batch_size, crop_size,
                channels, ngf, ndf, layer_norm_type, weight_init_type,
                weight_init_gain, lr, beta1, niter, niter_decay,
                lamA, lamB, lambda_ident.
            training: bool, whether the graph will be built for training.
        """
        BaseModel.__init__(self, opt, training)
        # create placeholders for images and shadow masks
        # image tensors are NHWC float32: [batch, crop, crop, channels]
        self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        # fakeA/fakeB are fed back from outside (presumably an image-pool of
        # previously generated fakes, as in CycleGAN, to stabilize the
        # discriminators -- see comment in __loss; TODO confirm against caller)
        self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        # single-channel shadow masks guiding the mask-conditioned generator F
        self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
        self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
        # constant all -1.0 mask meaning "no shadow"; used for the identity path
        self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])

    def generate_dataset(self):
        """
        Add ops for dataset loaders to graph.

        Returns:
            Training: (dataA_iter, dataB_iter, nextA, nextB) -- initializable
                iterators for both domains plus their get_next() ops.
            Testing:  (dataA_iter, nextA) -- shadow-domain iterator only.
        """
        if self.training:
            dataset = UnpairedDataset(self.opt, self.training)
            # on-disk tf.data caches so epochs after the first skip decoding
            datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')
            dataA_iter = datasetA.make_initializable_iterator()
            dataB_iter = datasetB.make_initializable_iterator()

            return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()
        else: # only need shadow dataset for testing
            dataset = SingleDataset(self.opt, self.training)
            datasetA = dataset.generate()
            dataA_iter = datasetA.make_initializable_iterator()

            return dataA_iter, dataA_iter.get_next()

    def build(self):
        """
        Build TensorFlow graph for MaskShadowGAN model.

        Returns:
            Training: (fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss)
                where `optimizers` is a single no-op grouping all three
                optimizer ops (see __optimizers).
            Testing:  fakeB, the shadow-free output of G for the realA input.
        """
        # add ops for generator (A->B) to graph
        self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,
                           init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
                           training=self.training, name='G')

        if self.training:
            # add ops for other generator (B->A) and discriminators to graph
            self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
                               norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                               init_gain=self.opt.weight_init_gain, training=self.training, name='F')
            self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,
                                     norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                                     init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')
            self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,
                                     norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
                                     init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')

            # generate fake images; F is additionally conditioned on a mask
            fakeB = self.G(self.realA)
            fakeA = self.F(self.realB, self.rand_mask)

            # generate reconstructed images (cycle: A->B->A uses the mask of
            # the shadow last removed, fed via the last_mask placeholder)
            reconstructedA = self.F(fakeB, self.last_mask)
            reconstructedB = self.G(fakeA)

            # generate identity mapping images (F gets the constant
            # "no shadow" mask so it should act as identity on domain A)
            identA = self.G(self.realB)
            identB = self.F(self.realA, self.mask_non_shadow)

            # TensorBoard previews of every image stream
            tf.summary.image('A/original', batch_convert_2_int(self.realA))
            tf.summary.image('B/original', batch_convert_2_int(self.realB))
            tf.summary.image('A/generated', batch_convert_2_int(fakeA))
            tf.summary.image('B/generated', batch_convert_2_int(fakeB))
            tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))
            tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))

            # add loss ops to graph
            Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,
                                                       reconstructedB, identA, identB)

            # add optimizer ops to graph
            optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)

            return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
        else: # only need generator from A->B during testing
            fakeB = self.G(self.realA)
            return fakeB

    def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):
        """
        Compute the losses for the generators and discriminators.

        Returns:
            (Gen_loss, D_A_loss, D_B_loss) scalar tensors. Gen_loss is the
            joint objective for both generators (adversarial + cycle +
            identity terms).
        """
        # compute the generators loss
        G_loss = self.__G_loss(self.D_B, fakeB)
        F_loss = self.__G_loss(self.D_A, fakeA)
        cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
        ident_loss = self.__identity_loss(identA, identB)
        Gen_loss = G_loss + F_loss + cc_loss + ident_loss

        # Compute the disciminators loss. Use fake images from image pool to
        # improve stability (self.fakeA / self.fakeB placeholders are fed by
        # the training loop, not the freshly generated fakeA / fakeB above)
        D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
        D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)

        return Gen_loss, D_A_loss, D_B_loss

    def __D_loss(self, D, real, fake):
        """
        Compute the discriminator loss.

        (MSE / least-squares GAN loss):
        L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]

        Args:
            D: discriminator network (callable).
            real: batch of real images from D's domain.
            fake: batch of generated images for D's domain.
        """
        loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \
                      tf.reduce_mean(tf.square(D(fake))))
        return loss

    def __G_loss(self, D, fake):
        """
        Compute the generator loss.

        (MSE / least-squares GAN loss):
        L_gen = Expectation of (D(G(A)) - 1)^2

        Args:
            D: discriminator for the generator's target domain.
            fake: batch of images produced by the generator.
        """
        loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
        return loss

    def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
        """
        Compute the cycle consistenty loss (L1 against the real inputs).

        L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
                lamb * [Expectation of L1_norm(G(F(B)) - B)]
        """
        loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \
               self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))
        return loss

    def __identity_loss(self, identA, identB):
        """
        Compute the identity loss.

        identB = F(A, no_shadow_mask) is compared to realA (weighted lamA);
        identA = G(B) is compared to realB (weighted lamB).

        L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +
                             lamB * [Expectation of L1_norm(G(B) - B)]]
        """
        loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \
               self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))
        return loss

    def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
        """
        Build one Adam optimizer per loss and group them into a single op.

        Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
        https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py

        Returns:
            A tf.no_op that depends on all three optimizer steps, so running
            it once advances the generators and both discriminators together.
        """
        def make_optimizer(loss, variables, name='Adam'):
            """ Adam optimizer with a constant learning rate for the first
            opt.niter steps and a linearly decaying rate that reaches zero
            over the following opt.niter_decay steps. (The 0.0002 / 100k
            figures in the upstream comment correspond to the defaults;
            actual values come from opt.lr / opt.niter / opt.niter_decay.)
            """
            # each optimizer keeps its own global_step so decay schedules
            # are tracked independently per network group
            global_step = tf.Variable(0, trainable=False, name='global_step')
            starter_learning_rate = self.opt.lr
            end_learning_rate = 0.0
            start_decay_step = self.opt.niter
            decay_steps = self.opt.niter_decay
            beta1 = self.opt.beta1
            # constant LR until start_decay_step, then linear (power=1.0)
            # polynomial decay down to end_learning_rate
            learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),
                                      tf.train.polynomial_decay(starter_learning_rate,
                                                                global_step-start_decay_step,
                                                                decay_steps, end_learning_rate,
                                                                power=1.0),
                                      starter_learning_rate))

            learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)
                             .minimize(loss, global_step=global_step, var_list=variables))
            return learning_step

        # both generators share one optimizer/step; discriminators get their own
        Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')
        D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')
        D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')

        with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):
            return tf.no_op(name='optimizers')
|
flexible
|
{
"blob_id": "cbbe273a19a4e60b760e35aeb8d43972a46760f5",
"index": 3436,
"step-1": "<mask token>\n\n\nclass MaskShadowGANModel(BaseModel):\n <mask token>\n <mask token>\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', 
batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n 
\"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MaskShadowGANModel(BaseModel):\n <mask token>\n <mask token>\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', 
batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n 
\"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = tf.where(tf.greater_equal(global_step,\n start_decay_step), tf.train.polynomial_decay(\n starter_learning_rate, global_step - start_decay_step,\n decay_steps, end_learning_rate, power=1.0),\n starter_learning_rate)\n learning_step = tf.train.AdamOptimizer(learning_rate, beta1=\n beta1, name=name).minimize(loss, global_step=global_step,\n var_list=variables)\n return learning_step\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.\n variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=\n 'Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=\n 'Adam_D_B')\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer,\n D_B_optimizer]):\n return 
tf.no_op(name='optimizers')\n",
"step-3": "<mask token>\n\n\nclass MaskShadowGANModel(BaseModel):\n \"\"\"\n Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.\n\n A: shadow images domain\n B: shadow free images domain\n\n Paper: https://arxiv.org/pdf/1903.10683.pdf\n \"\"\"\n\n def __init__(self, opt, training):\n BaseModel.__init__(self, opt, training)\n self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, 1])\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = 
Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n 
\"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n\n def make_optimizer(loss, variables, 
name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = tf.where(tf.greater_equal(global_step,\n start_decay_step), tf.train.polynomial_decay(\n starter_learning_rate, global_step - start_decay_step,\n decay_steps, end_learning_rate, power=1.0),\n starter_learning_rate)\n learning_step = tf.train.AdamOptimizer(learning_rate, beta1=\n beta1, name=name).minimize(loss, global_step=global_step,\n var_list=variables)\n return learning_step\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.\n variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=\n 'Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=\n 'Adam_D_B')\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer,\n D_B_optimizer]):\n return tf.no_op(name='optimizers')\n",
"step-4": "import tensorflow as tf\nfrom models.base_model import BaseModel\nfrom utils.im_utils import batch_convert_2_int\nfrom datasets.single_dataset import SingleDataset\nfrom datasets.unpaired_dataset import UnpairedDataset\nfrom models.generators.maskshadowgan_generators import Generator\nfrom models.discriminators.maskshadowgan_discriminators import Discriminator\n\n\nclass MaskShadowGANModel(BaseModel):\n \"\"\"\n Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.\n\n A: shadow images domain\n B: shadow free images domain\n\n Paper: https://arxiv.org/pdf/1903.10683.pdf\n \"\"\"\n\n def __init__(self, opt, training):\n BaseModel.__init__(self, opt, training)\n self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, 1])\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, 
dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n 
optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + 
self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = tf.where(tf.greater_equal(global_step,\n start_decay_step), tf.train.polynomial_decay(\n starter_learning_rate, global_step - start_decay_step,\n decay_steps, end_learning_rate, power=1.0),\n starter_learning_rate)\n learning_step = tf.train.AdamOptimizer(learning_rate, beta1=\n beta1, name=name).minimize(loss, global_step=global_step,\n var_list=variables)\n return learning_step\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.\n variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=\n 'Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=\n 'Adam_D_B')\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer,\n D_B_optimizer]):\n return tf.no_op(name='optimizers')\n",
"step-5": "import tensorflow as tf\nfrom models.base_model import BaseModel\nfrom utils.im_utils import batch_convert_2_int\nfrom datasets.single_dataset import SingleDataset\nfrom datasets.unpaired_dataset import UnpairedDataset\nfrom models.generators.maskshadowgan_generators import Generator\nfrom models.discriminators.maskshadowgan_discriminators import Discriminator\n\n\nclass MaskShadowGANModel(BaseModel):\n \"\"\"\n Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.\n\n A: shadow images domain\n B: shadow free images domain\n\n Paper: https://arxiv.org/pdf/1903.10683.pdf\n \"\"\"\n def __init__(self, opt, training):\n BaseModel.__init__(self, opt, training)\n\n # create placeholders for images and shadow masks\n self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n\n return 
dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()\n else: # only need shadow dataset for testing\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n # add ops for generator (A->B) to graph\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,\n init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='G')\n\n if self.training:\n # add ops for other generator (B->A) and discriminators to graph\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')\n\n # generate fake images\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n\n # generate reconstructed images\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n\n # generate identity mapping images\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n 
tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))\n\n # add loss ops to graph\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,\n reconstructedB, identA, identB)\n\n # add optimizer ops to graph\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else: # only need generator from A->B during testing\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n # compute the generators loss\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n\n # Compute the disciminators loss. 
Use fake images from image pool to improve stability\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \\\n tf.reduce_mean(tf.square(D(fake))))\n\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \\\n self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))\n\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \\\n self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))\n\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = 
tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),\n tf.train.polynomial_decay(starter_learning_rate,\n global_step-start_decay_step,\n decay_steps, end_learning_rate,\n power=1.0),\n starter_learning_rate))\n\n learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)\n .minimize(loss, global_step=global_step, var_list=variables))\n\n return learning_step\n\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')\n\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):\n return tf.no_op(name='optimizers')\n",
"step-ids": [
8,
9,
11,
12,
13
]
}
|
[
8,
9,
11,
12,
13
] |
<|reserved_special_token_0|>
class Card(namedtuple('Card', 'face, suit')):
def __repr__(self):
return ''.join(self)
def royal_flush(hand):
royalface = 'TJQKA'
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in royalface:
return 'royal-flush', ordered[-1].face
return False
def straight_flush(hand):
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight-flush', ordered[-1].face
return False
def four_of_a_kind(hand):
allfaces = [f for f, s in hand]
uniqueRanks = set(allfaces)
if len(uniqueRanks) != 2:
return False
for f in uniqueRanks:
if allfaces.count(f) == 4:
uniqueRanks.remove(f)
return 'four-of-a-kind', f
return False
<|reserved_special_token_0|>
def flush(hand):
allfaces = [f for f, s in hand]
first_card = hand[0]
other_cards = hand[1:]
if all(first_card.suit == card.suit for card in other_cards):
return 'flush', sorted(allfaces, key=lambda f: face.index(f),
reverse=True)
return False
<|reserved_special_token_0|>
def two_pair(hand):
allfaces = [f for f, s in hand]
allftypes = set(allfaces)
pairs = [f for f in allftypes if allfaces.count(f) == 2]
if len(pairs) != 2:
return False
p1, p2 = pairs
other_cards = [(allftypes - set(pairs)).pop()]
return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2
) else pairs[::-1] + other_cards
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def character_frequency(s):
freq = {}
for i in s:
if i in freq:
freq[i] += 1
else:
freq[i] = 1
return freq
<|reserved_special_token_0|>
class Card(namedtuple('Card', 'face, suit')):
def __repr__(self):
return ''.join(self)
def royal_flush(hand):
royalface = 'TJQKA'
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in royalface:
return 'royal-flush', ordered[-1].face
return False
def straight_flush(hand):
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight-flush', ordered[-1].face
return False
def four_of_a_kind(hand):
allfaces = [f for f, s in hand]
uniqueRanks = set(allfaces)
if len(uniqueRanks) != 2:
return False
for f in uniqueRanks:
if allfaces.count(f) == 4:
uniqueRanks.remove(f)
return 'four-of-a-kind', f
return False
<|reserved_special_token_0|>
def flush(hand):
allfaces = [f for f, s in hand]
first_card = hand[0]
other_cards = hand[1:]
if all(first_card.suit == card.suit for card in other_cards):
return 'flush', sorted(allfaces, key=lambda f: face.index(f),
reverse=True)
return False
<|reserved_special_token_0|>
def two_pair(hand):
allfaces = [f for f, s in hand]
allftypes = set(allfaces)
pairs = [f for f in allftypes if allfaces.count(f) == 2]
if len(pairs) != 2:
return False
p1, p2 = pairs
other_cards = [(allftypes - set(pairs)).pop()]
return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2
) else pairs[::-1] + other_cards
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def character_frequency(s):
freq = {}
for i in s:
if i in freq:
freq[i] += 1
else:
freq[i] = 1
return freq
<|reserved_special_token_0|>
class Card(namedtuple('Card', 'face, suit')):
def __repr__(self):
return ''.join(self)
def royal_flush(hand):
royalface = 'TJQKA'
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in royalface:
return 'royal-flush', ordered[-1].face
return False
def straight_flush(hand):
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight-flush', ordered[-1].face
return False
def four_of_a_kind(hand):
allfaces = [f for f, s in hand]
uniqueRanks = set(allfaces)
if len(uniqueRanks) != 2:
return False
for f in uniqueRanks:
if allfaces.count(f) == 4:
uniqueRanks.remove(f)
return 'four-of-a-kind', f
return False
<|reserved_special_token_0|>
def flush(hand):
allfaces = [f for f, s in hand]
first_card = hand[0]
other_cards = hand[1:]
if all(first_card.suit == card.suit for card in other_cards):
return 'flush', sorted(allfaces, key=lambda f: face.index(f),
reverse=True)
return False
def straight(hand):
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight', ordered[-1].face
return False
def three_of_a_kind(hand):
allfaces = [f for f, s in hand]
uniqueRanks = set(allfaces)
if len(uniqueRanks) != 3:
return False
for f in uniqueRanks:
if allfaces.count(f) == 3:
uniqueRanks.remove(f)
return 'three-of-a-kind', f
return False
def two_pair(hand):
allfaces = [f for f, s in hand]
allftypes = set(allfaces)
pairs = [f for f in allftypes if allfaces.count(f) == 2]
if len(pairs) != 2:
return False
p1, p2 = pairs
other_cards = [(allftypes - set(pairs)).pop()]
return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2
) else pairs[::-1] + other_cards
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
start = datetime.now()
<|reserved_special_token_0|>
def character_frequency(s):
freq = {}
for i in s:
if i in freq:
freq[i] += 1
else:
freq[i] = 1
return freq
suits = 'HDCS'.split()
faces = '2,3,4,5,6,7,8,9,T,J,Q,K,A'
face = faces.split(',')
class Card(namedtuple('Card', 'face, suit')):
def __repr__(self):
return ''.join(self)
def royal_flush(hand):
royalface = 'TJQKA'
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in royalface:
return 'royal-flush', ordered[-1].face
return False
def straight_flush(hand):
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
first_card = ordered[0]
other_cards = ordered[1:]
if all(first_card.suit == card.suit for card in other_cards):
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight-flush', ordered[-1].face
return False
def four_of_a_kind(hand):
allfaces = [f for f, s in hand]
uniqueRanks = set(allfaces)
if len(uniqueRanks) != 2:
return False
for f in uniqueRanks:
if allfaces.count(f) == 4:
uniqueRanks.remove(f)
return 'four-of-a-kind', f
return False
def full_house(hand):
allfaces = [f for f, s in hand]
rankFrequency = character_frequency(allfaces)
if len(rankFrequency) == 2 and (rankFrequency.values()[0] == 2 and
rankFrequency.values()[1] == 3):
return 'full-house'
return False
def flush(hand):
allfaces = [f for f, s in hand]
first_card = hand[0]
other_cards = hand[1:]
if all(first_card.suit == card.suit for card in other_cards):
return 'flush', sorted(allfaces, key=lambda f: face.index(f),
reverse=True)
return False
def straight(hand):
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)
)
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight', ordered[-1].face
return False
def three_of_a_kind(hand):
allfaces = [f for f, s in hand]
uniqueRanks = set(allfaces)
if len(uniqueRanks) != 3:
return False
for f in uniqueRanks:
if allfaces.count(f) == 3:
uniqueRanks.remove(f)
return 'three-of-a-kind', f
return False
def two_pair(hand):
allfaces = [f for f, s in hand]
allftypes = set(allfaces)
pairs = [f for f in allftypes if allfaces.count(f) == 2]
if len(pairs) != 2:
return False
p1, p2 = pairs
other_cards = [(allftypes - set(pairs)).pop()]
return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2
) else pairs[::-1] + other_cards
def one_pair(hand):
allfaces = [f for f, s in hand]
allftypes = set(allfaces)
pairs = [f for f in allftypes if allfaces.count(f) == 2]
if len(pairs) != 1:
return False
allftypes.remove(pairs[0])
return 'one-pair', pairs + sorted(allftypes, key=lambda f: face.index(f
), reverse=True)
def high_card(hand):
allfaces = [f for f, s in hand]
return 'high_card', sorted(allfaces, key=lambda f: allfaces.index(f),
reverse=True)[0]
def create_hand_tuple(cards='5D 8C 9S JS AC'):
hand = []
for card in cards.split():
face, suit = card[:-1], card[-1]
hand.append(Card(face, suit))
return hand
handrankorder = (royal_flush, straight_flush, four_of_a_kind, full_house,
flush, straight, three_of_a_kind, two_pair, one_pair, high_card)
def determine_rank(cards):
hand = create_hand_tuple(cards)
for ranker in handrankorder:
rank = ranker(hand)
if rank:
break
return rank
for play in open('p054_poker.txt', 'r').readlines():
play = play.strip()
h1 = play[:15]
h2 = play[15:]
print(f'{determine_rank(h1)}\t\t{determine_rank(h2)}')
print(f"""
fin in {datetime.now() - start}""")
<|reserved_special_token_1|>
from datetime import datetime
start = datetime.now()
# Poker Hand Analyser Library for Project Euler: Problem 54
from collections import namedtuple
# import pe_lib
def character_frequency(s):
freq = {}
for i in s:
if i in freq:
freq[i] += 1
else:
freq[i] = 1
return freq
suits = "HDCS".split()
faces = "2,3,4,5,6,7,8,9,T,J,Q,K,A"
face = faces.split(',')
class Card(namedtuple('Card', 'face, suit')):
def __repr__(self):
return ''.join(self)
def royal_flush(hand):
royalface = "TJQKA"
# sort the cards based on the face rank of each card
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))
first_card = ordered[0]
other_cards = ordered[1:]
# check if all are of the same suit
if all(first_card.suit == card.suit for card in other_cards):
# check if they are in sequential order
# compare the ordered faces substring with the face list (which is converted to string)
if ''.join(card.face for card in ordered) in royalface:
return 'royal-flush', ordered[-1].face
return False
def straight_flush(hand):
# sort the cards based on the face rank of each card
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))
first_card = ordered[0]
other_cards = ordered[1:]
# check if all are of the same suit
if all(first_card.suit == card.suit for card in other_cards):
# check if they are in sequential order
# compare the ordered faces substring with the face list (which is converted to string)
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight-flush', ordered[-1].face
return False
def four_of_a_kind(hand):
allfaces = [f for f,s in hand]
# create a unique set of ranks
uniqueRanks = set(allfaces)
# if there are more than 2 ranks, it's not four of a kind
if len(uniqueRanks) != 2:
return False
for f in uniqueRanks:
# if there are 4 faces, it is four of a kind
if allfaces.count(f) == 4:
uniqueRanks.remove(f)
return "four-of-a-kind", f
return False
def full_house(hand):
allfaces = [f for f,s in hand]
rankFrequency = character_frequency(allfaces)
# if there are 2 types of ranks and there's a card with 1 pair and 3 of a kind
if len(rankFrequency) == 2 and (rankFrequency.values()[0] == 2 and rankFrequency.values()[1] == 3):
return 'full-house'
return False
def flush(hand):
allfaces = [f for f,s in hand]
first_card = hand[0]
other_cards = hand[1:]
if all(first_card.suit == card.suit for card in other_cards):
return 'flush', sorted(allfaces, key=lambda f: face.index(f), reverse=True)
return False
def straight(hand):
ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))
if ''.join(card.face for card in ordered) in ''.join(face):
return 'straight', ordered[-1].face
return False;
def three_of_a_kind(hand):
allfaces = [f for f,s in hand]
uniqueRanks = set(allfaces)
if len(uniqueRanks) != 3:
return False
for f in uniqueRanks:
if allfaces.count(f) == 3:
uniqueRanks.remove(f)
return "three-of-a-kind", f
return False;
def two_pair(hand):
allfaces = [f for f,s in hand]
allftypes = set(allfaces)
# collect pairs
pairs = [f for f in allftypes if allfaces.count(f) == 2]
# if there are more than two pairs
if len(pairs) != 2:
return False
p1, p2 = pairs
# get the difference using sets
other_cards = [(allftypes - set(pairs)).pop()]
return 'two-pair', pairs + other_cards if(face.index(p1) > face.index(p2)) else pairs[::-1] + other_cards
def one_pair(hand):
allfaces = [f for f,s in hand]
allftypes = set(allfaces)
# collect pairs
pairs = [f for f in allftypes if allfaces.count(f) == 2]
# if there's more than one pair
if len(pairs) != 1:
return False
allftypes.remove(pairs[0])
return 'one-pair', pairs + sorted(allftypes, key=lambda f: face.index(f), reverse=True)
def high_card(hand):
# collect all faces from each card
allfaces = [f for f,s in hand]
#sort the faces and show the highest card
return "high_card", sorted(allfaces, key=lambda f: allfaces.index(f), reverse=True)[0]
def create_hand_tuple(cards = "5D 8C 9S JS AC"):
hand = []
for card in cards.split():
face, suit = card[:-1], card[-1]
hand.append(Card(face, suit))
return hand;
# functions
handrankorder = (royal_flush,straight_flush,four_of_a_kind,full_house,
flush,straight,three_of_a_kind,two_pair,
one_pair,high_card)
def determine_rank(cards):
hand = create_hand_tuple(cards)
for ranker in handrankorder:
rank = ranker(hand)
if rank:
break
return rank
for play in open('p054_poker.txt', 'r').readlines():
play = play.strip()
h1 = play[:15]
h2 = play[15:]
print(f"{determine_rank(h1)}\t\t{determine_rank(h2)}")
print(f"\n\n\nfin in {datetime.now()-start}")
|
flexible
|
{
"blob_id": "561763d4d7b613446f2890ef629b631542f2f472",
"index": 2776,
"step-1": "<mask token>\n\n\nclass Card(namedtuple('Card', 'face, suit')):\n\n def __repr__(self):\n return ''.join(self)\n\n\ndef royal_flush(hand):\n royalface = 'TJQKA'\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in royalface:\n return 'royal-flush', ordered[-1].face\n return False\n\n\ndef straight_flush(hand):\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in ''.join(face):\n return 'straight-flush', ordered[-1].face\n return False\n\n\ndef four_of_a_kind(hand):\n allfaces = [f for f, s in hand]\n uniqueRanks = set(allfaces)\n if len(uniqueRanks) != 2:\n return False\n for f in uniqueRanks:\n if allfaces.count(f) == 4:\n uniqueRanks.remove(f)\n return 'four-of-a-kind', f\n return False\n\n\n<mask token>\n\n\ndef flush(hand):\n allfaces = [f for f, s in hand]\n first_card = hand[0]\n other_cards = hand[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n return 'flush', sorted(allfaces, key=lambda f: face.index(f),\n reverse=True)\n return False\n\n\n<mask token>\n\n\ndef two_pair(hand):\n allfaces = [f for f, s in hand]\n allftypes = set(allfaces)\n pairs = [f for f in allftypes if allfaces.count(f) == 2]\n if len(pairs) != 2:\n return False\n p1, p2 = pairs\n other_cards = [(allftypes - set(pairs)).pop()]\n return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2\n ) else pairs[::-1] + other_cards\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef character_frequency(s):\n freq = {}\n for i in s:\n if i in freq:\n freq[i] += 1\n else:\n freq[i] = 1\n return freq\n\n\n<mask token>\n\n\nclass Card(namedtuple('Card', 'face, suit')):\n\n def __repr__(self):\n return ''.join(self)\n\n\ndef royal_flush(hand):\n royalface = 'TJQKA'\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in royalface:\n return 'royal-flush', ordered[-1].face\n return False\n\n\ndef straight_flush(hand):\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in ''.join(face):\n return 'straight-flush', ordered[-1].face\n return False\n\n\ndef four_of_a_kind(hand):\n allfaces = [f for f, s in hand]\n uniqueRanks = set(allfaces)\n if len(uniqueRanks) != 2:\n return False\n for f in uniqueRanks:\n if allfaces.count(f) == 4:\n uniqueRanks.remove(f)\n return 'four-of-a-kind', f\n return False\n\n\n<mask token>\n\n\ndef flush(hand):\n allfaces = [f for f, s in hand]\n first_card = hand[0]\n other_cards = hand[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n return 'flush', sorted(allfaces, key=lambda f: face.index(f),\n reverse=True)\n return False\n\n\n<mask token>\n\n\ndef two_pair(hand):\n allfaces = [f for f, s in hand]\n allftypes = set(allfaces)\n pairs = [f for f in allftypes if allfaces.count(f) == 2]\n if len(pairs) != 2:\n return False\n p1, p2 = pairs\n other_cards = [(allftypes - set(pairs)).pop()]\n return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2\n ) else pairs[::-1] + other_cards\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef character_frequency(s):\n freq = {}\n for i in s:\n if i in freq:\n freq[i] += 1\n else:\n freq[i] = 1\n return freq\n\n\n<mask token>\n\n\nclass Card(namedtuple('Card', 'face, suit')):\n\n def __repr__(self):\n return ''.join(self)\n\n\ndef royal_flush(hand):\n royalface = 'TJQKA'\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in royalface:\n return 'royal-flush', ordered[-1].face\n return False\n\n\ndef straight_flush(hand):\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in ''.join(face):\n return 'straight-flush', ordered[-1].face\n return False\n\n\ndef four_of_a_kind(hand):\n allfaces = [f for f, s in hand]\n uniqueRanks = set(allfaces)\n if len(uniqueRanks) != 2:\n return False\n for f in uniqueRanks:\n if allfaces.count(f) == 4:\n uniqueRanks.remove(f)\n return 'four-of-a-kind', f\n return False\n\n\n<mask token>\n\n\ndef flush(hand):\n allfaces = [f for f, s in hand]\n first_card = hand[0]\n other_cards = hand[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n return 'flush', sorted(allfaces, key=lambda f: face.index(f),\n reverse=True)\n return False\n\n\ndef straight(hand):\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n if ''.join(card.face for card in ordered) in ''.join(face):\n return 'straight', ordered[-1].face\n return False\n\n\ndef three_of_a_kind(hand):\n allfaces = [f for f, s in hand]\n uniqueRanks = set(allfaces)\n if len(uniqueRanks) != 3:\n return False\n for f in uniqueRanks:\n if allfaces.count(f) == 3:\n uniqueRanks.remove(f)\n return 'three-of-a-kind', f\n return 
False\n\n\ndef two_pair(hand):\n allfaces = [f for f, s in hand]\n allftypes = set(allfaces)\n pairs = [f for f in allftypes if allfaces.count(f) == 2]\n if len(pairs) != 2:\n return False\n p1, p2 = pairs\n other_cards = [(allftypes - set(pairs)).pop()]\n return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2\n ) else pairs[::-1] + other_cards\n\n\n<mask token>\n",
"step-4": "<mask token>\nstart = datetime.now()\n<mask token>\n\n\ndef character_frequency(s):\n freq = {}\n for i in s:\n if i in freq:\n freq[i] += 1\n else:\n freq[i] = 1\n return freq\n\n\nsuits = 'HDCS'.split()\nfaces = '2,3,4,5,6,7,8,9,T,J,Q,K,A'\nface = faces.split(',')\n\n\nclass Card(namedtuple('Card', 'face, suit')):\n\n def __repr__(self):\n return ''.join(self)\n\n\ndef royal_flush(hand):\n royalface = 'TJQKA'\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in royalface:\n return 'royal-flush', ordered[-1].face\n return False\n\n\ndef straight_flush(hand):\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n first_card = ordered[0]\n other_cards = ordered[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n if ''.join(card.face for card in ordered) in ''.join(face):\n return 'straight-flush', ordered[-1].face\n return False\n\n\ndef four_of_a_kind(hand):\n allfaces = [f for f, s in hand]\n uniqueRanks = set(allfaces)\n if len(uniqueRanks) != 2:\n return False\n for f in uniqueRanks:\n if allfaces.count(f) == 4:\n uniqueRanks.remove(f)\n return 'four-of-a-kind', f\n return False\n\n\ndef full_house(hand):\n allfaces = [f for f, s in hand]\n rankFrequency = character_frequency(allfaces)\n if len(rankFrequency) == 2 and (rankFrequency.values()[0] == 2 and \n rankFrequency.values()[1] == 3):\n return 'full-house'\n return False\n\n\ndef flush(hand):\n allfaces = [f for f, s in hand]\n first_card = hand[0]\n other_cards = hand[1:]\n if all(first_card.suit == card.suit for card in other_cards):\n return 'flush', sorted(allfaces, key=lambda f: face.index(f),\n reverse=True)\n return False\n\n\ndef straight(hand):\n ordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit)\n )\n if ''.join(card.face for 
card in ordered) in ''.join(face):\n return 'straight', ordered[-1].face\n return False\n\n\ndef three_of_a_kind(hand):\n allfaces = [f for f, s in hand]\n uniqueRanks = set(allfaces)\n if len(uniqueRanks) != 3:\n return False\n for f in uniqueRanks:\n if allfaces.count(f) == 3:\n uniqueRanks.remove(f)\n return 'three-of-a-kind', f\n return False\n\n\ndef two_pair(hand):\n allfaces = [f for f, s in hand]\n allftypes = set(allfaces)\n pairs = [f for f in allftypes if allfaces.count(f) == 2]\n if len(pairs) != 2:\n return False\n p1, p2 = pairs\n other_cards = [(allftypes - set(pairs)).pop()]\n return 'two-pair', pairs + other_cards if face.index(p1) > face.index(p2\n ) else pairs[::-1] + other_cards\n\n\ndef one_pair(hand):\n allfaces = [f for f, s in hand]\n allftypes = set(allfaces)\n pairs = [f for f in allftypes if allfaces.count(f) == 2]\n if len(pairs) != 1:\n return False\n allftypes.remove(pairs[0])\n return 'one-pair', pairs + sorted(allftypes, key=lambda f: face.index(f\n ), reverse=True)\n\n\ndef high_card(hand):\n allfaces = [f for f, s in hand]\n return 'high_card', sorted(allfaces, key=lambda f: allfaces.index(f),\n reverse=True)[0]\n\n\ndef create_hand_tuple(cards='5D 8C 9S JS AC'):\n hand = []\n for card in cards.split():\n face, suit = card[:-1], card[-1]\n hand.append(Card(face, suit))\n return hand\n\n\nhandrankorder = (royal_flush, straight_flush, four_of_a_kind, full_house,\n flush, straight, three_of_a_kind, two_pair, one_pair, high_card)\n\n\ndef determine_rank(cards):\n hand = create_hand_tuple(cards)\n for ranker in handrankorder:\n rank = ranker(hand)\n if rank:\n break\n return rank\n\n\nfor play in open('p054_poker.txt', 'r').readlines():\n play = play.strip()\n h1 = play[:15]\n h2 = play[15:]\n print(f'{determine_rank(h1)}\\t\\t{determine_rank(h2)}')\nprint(f\"\"\"\n\n\nfin in {datetime.now() - start}\"\"\")\n",
"step-5": "from datetime import datetime\nstart = datetime.now()\n\n# Poker Hand Analyser Library for Project Euler: Problem 54\nfrom collections import namedtuple\n# import pe_lib\n\ndef character_frequency(s):\n\tfreq = {}\n\tfor i in s:\n\t\tif i in freq:\n\t\t\tfreq[i] += 1\n\t\telse:\n\t\t\tfreq[i] = 1\n\treturn freq\n\nsuits = \"HDCS\".split()\nfaces = \"2,3,4,5,6,7,8,9,T,J,Q,K,A\"\nface = faces.split(',')\n\nclass Card(namedtuple('Card', 'face, suit')):\n\tdef __repr__(self):\n\t\treturn ''.join(self)\n\ndef royal_flush(hand):\n\troyalface = \"TJQKA\"\n\t# sort the cards based on the face rank of each card\n\tordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))\n\n\tfirst_card = ordered[0]\n\tother_cards = ordered[1:]\n\n\t# check if all are of the same suit\n\tif all(first_card.suit == card.suit for card in other_cards):\n\t\t# check if they are in sequential order\n\t\t# compare the ordered faces substring with the face list (which is converted to string)\n\t\tif ''.join(card.face for card in ordered) in royalface:\n\t\t\treturn 'royal-flush', ordered[-1].face\n\treturn False\n\ndef straight_flush(hand):\n\t# sort the cards based on the face rank of each card\n\tordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))\n\n\tfirst_card = ordered[0]\n\tother_cards = ordered[1:]\n\n\t# check if all are of the same suit\n\tif all(first_card.suit == card.suit for card in other_cards):\n\t\t# check if they are in sequential order\n\t\t# compare the ordered faces substring with the face list (which is converted to string)\n\t\tif ''.join(card.face for card in ordered) in ''.join(face):\n\t\t\treturn 'straight-flush', ordered[-1].face\n\treturn False\n\ndef four_of_a_kind(hand):\n\tallfaces = [f for f,s in hand]\n\t\n\t# create a unique set of ranks\n\tuniqueRanks = set(allfaces)\n\n\t# if there are more than 2 ranks, it's not four of a kind\n\tif len(uniqueRanks) != 2:\n\t\treturn False\n\n\tfor f in uniqueRanks:\n\t\t# 
if there are 4 faces, it is four of a kind\n\t\tif allfaces.count(f) == 4:\n\t\t\tuniqueRanks.remove(f)\n\t\t\treturn \"four-of-a-kind\", f\n\n\treturn False\n\ndef full_house(hand):\n\tallfaces = [f for f,s in hand]\n\n\trankFrequency = character_frequency(allfaces)\n\n\t# if there are 2 types of ranks and there's a card with 1 pair and 3 of a kind\n\tif len(rankFrequency) == 2 and (rankFrequency.values()[0] == 2 and rankFrequency.values()[1] == 3):\n\t\treturn 'full-house'\n\n\treturn False\n\ndef flush(hand):\n\tallfaces = [f for f,s in hand]\n\n\tfirst_card = hand[0]\n\tother_cards = hand[1:]\n\n\tif all(first_card.suit == card.suit for card in other_cards):\n\t\treturn 'flush', sorted(allfaces, key=lambda f: face.index(f), reverse=True)\n\n\treturn False\n\ndef straight(hand):\n\tordered = sorted(hand, key=lambda card: (faces.index(card.face), card.suit))\n\tif ''.join(card.face for card in ordered) in ''.join(face):\n\t\treturn 'straight', ordered[-1].face\n\treturn False;\n\ndef three_of_a_kind(hand):\n\tallfaces = [f for f,s in hand]\n\n\tuniqueRanks = set(allfaces)\n\n\tif len(uniqueRanks) != 3:\n\t\treturn False\n\n\tfor f in uniqueRanks:\n\t\tif allfaces.count(f) == 3:\n\t\t\tuniqueRanks.remove(f)\n\t\t\treturn \"three-of-a-kind\", f\n\n\treturn False;\n\ndef two_pair(hand):\n\tallfaces = [f for f,s in hand]\n\tallftypes = set(allfaces)\n\t\n\t# collect pairs\n\tpairs = [f for f in allftypes if allfaces.count(f) == 2]\n\t\n\t# if there are more than two pairs\n\tif len(pairs) != 2:\n\t\treturn False\n\n\tp1, p2 = pairs\n\t# get the difference using sets\n\tother_cards = [(allftypes - set(pairs)).pop()]\n\treturn 'two-pair', pairs + other_cards if(face.index(p1) > face.index(p2)) else pairs[::-1] + other_cards\n\ndef one_pair(hand):\n\tallfaces = [f for f,s in hand]\n\tallftypes = set(allfaces)\n\n\t# collect pairs\n\tpairs = [f for f in allftypes if allfaces.count(f) == 2]\n\n\t# if there's more than one pair\n\tif len(pairs) != 1:\n\t\treturn 
False\n\n\tallftypes.remove(pairs[0])\n\treturn 'one-pair', pairs + sorted(allftypes, key=lambda f: face.index(f), reverse=True)\n\ndef high_card(hand):\n\t# collect all faces from each card\n\tallfaces = [f for f,s in hand]\n\n\t#sort the faces and show the highest card\n\treturn \"high_card\", sorted(allfaces, key=lambda f: allfaces.index(f), reverse=True)[0] \n\ndef create_hand_tuple(cards = \"5D 8C 9S JS AC\"):\n\thand = []\n\n\tfor card in cards.split():\n\t\tface, suit = card[:-1], card[-1]\n\t\thand.append(Card(face, suit))\n\n\treturn hand;\n\n# functions\nhandrankorder = (royal_flush,straight_flush,four_of_a_kind,full_house,\n\t\t\t\tflush,straight,three_of_a_kind,two_pair,\n\t\t\t\tone_pair,high_card)\n\ndef determine_rank(cards):\n\thand = create_hand_tuple(cards)\n\tfor ranker in handrankorder:\n\t\trank = ranker(hand)\n\n\t\tif rank:\n\t\t\tbreak\n\treturn rank\n\n\nfor play in open('p054_poker.txt', 'r').readlines():\n\tplay = play.strip()\n\th1 = play[:15]\n\th2 = play[15:]\n\tprint(f\"{determine_rank(h1)}\\t\\t{determine_rank(h2)}\")\n\nprint(f\"\\n\\n\\nfin in {datetime.now()-start}\")\n",
"step-ids": [
7,
8,
10,
17,
19
]
}
|
[
7,
8,
10,
17,
19
] |
from django.db import models
from django.utils import timezone
from pprint import pprint
class Cast(models.Model):
    """A character ("cast member") that can be tagged on comic pages."""
    # All fields are optional so partially-filled entries can be saved.
    name = models.CharField(max_length=50, blank=True, null=True)
    image = models.ImageField(upload_to='cast', blank=True, null=True)  # stored under MEDIA_ROOT/cast
    description = models.CharField(max_length=400, blank=True, null=True)

    def __str__(self):
        # NOTE(review): name is nullable, so str(instance) raises TypeError
        # when name is None — confirm name is always set in practice.
        return self.name
class Issue(models.Model):
    """A comic issue; Comic pages reference it via a nullable foreign key."""
    title = models.CharField(max_length=200, blank=True, null=True)
    image = models.ImageField(upload_to='issues', blank=True, null=True)  # cover art
    issue_number = models.IntegerField(blank=True, null=True)

    def __str__(self):
        # NOTE(review): title is nullable — str(instance) raises if it is None.
        return self.title
class Comic(models.Model):
    """A single comic page, globally ordered by a computed sort_number.

    sort_number encodes (issue, page) as issue * MAX_PAGES_PER_ISSUE + page,
    so ordering by it sorts first by issue, then by page number.
    """

    # Multiplier that folds the issue number into the sort key; assumes no
    # issue ever exceeds 1000 pages.
    MAX_PAGES_PER_ISSUE = 1000

    sort_number = models.IntegerField(blank=True, null=True)
    page_number = models.IntegerField(blank=True, null=True)
    last_page = models.IntegerField(default=1)
    title = models.CharField(max_length=200, blank=True, null=True)
    issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=models.DO_NOTHING)
    image = models.ImageField(upload_to='comics', blank=True, null=True)
    date_added = models.DateTimeField(
        help_text="Posted on: ",
        default=timezone.now, null=True, blank=True
    )
    cast_members = models.ManyToManyField(Cast, related_name="comics", blank=True)

    class Meta:
        ordering = ['-sort_number', '-date_added']

    def __str__(self):
        # NOTE(review): title is nullable — str(instance) raises if it is None.
        return self.title

    @staticmethod
    def sortOrder(page_number):
        """Return the global sort key for *page_number*, or None if unset.

        Pages below 33 belong to issue 1; everything else is issue 2.
        TODO: add issue 3 logic when we get there.
        """
        # BUG FIX: page_number is a nullable field and int(None) raised a
        # TypeError here, so a Comic without a page number could not be
        # saved. An unset page now yields a null sort_number instead.
        if page_number is None:
            return None
        page = int(page_number)
        issue_num = 1 if page < 33 else 2
        return issue_num * Comic.MAX_PAGES_PER_ISSUE + page

    def save(self, *args, **kwargs):
        """Recompute sort_number, then call the "real" model save()."""
        self.sort_number = Comic.sortOrder(self.page_number)
        super(Comic, self).save(*args, **kwargs)
class ComicManager(models.Model):
    """Holds the site-wide "last page" number and pushes it to all comics."""

    last_page = models.IntegerField(default=1)

    class Meta:
        verbose_name_plural = "Comic Manager"

    def __str__(self):
        return str(self.last_page)

    def save(self, *args, **kwargs):
        """Persist this record, then sync last_page onto lagging comics."""
        super(ComicManager, self).save(*args, **kwargs)
        # TODO: run this sync automatically whenever a Comic is saved,
        # instead of only when the manager record itself is saved.
        for comic in Comic.objects.all():
            if comic.last_page >= self.last_page:
                continue
            comic.last_page = self.last_page
            comic.save()
class HeaderImage(models.Model):
    """A titled banner image uploaded for the site header."""
    title = models.CharField(max_length=100, blank=True, null=True)
    image = models.ImageField(upload_to='images', blank=True, null=True)  # stored under MEDIA_ROOT/images

    class Meta:
        verbose_name_plural = ('Header Images')

    def __str__(self):
        # NOTE(review): title is nullable — str(instance) raises if it is None.
        return self.title
|
normal
|
{
"blob_id": "45dc9d362a2ddfd408f93452bda0b7338057ca81",
"index": 8322,
"step-1": "<mask token>\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-2": "<mask token>\n\n\nclass Issue(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-3": "<mask token>\n\n\nclass Issue(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-4": "from django.db import models\nfrom django.utils import timezone\nfrom pprint import pprint\n\n\nclass Cast(models.Model):\n name = models.CharField(max_length=50, blank=True, null=True)\n image = models.ImageField(upload_to='cast', blank=True, null=True)\n description = models.CharField(max_length=400, blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass Issue(models.Model):\n title = models.CharField(max_length=200, blank=True, null=True)\n image = models.ImageField(upload_to='issues', blank=True, null=True)\n issue_number = models.IntegerField(blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True)\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=\n models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(help_text='Posted on: ', default=\n timezone.now, null=True, blank=True)\n cast_members = models.ManyToManyField(Cast, related_name='comics',\n blank=True)\n\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs)\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n\n class Meta:\n verbose_name_plural = 'Comic Manager'\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, 
self).save(*args, **kwargs)\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n\n class Meta:\n verbose_name_plural = 'Header Images'\n\n def __str__(self):\n return self.title\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\nfrom pprint import pprint\n\nclass Cast(models.Model):\n name = models.CharField(max_length=50, blank=True, null=True)\n image = models.ImageField(upload_to='cast', blank=True, null=True)\n description = models.CharField(max_length=400, blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass Issue(models.Model):\n title = models.CharField(max_length=200, blank=True, null=True)\n image = models.ImageField(upload_to='issues', blank=True, null=True)\n issue_number = models.IntegerField(blank=True, null=True)\n\n def __str__(self):\n return self.title\n\n\nclass Comic(models.Model):\n MAX_PAGES_PER_ISSUE = 1000\n sort_number = models.IntegerField(blank=True, null=True)\n page_number = models.IntegerField(blank=True, null=True )\n last_page = models.IntegerField(default=1)\n title = models.CharField(max_length=200, blank=True, null=True)\n issue = models.ForeignKey(Issue, blank=True, null=True, on_delete=models.DO_NOTHING)\n image = models.ImageField(upload_to='comics', blank=True, null=True)\n date_added = models.DateTimeField(\n help_text=\"Posted on: \",\n default = timezone.now, null=True, blank=True \n )\n cast_members = models.ManyToManyField(Cast, related_name=\"comics\", blank=True)\n\n class Meta:\n ordering = ['-sort_number', '-date_added']\n\n def __str__(self):\n return self.title\n\n @staticmethod\n def sortOrder(page_number):\n # TODO: ADD ISSUE 3 LOGIC WHEN WE GET THERE\n if int(page_number) < 33:\n issue_num = 1\n else:\n issue_num = 2\n # print('ISSUE NUM: ', issue_num)\n order = issue_num * Comic.MAX_PAGES_PER_ISSUE + int(page_number) \n # print ('SORT ORDER: ', order)\n return order\n\n def save(self, *args, **kwargs):\n self.sort_number = Comic.sortOrder(self.page_number)\n super(Comic, self).save(*args, **kwargs) # Call the \"real\" save() method.\n\n\n\nclass ComicManager(models.Model):\n last_page = models.IntegerField(default=1)\n\n class Meta:\n 
verbose_name_plural = (\"Comic Manager\")\n\n def __str__(self):\n return str(self.last_page)\n\n def save(self, *args, **kwargs):\n super(ComicManager, self).save(*args, **kwargs)\n # TODO - automate this so that anytime a comic is saved it checks last page status and runs here\n # update all Comic instances to have this last page\n comics = Comic.objects.all()\n for comic in comics:\n if comic.last_page < self.last_page:\n comic.last_page = self.last_page\n comic.save()\n\n\n\nclass HeaderImage(models.Model):\n title = models.CharField(max_length=100, blank=True, null=True)\n image = models.ImageField(upload_to='images', blank=True, null=True)\n\n class Meta: \n verbose_name_plural = ('Header Images')\n\n def __str__(self):\n return self.title\n\n",
"step-ids": [
12,
13,
14,
19,
20
]
}
|
[
12,
13,
14,
19,
20
] |
"""
Copyright (C) 2019-2020 Zilliz. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import requests
# Table the raw CSV is loaded into by test_load_file.
original_table_name = "raw_data"
# Cleaned/derived table created by test_create_table.
table_name = "nyctaxi"
# Sample dataset shipped with the Arctern GUI server image.
csv_path = "/arctern/gui/server/arctern_server/data/0_5M_nyc_taxi_and_building.csv"
# Server-side scope id; overwritten with the real id by test_create_scope.
SCOPE = "nyc_taxi"
def _get_line_count(file):
    """Return the number of lines in *file*."""
    with open(file, "r") as handle:
        return sum(1 for _ in handle)
class TestScope():
    """Ordered end-to-end tests against the Arctern server HTTP API.

    The suite shares a single server-side "scope": it is created first
    (order=1), used by all intermediate tests to load the NYC taxi CSV,
    run SQL and render maps, and removed last (order=15). Ordering relies
    on pytest-ordering's ``@pytest.mark.run``; ``host`` and ``port`` are
    expected to be pytest fixtures. Responses are printed for debugging;
    most assertions only check for HTTP 200.
    """

    @pytest.mark.run(order=1)
    def test_create_scope(self, host, port):
        """POST /scope and store the returned scope id in module-global SCOPE."""
        url = "http://" + host + ":" + port + "/scope"
        r = requests.post(url=url)
        print(r.text)
        assert r.status_code == 200
        global SCOPE # pylint: disable=global-statement
        SCOPE = r.json()['scope']

    @pytest.mark.run(order=2)
    def test_load_file(self, host, port):
        """POST /loadfile to register the raw taxi CSV as *original_table_name*."""
        url = "http://" + host + ":" + port + "/loadfile"
        payload = {
            "scope": SCOPE,
            "tables": [
                {
                    "name": original_table_name,
                    "format": "csv",
                    "path": csv_path,
                    "options": {
                        "header": "True",
                        "delimiter": ","
                    },
                    "schema": [
                        {"VendorID": "string"},
                        {"tpep_pickup_datetime": "string"},
                        {"tpep_dropoff_datetime": "string"},
                        {"passenger_count": "long"},
                        {"trip_distance": "double"},
                        {"pickup_longitude": "double"},
                        {"pickup_latitude": "double"},
                        {"dropoff_longitude": "double"},
                        {"dropoff_latitude": "double"},
                        {"fare_amount": "double"},
                        {"tip_amount": "double"},
                        {"total_amount": "double"},
                        {"buildingid_pickup": "long"},
                        {"buildingid_dropoff": "long"},
                        {"buildingtext_pickup": "string"},
                        {"buildingtext_dropoff": "string"}
                    ]
                }
            ]
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200

    # TODO: necessary for /savefile? not convenient for cleaning up

    @pytest.mark.run(order=3)
    def test_table_schema(self, host, port):
        """GET /table/schema and check the raw table exposes all 16 columns."""
        url = "http://" + host + ":" + port + "/table/schema?table={}&scope={}".format(original_table_name, SCOPE)
        r = requests.get(url=url)
        print(r.text)
        assert r.status_code == 200
        assert len(r.json()['schema']) == 16

    @pytest.mark.run(order=4)
    def test_num_rows(self, host, port):
        """Row count of the loaded table must match the CSV minus its header."""
        url = "http://" + host + ":" + port + "/query"
        sql = "select count(*) as num_rows from {}".format(original_table_name)
        payload = {
            "scope": SCOPE,
            "sql": sql,
            "collect_result": "1"
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200
        assert len(r.json()['result']) == 1
        assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path) - 1

    @pytest.mark.run(order=5)
    def test_query(self, host, port):
        """A LIMITed SELECT returns exactly *limit* rows."""
        url = "http://" + host + ":" + port + "/query"
        limit = 1
        sql = "select * from {} limit {}".format(original_table_name, limit)
        payload = {
            "scope": SCOPE,
            "sql": sql,
            "collect_result": "1"
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200
        assert len(r.json()['result']) == limit

    @pytest.mark.run(order=6)
    def test_create_table(self, host, port):
        """Create the cleaned *table_name*: parsed timestamps, lat/lon bounds."""
        url = "http://" + host + ":" + port + "/query"
        payload = {
            "scope": SCOPE,
            "sql": "create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))".format(table_name, original_table_name),
            "collect_result": "0"
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200

    @pytest.mark.run(order=7)
    def test_pointmap(self, host, port):
        """POST /pointmap rendering pickup points inside a Manhattan polygon."""
        url = "http://" + host + ":" + port + "/pointmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "coordinate_system": "EPSG:4326",
                "point_color": "#2DEF4A",
                "point_size": 3,
                "opacity": 0.5
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=8)
    def test_weighted_pointmap(self, host, port):
        """POST /weighted_pointmap: color by tip_amount, size by fare_amount."""
        url = "http://" + host + ":" + port + "/weighted_pointmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "color_gradient": ["#0000FF", "#FF0000"],
                "color_bound": [0, 2],
                "size_bound": [0, 10],
                "opacity": 1.0,
                "coordinate_system": "EPSG:4326"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=9)
    def test_heatmap(self, host, port):
        """POST /heatmap weighting pickup points by passenger_count."""
        url = "http://" + host + ":" + port + "/heatmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "coordinate_system": "EPSG:4326",
                "map_zoom_level": 10,
                "aggregation_type": "sum"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=10)
    def test_choroplethmap(self, host, port):
        """POST /choroplethmap over dropoff building polygons (WKT)."""
        url = "http://" + host + ":" + port + "/choroplethmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "coordinate_system": "EPSG:4326",
                "color_gradient": ["#0000FF", "#FF0000"],
                "color_bound": [2.5, 5],
                "opacity": 1,
                "aggregation_type": "sum"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=11)
    def test_icon_viz(self, host, port):
        """POST /icon_viz rendering pickups with the taxi.png icon next to this file."""
        url = "http://" + host + ":" + port + "/icon_viz"
        import os
        dir_path = os.path.dirname(os.path.realpath(__file__))
        png_path = dir_path + "/taxi.png"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                'width': 1024,
                'height': 896,
                'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],
                'coordinate_system': 'EPSG:4326',
                'icon_path': png_path
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=12)
    def test_fishnetmap(self, host, port):
        """POST /fishnetmap aggregating passenger_count into grid cells."""
        url = "http://" + host + ":" + port + "/fishnetmap"
        payload = {
            "scope": SCOPE,
            "sql": "select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))".format(table_name),
            "params": {
                "width": 1024,
                "height": 896,
                "bounding_box": [-80.37976, 35.191296, -70.714099, 45.897445],
                "color_gradient": ["#0000FF", "#FF0000"],
                "cell_size": 4,
                "cell_spacing": 1,
                "opacity": 1.0,
                "coordinate_system": "EPSG:4326",
                "aggregation_type": "sum"
            }
        }
        r = requests.post(url=url, json=payload)
        assert r.status_code == 200
        print(r.text)
        # assert r.json()["result"] is not None

    @pytest.mark.run(order=13)
    def test_drop_table(self, host, port):
        """Drop both the derived and the raw table to clean up the scope."""
        url = "http://" + host + ":" + port + '/query'
        sql1 = "drop table if exists {}".format(table_name)
        sql2 = "drop table if exists {}".format(original_table_name)
        payload1 = {
            "scope": SCOPE,
            "sql": sql1,
            "collect_result": "0"
        }
        payload2 = {
            "scope": SCOPE,
            "sql": sql2,
            "collect_result": "0"
        }
        r = requests.post(url=url, json=payload1)
        print(r.text)
        assert r.status_code == 200
        r = requests.post(url=url, json=payload2)
        print(r.text)
        assert r.status_code == 200

    @pytest.mark.run(order=14)
    def test_command(self, host, port):
        """POST /command with a Monte-Carlo-pi script executed server-side.

        NOTE(review): the script references a ``spark`` name, so the server
        presumably injects a SparkSession into the exec namespace — verify.
        """
        url = "http://" + host + ":" + port + '/command'
        command = """
from __future__ import print_function
import sys
from random import random
from operator import add
partitions = 2
n = 100000 * partitions
def f(_):
    x = random() * 2 - 1
    y = random() * 2 - 1
    return 1 if x ** 2 + y ** 2 <= 1 else 0
count = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)
print("Pi is roughly %f" % (4.0 * count / n))
        """
        payload = {
            "scope": SCOPE,
            "command": command
        }
        r = requests.post(url=url, json=payload)
        print(r.text)
        assert r.status_code == 200

    @pytest.mark.run(order=15)
    def test_remove_scope(self, host, port):
        """DELETE /scope/<id> to tear down the scope created in order=1."""
        scope = SCOPE
        url = "http://" + host + ":" + port + "/scope/" + scope
        r = requests.delete(url=url)
        print(r.text)
        assert r.status_code == 200
|
normal
|
{
"blob_id": "65a9f732fc8c7b9c63f6ef0d7b2172bb4138a895",
"index": 2761,
"step-1": "<mask token>\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n <mask token>\n\n 
@pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 
'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-2": "<mask token>\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = 'http://' + host + ':' + port + '/loadfile'\n payload = {'scope': SCOPE, 'tables': [{'name': original_table_name,\n 'format': 'csv', 'path': csv_path, 'options': {'header': 'True',\n 'delimiter': ','}, 'schema': [{'VendorID': 'string'}, {\n 'tpep_pickup_datetime': 'string'}, {'tpep_dropoff_datetime':\n 'string'}, {'passenger_count': 'long'}, {'trip_distance':\n 'double'}, {'pickup_longitude': 'double'}, {'pickup_latitude':\n 'double'}, {'dropoff_longitude': 'double'}, {'dropoff_latitude':\n 'double'}, {'fare_amount': 'double'}, {'tip_amount': 'double'},\n {'total_amount': 'double'}, {'buildingid_pickup': 'long'}, {\n 'buildingid_dropoff': 'long'}, {'buildingtext_pickup': 'string'\n }, {'buildingtext_dropoff': 'string'}]}]}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = ('http://' + host + ':' + port +\n '/table/schema?table={}&scope={}'.format(original_table_name,\n SCOPE))\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n <mask token>\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n limit = 1\n sql = 'select * from {} limit {}'.format(original_table_name, limit)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n <mask token>\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + 
host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=9)\n def test_heatmap(self, host, port):\n url = 'http://' + host + ':' + port + '/heatmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': 
{'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'map_zoom_level': 10,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from 
{} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n 
print(r.text)\n assert r.status_code == 200\n",
"step-3": "<mask token>\n\n\ndef _get_line_count(file):\n with open(file, 'r') as f:\n return len(f.readlines())\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = 'http://' + host + ':' + port + '/loadfile'\n payload = {'scope': SCOPE, 'tables': [{'name': original_table_name,\n 'format': 'csv', 'path': csv_path, 'options': {'header': 'True',\n 'delimiter': ','}, 'schema': [{'VendorID': 'string'}, {\n 'tpep_pickup_datetime': 'string'}, {'tpep_dropoff_datetime':\n 'string'}, {'passenger_count': 'long'}, {'trip_distance':\n 'double'}, {'pickup_longitude': 'double'}, {'pickup_latitude':\n 'double'}, {'dropoff_longitude': 'double'}, {'dropoff_latitude':\n 'double'}, {'fare_amount': 'double'}, {'tip_amount': 'double'},\n {'total_amount': 'double'}, {'buildingid_pickup': 'long'}, {\n 'buildingid_dropoff': 'long'}, {'buildingtext_pickup': 'string'\n }, {'buildingtext_dropoff': 'string'}]}]}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = ('http://' + host + ':' + port +\n '/table/schema?table={}&scope={}'.format(original_table_name,\n SCOPE))\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n\n @pytest.mark.run(order=4)\n def test_num_rows(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql = 'select count(*) as num_rows from {}'.format(original_table_name)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == 1\n assert 
r.json()['result'][0]['num_rows'] == _get_line_count(csv_path\n ) - 1\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n limit = 1\n sql = 'select * from {} limit {}'.format(original_table_name, limit)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n\n @pytest.mark.run(order=6)\n def test_create_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n payload = {'scope': SCOPE, 'sql':\n \"create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))\"\n .format(table_name, original_table_name), 'collect_result': '0'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 
'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=9)\n def test_heatmap(self, host, port):\n url = 'http://' + host + ':' + port + '/heatmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'map_zoom_level': 10,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, 
passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n 
r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-4": "<mask token>\noriginal_table_name = 'raw_data'\ntable_name = 'nyctaxi'\ncsv_path = (\n '/arctern/gui/server/arctern_server/data/0_5M_nyc_taxi_and_building.csv')\nSCOPE = 'nyc_taxi'\n\n\ndef _get_line_count(file):\n with open(file, 'r') as f:\n return len(f.readlines())\n\n\nclass TestScope:\n\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = 'http://' + host + ':' + port + '/scope'\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = 'http://' + host + ':' + port + '/loadfile'\n payload = {'scope': SCOPE, 'tables': [{'name': original_table_name,\n 'format': 'csv', 'path': csv_path, 'options': {'header': 'True',\n 'delimiter': ','}, 'schema': [{'VendorID': 'string'}, {\n 'tpep_pickup_datetime': 'string'}, {'tpep_dropoff_datetime':\n 'string'}, {'passenger_count': 'long'}, {'trip_distance':\n 'double'}, {'pickup_longitude': 'double'}, {'pickup_latitude':\n 'double'}, {'dropoff_longitude': 'double'}, {'dropoff_latitude':\n 'double'}, {'fare_amount': 'double'}, {'tip_amount': 'double'},\n {'total_amount': 'double'}, {'buildingid_pickup': 'long'}, {\n 'buildingid_dropoff': 'long'}, {'buildingtext_pickup': 'string'\n }, {'buildingtext_dropoff': 'string'}]}]}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = ('http://' + host + ':' + port +\n '/table/schema?table={}&scope={}'.format(original_table_name,\n SCOPE))\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n\n @pytest.mark.run(order=4)\n def test_num_rows(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql = 'select count(*) as num_rows from {}'.format(original_table_name)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': 
'1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == 1\n assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path\n ) - 1\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n limit = 1\n sql = 'select * from {} limit {}'.format(original_table_name, limit)\n payload = {'scope': SCOPE, 'sql': sql, 'collect_result': '1'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n\n @pytest.mark.run(order=6)\n def test_create_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n payload = {'scope': SCOPE, 'sql':\n \"create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))\"\n .format(table_name, original_table_name), 'collect_result': '0'}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n 
.format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'point_color': '#2DEF4A',\n 'point_size': 3, 'opacity': 0.5}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = 'http://' + host + ':' + port + '/weighted_pointmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'color_gradient': ['#0000FF', '#FF0000'], 'color_bound': [0, 2],\n 'size_bound': [0, 10], 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=9)\n def test_heatmap(self, host, port):\n url = 'http://' + host + ':' + port + '/heatmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'map_zoom_level': 10,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n 
url = 'http://' + host + ':' + port + '/choroplethmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 'coordinate_system': 'EPSG:4326', 'color_gradient': ['#0000FF',\n '#FF0000'], 'color_bound': [2.5, 5], 'opacity': 1,\n 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = 'http://' + host + ':' + port + '/icon_viz'\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + '/taxi.png'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326', 'icon_path': png_path}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = 'http://' + host + ':' + port + '/fishnetmap'\n payload = {'scope': SCOPE, 'sql':\n \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\"\n .format(table_name), 'params': {'width': 1024, 'height': 896,\n 'bounding_box': [-80.37976, 35.191296, -70.714099, 45.897445],\n 
'color_gradient': ['#0000FF', '#FF0000'], 'cell_size': 4,\n 'cell_spacing': 1, 'opacity': 1.0, 'coordinate_system':\n 'EPSG:4326', 'aggregation_type': 'sum'}}\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = 'http://' + host + ':' + port + '/query'\n sql1 = 'drop table if exists {}'.format(table_name)\n sql2 = 'drop table if exists {}'.format(original_table_name)\n payload1 = {'scope': SCOPE, 'sql': sql1, 'collect_result': '0'}\n payload2 = {'scope': SCOPE, 'sql': sql2, 'collect_result': '0'}\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = 'http://' + host + ':' + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {'scope': SCOPE, 'command': command}\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = 'http://' + host + ':' + port + '/scope/' + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-5": "\"\"\"\nCopyright (C) 2019-2020 Zilliz. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport pytest\nimport requests\n\noriginal_table_name = \"raw_data\"\ntable_name = \"nyctaxi\"\ncsv_path = \"/arctern/gui/server/arctern_server/data/0_5M_nyc_taxi_and_building.csv\"\nSCOPE = \"nyc_taxi\"\n\ndef _get_line_count(file):\n with open(file, \"r\") as f:\n return len(f.readlines())\n\nclass TestScope():\n @pytest.mark.run(order=1)\n def test_create_scope(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/scope\"\n r = requests.post(url=url)\n print(r.text)\n assert r.status_code == 200\n global SCOPE # pylint: disable=global-statement\n SCOPE = r.json()['scope']\n\n @pytest.mark.run(order=2)\n def test_load_file(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/loadfile\"\n payload = {\n \"scope\": SCOPE,\n \"tables\": [\n {\n \"name\": original_table_name,\n \"format\": \"csv\",\n \"path\": csv_path,\n \"options\": {\n \"header\": \"True\",\n \"delimiter\": \",\"\n },\n \"schema\": [\n {\"VendorID\": \"string\"},\n {\"tpep_pickup_datetime\": \"string\"},\n {\"tpep_dropoff_datetime\": \"string\"},\n {\"passenger_count\": \"long\"},\n {\"trip_distance\": \"double\"},\n {\"pickup_longitude\": \"double\"},\n {\"pickup_latitude\": \"double\"},\n {\"dropoff_longitude\": \"double\"},\n {\"dropoff_latitude\": \"double\"},\n {\"fare_amount\": \"double\"},\n {\"tip_amount\": \"double\"},\n {\"total_amount\": \"double\"},\n 
{\"buildingid_pickup\": \"long\"},\n {\"buildingid_dropoff\": \"long\"},\n {\"buildingtext_pickup\": \"string\"},\n {\"buildingtext_dropoff\": \"string\"}\n ]\n }\n ]\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n # TODO: neccessary for /savefile? not convenient for cleaning up\n\n @pytest.mark.run(order=3)\n def test_table_schema(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/table/schema?table={}&scope={}\".format(original_table_name, SCOPE)\n r = requests.get(url=url)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['schema']) == 16\n\n @pytest.mark.run(order=4)\n def test_num_rows(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/query\"\n sql = \"select count(*) as num_rows from {}\".format(original_table_name)\n payload = {\n \"scope\": SCOPE,\n \"sql\": sql,\n \"collect_result\": \"1\"\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == 1\n assert r.json()['result'][0]['num_rows'] == _get_line_count(csv_path) - 1\n\n @pytest.mark.run(order=5)\n def test_query(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/query\"\n limit = 1\n sql = \"select * from {} limit {}\".format(original_table_name, limit)\n payload = {\n \"scope\": SCOPE,\n \"sql\": sql,\n \"collect_result\": \"1\"\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n assert len(r.json()['result']) == limit\n\n @pytest.mark.run(order=6)\n def test_create_table(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/query\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"create table {} as (select VendorID, to_timestamp(tpep_pickup_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_pickup_datetime, to_timestamp(tpep_dropoff_datetime,'yyyy-MM-dd HH:mm:ss XXXXX') as tpep_dropoff_datetime, passenger_count, trip_distance, pickup_longitude, pickup_latitude, 
dropoff_longitude, dropoff_latitude, fare_amount, tip_amount, total_amount, buildingid_pickup, buildingid_dropoff, buildingtext_pickup, buildingtext_dropoff from {} where (pickup_longitude between -180 and 180) and (pickup_latitude between -90 and 90) and (dropoff_longitude between -180 and 180) and (dropoff_latitude between -90 and 90))\".format(table_name, original_table_name),\n \"collect_result\": \"0\"\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=7)\n def test_pointmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/pointmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"coordinate_system\": \"EPSG:4326\",\n \"point_color\": \"#2DEF4A\",\n \"point_size\": 3,\n \"opacity\": 0.5\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=8)\n def test_weighted_pointmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/weighted_pointmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point, tip_amount as c, fare_amount as s from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n 
\"color_gradient\": [\"#0000FF\", \"#FF0000\"],\n \"color_bound\": [0, 2],\n \"size_bound\": [0, 10],\n \"opacity\": 1.0,\n \"coordinate_system\": \"EPSG:4326\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=9)\n def test_heatmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/heatmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"coordinate_system\": \"EPSG:4326\",\n \"map_zoom_level\": 10,\n \"aggregation_type\": \"sum\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=10)\n def test_choroplethmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/choroplethmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_GeomFromText(buildingtext_dropoff) as wkt, passenger_count as w from {} where (buildingtext_dropoff!='')\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"coordinate_system\": \"EPSG:4326\",\n \"color_gradient\": [\"#0000FF\", \"#FF0000\"],\n \"color_bound\": [2.5, 5],\n \"opacity\": 1,\n \"aggregation_type\": \"sum\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=11)\n def test_icon_viz(self, host, port):\n url = \"http://\" + host + 
\":\" + port + \"/icon_viz\"\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n png_path = dir_path + \"/taxi.png\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n 'width': 1024,\n 'height': 896,\n 'bounding_box': [-75.37976, 40.191296, -71.714099, 41.897445],\n 'coordinate_system': 'EPSG:4326',\n 'icon_path': png_path\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=12)\n def test_fishnetmap(self, host, port):\n url = \"http://\" + host + \":\" + port + \"/fishnetmap\"\n payload = {\n \"scope\": SCOPE,\n \"sql\": \"select ST_Point(pickup_longitude, pickup_latitude) as point, passenger_count as w from {} where ST_Within(ST_Point(pickup_longitude, pickup_latitude), ST_GeomFromText('POLYGON ((-73.998427 40.730309, -73.954348 40.730309, -73.954348 40.780816 ,-73.998427 40.780816, -73.998427 40.730309))'))\".format(table_name),\n \"params\": {\n \"width\": 1024,\n \"height\": 896,\n \"bounding_box\": [-80.37976, 35.191296, -70.714099, 45.897445],\n \"color_gradient\": [\"#0000FF\", \"#FF0000\"],\n \"cell_size\": 4,\n \"cell_spacing\": 1,\n \"opacity\": 1.0,\n \"coordinate_system\": \"EPSG:4326\",\n \"aggregation_type\": \"sum\"\n }\n }\n r = requests.post(url=url, json=payload)\n assert r.status_code == 200\n print(r.text)\n # assert r.json()[\"result\"] is not None\n\n @pytest.mark.run(order=13)\n def test_drop_table(self, host, port):\n url = \"http://\" + host + \":\" + port + '/query'\n sql1 = \"drop table if exists {}\".format(table_name)\n sql2 = \"drop table if exists {}\".format(original_table_name)\n payload1 = {\n 
\"scope\": SCOPE,\n \"sql\": sql1,\n \"collect_result\": \"0\"\n }\n payload2 = {\n \"scope\": SCOPE,\n \"sql\": sql2,\n \"collect_result\": \"0\"\n }\n r = requests.post(url=url, json=payload1)\n print(r.text)\n assert r.status_code == 200\n r = requests.post(url=url, json=payload2)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=14)\n def test_command(self, host, port):\n url = \"http://\" + host + \":\" + port + '/command'\n command = \"\"\"\nfrom __future__ import print_function\n\nimport sys\nfrom random import random\nfrom operator import add\n\npartitions = 2\nn = 100000 * partitions\n\ndef f(_):\n x = random() * 2 - 1\n y = random() * 2 - 1\n return 1 if x ** 2 + y ** 2 <= 1 else 0\n\ncount = spark.sparkContext.parallelize(range(1, n + 1), partitions).map(f).reduce(add)\nprint(\"Pi is roughly %f\" % (4.0 * count / n))\n \"\"\"\n payload = {\n \"scope\": SCOPE,\n \"command\": command\n }\n r = requests.post(url=url, json=payload)\n print(r.text)\n assert r.status_code == 200\n\n @pytest.mark.run(order=15)\n def test_remove_scope(self, host, port):\n scope = SCOPE\n url = \"http://\" + host + \":\" + port + \"/scope/\" + scope\n r = requests.delete(url=url)\n print(r.text)\n assert r.status_code == 200\n",
"step-ids": [
10,
14,
17,
18,
20
]
}
|
[
10,
14,
17,
18,
20
] |
# nomer7
import no2_modul2 # Atau apapun file-nya yang kamu buat tadi
class MhsTIF(no2_modul2.Mahasiswa): # perhatikan class induknya : Mahasiswa
"""Class MhsTIF yang dibangun dari class Mahasiswa"""
def kataKanPy(self):
print('Python is cool.')
"Apakah metode / state itu berasal dari class Manusia, Mahasiswa, atau MhsTIF?
"Jawab :
"Metoode atau state yang muncul berasal dari semua class baik Manusia, Mahasiswa, atau MhsTIF.
"Ini karena MhsTIF yang merupakan anak class dari Mahasiswa, dan itu membuat MhsTIF mewarisi
"semua properties dari Mahasiswa dan Manusia.
|
normal
|
{
"blob_id": "b54f47de85fe95d47a1b1be921997ad86d7b450d",
"index": 8777,
"step-1": "# nomer7\r\n\r\nimport no2_modul2 # Atau apapun file-nya yang kamu buat tadi\r\n\r\nclass MhsTIF(no2_modul2.Mahasiswa): # perhatikan class induknya : Mahasiswa\r\n \"\"\"Class MhsTIF yang dibangun dari class Mahasiswa\"\"\"\r\n def kataKanPy(self):\r\n print('Python is cool.')\r\n\r\n\"Apakah metode / state itu berasal dari class Manusia, Mahasiswa, atau MhsTIF?\r\n \"Jawab :\r\n \"Metoode atau state yang muncul berasal dari semua class baik Manusia, Mahasiswa, atau MhsTIF.\r\n \"Ini karena MhsTIF yang merupakan anak class dari Mahasiswa, dan itu membuat MhsTIF mewarisi\r\n \"semua properties dari Mahasiswa dan Manusia.\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_read_stats(isize=400):
stats = readstatistics.ReadStatistics(None)
stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)
stats.orientations = ['+-']
return stats
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_read_stats(isize=400):
stats = readstatistics.ReadStatistics(None)
stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)
stats.orientations = ['+-']
return stats
def test_gt(genome_source, genome_source_deletion):
genome_source_deletion, deletion_length = genome_source_deletion
refseq = genome_source.names_to_contigs['chr2']
altseq = genome_source_deletion.names_to_contigs['chr2']
print('')
coverage = 50
read_length = 150
ref_reads = conftest.simulate_read_pairs(refseq, int(len(refseq) / (
read_length * 2) * coverage))
alt_reads = conftest.simulate_read_pairs(altseq, int(len(altseq) / (
read_length * 2) * coverage))
print(len(ref_reads), len(alt_reads))
combined_reads = []
for i, _, pair in ref_reads:
if 4000 - 500 < i < 4000 + 500 + deletion_length:
pair._allele = 'ref'
combined_reads.append(pair)
for i, _, pair in alt_reads:
if 4000 - 500 < i < 4500:
pair._allele = 'alt'
combined_reads.append(pair)
for pair in combined_reads:
pair.realign([genome_source], [genome_source_deletion])
ref_breakpoints = [Locus('chr2', 4000, 4000, '+'), Locus('chr2', 4000 +
deletion_length, 4000 + deletion_length, '+')]
alt_breakpoints = [Locus('chr2', 4000, 4000, '+')]
ref_count, alt_count = genotyping.assign_reads_to_alleles(combined_reads,
ref_breakpoints, alt_breakpoints, get_read_stats())
print(':::::', ref_count, alt_count)
<|reserved_special_token_1|>
import collections
import numpy
import pytest
import random
import conftest
from svviz2.io import readstatistics
from svviz2.remap import genotyping
from svviz2.utility.intervals import Locus
def get_read_stats(isize=400):
stats = readstatistics.ReadStatistics(None)
stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)
stats.orientations = ['+-']
return stats
def test_gt(genome_source, genome_source_deletion):
genome_source_deletion, deletion_length = genome_source_deletion
refseq = genome_source.names_to_contigs['chr2']
altseq = genome_source_deletion.names_to_contigs['chr2']
print('')
coverage = 50
read_length = 150
ref_reads = conftest.simulate_read_pairs(refseq, int(len(refseq) / (
read_length * 2) * coverage))
alt_reads = conftest.simulate_read_pairs(altseq, int(len(altseq) / (
read_length * 2) * coverage))
print(len(ref_reads), len(alt_reads))
combined_reads = []
for i, _, pair in ref_reads:
if 4000 - 500 < i < 4000 + 500 + deletion_length:
pair._allele = 'ref'
combined_reads.append(pair)
for i, _, pair in alt_reads:
if 4000 - 500 < i < 4500:
pair._allele = 'alt'
combined_reads.append(pair)
for pair in combined_reads:
pair.realign([genome_source], [genome_source_deletion])
ref_breakpoints = [Locus('chr2', 4000, 4000, '+'), Locus('chr2', 4000 +
deletion_length, 4000 + deletion_length, '+')]
alt_breakpoints = [Locus('chr2', 4000, 4000, '+')]
ref_count, alt_count = genotyping.assign_reads_to_alleles(combined_reads,
ref_breakpoints, alt_breakpoints, get_read_stats())
print(':::::', ref_count, alt_count)
<|reserved_special_token_1|>
import collections
import numpy
import pytest
import random
import conftest
from svviz2.io import readstatistics
from svviz2.remap import genotyping
from svviz2.utility.intervals import Locus
def get_read_stats(isize=400):
stats = readstatistics.ReadStatistics(None)
stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)
stats.orientations = ["+-"]
return stats
def test_gt(genome_source, genome_source_deletion):
genome_source_deletion, deletion_length = genome_source_deletion
refseq = genome_source.names_to_contigs["chr2"]
altseq = genome_source_deletion.names_to_contigs["chr2"]
print("")
coverage = 50
read_length = 150
ref_reads = conftest.simulate_read_pairs(refseq, int(len(refseq)/(read_length*2)*coverage))
alt_reads = conftest.simulate_read_pairs(altseq, int(len(altseq)/(read_length*2)*coverage))
print(len(ref_reads), len(alt_reads))
combined_reads = []
for i, _, pair in ref_reads:
if 4000-500 < i < 4000+500+deletion_length:
pair._allele = "ref"
combined_reads.append(pair)
for i, _, pair in alt_reads:
if 4000-500 < i < 4500:
pair._allele = "alt"
combined_reads.append(pair)
for pair in combined_reads:
pair.realign([genome_source], [genome_source_deletion])
ref_breakpoints = [Locus("chr2", 4000, 4000, "+"),
Locus("chr2", 4000+deletion_length, 4000+deletion_length, "+")]
alt_breakpoints = [Locus("chr2", 4000, 4000, "+")]
ref_count, alt_count = genotyping.assign_reads_to_alleles(
combined_reads, ref_breakpoints, alt_breakpoints, get_read_stats())
print(":::::", ref_count, alt_count)
|
flexible
|
{
"blob_id": "97a362fc65731bb8fc3743c49a669b4cd3f0e155",
"index": 9426,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_read_stats(isize=400):\n stats = readstatistics.ReadStatistics(None)\n stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)\n stats.orientations = ['+-']\n return stats\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_read_stats(isize=400):\n stats = readstatistics.ReadStatistics(None)\n stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)\n stats.orientations = ['+-']\n return stats\n\n\ndef test_gt(genome_source, genome_source_deletion):\n genome_source_deletion, deletion_length = genome_source_deletion\n refseq = genome_source.names_to_contigs['chr2']\n altseq = genome_source_deletion.names_to_contigs['chr2']\n print('')\n coverage = 50\n read_length = 150\n ref_reads = conftest.simulate_read_pairs(refseq, int(len(refseq) / (\n read_length * 2) * coverage))\n alt_reads = conftest.simulate_read_pairs(altseq, int(len(altseq) / (\n read_length * 2) * coverage))\n print(len(ref_reads), len(alt_reads))\n combined_reads = []\n for i, _, pair in ref_reads:\n if 4000 - 500 < i < 4000 + 500 + deletion_length:\n pair._allele = 'ref'\n combined_reads.append(pair)\n for i, _, pair in alt_reads:\n if 4000 - 500 < i < 4500:\n pair._allele = 'alt'\n combined_reads.append(pair)\n for pair in combined_reads:\n pair.realign([genome_source], [genome_source_deletion])\n ref_breakpoints = [Locus('chr2', 4000, 4000, '+'), Locus('chr2', 4000 +\n deletion_length, 4000 + deletion_length, '+')]\n alt_breakpoints = [Locus('chr2', 4000, 4000, '+')]\n ref_count, alt_count = genotyping.assign_reads_to_alleles(combined_reads,\n ref_breakpoints, alt_breakpoints, get_read_stats())\n print(':::::', ref_count, alt_count)\n",
"step-4": "import collections\nimport numpy\nimport pytest\nimport random\nimport conftest\nfrom svviz2.io import readstatistics\nfrom svviz2.remap import genotyping\nfrom svviz2.utility.intervals import Locus\n\n\ndef get_read_stats(isize=400):\n stats = readstatistics.ReadStatistics(None)\n stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)\n stats.orientations = ['+-']\n return stats\n\n\ndef test_gt(genome_source, genome_source_deletion):\n genome_source_deletion, deletion_length = genome_source_deletion\n refseq = genome_source.names_to_contigs['chr2']\n altseq = genome_source_deletion.names_to_contigs['chr2']\n print('')\n coverage = 50\n read_length = 150\n ref_reads = conftest.simulate_read_pairs(refseq, int(len(refseq) / (\n read_length * 2) * coverage))\n alt_reads = conftest.simulate_read_pairs(altseq, int(len(altseq) / (\n read_length * 2) * coverage))\n print(len(ref_reads), len(alt_reads))\n combined_reads = []\n for i, _, pair in ref_reads:\n if 4000 - 500 < i < 4000 + 500 + deletion_length:\n pair._allele = 'ref'\n combined_reads.append(pair)\n for i, _, pair in alt_reads:\n if 4000 - 500 < i < 4500:\n pair._allele = 'alt'\n combined_reads.append(pair)\n for pair in combined_reads:\n pair.realign([genome_source], [genome_source_deletion])\n ref_breakpoints = [Locus('chr2', 4000, 4000, '+'), Locus('chr2', 4000 +\n deletion_length, 4000 + deletion_length, '+')]\n alt_breakpoints = [Locus('chr2', 4000, 4000, '+')]\n ref_count, alt_count = genotyping.assign_reads_to_alleles(combined_reads,\n ref_breakpoints, alt_breakpoints, get_read_stats())\n print(':::::', ref_count, alt_count)\n",
"step-5": "import collections\nimport numpy\nimport pytest\nimport random\n\nimport conftest\nfrom svviz2.io import readstatistics\nfrom svviz2.remap import genotyping\nfrom svviz2.utility.intervals import Locus\n\ndef get_read_stats(isize=400):\n stats = readstatistics.ReadStatistics(None)\n stats.insertSizes = numpy.random.normal(400, 20, 2000).astype(int)\n stats.orientations = [\"+-\"]\n return stats\n\ndef test_gt(genome_source, genome_source_deletion):\n genome_source_deletion, deletion_length = genome_source_deletion\n\n refseq = genome_source.names_to_contigs[\"chr2\"]\n altseq = genome_source_deletion.names_to_contigs[\"chr2\"]\n\n print(\"\")\n\n coverage = 50\n read_length = 150\n ref_reads = conftest.simulate_read_pairs(refseq, int(len(refseq)/(read_length*2)*coverage))\n alt_reads = conftest.simulate_read_pairs(altseq, int(len(altseq)/(read_length*2)*coverage))\n print(len(ref_reads), len(alt_reads))\n\n combined_reads = []\n\n for i, _, pair in ref_reads:\n if 4000-500 < i < 4000+500+deletion_length:\n pair._allele = \"ref\"\n combined_reads.append(pair)\n for i, _, pair in alt_reads:\n if 4000-500 < i < 4500:\n pair._allele = \"alt\"\n combined_reads.append(pair)\n\n for pair in combined_reads:\n pair.realign([genome_source], [genome_source_deletion])\n\n ref_breakpoints = [Locus(\"chr2\", 4000, 4000, \"+\"),\n Locus(\"chr2\", 4000+deletion_length, 4000+deletion_length, \"+\")]\n alt_breakpoints = [Locus(\"chr2\", 4000, 4000, \"+\")]\n\n ref_count, alt_count = genotyping.assign_reads_to_alleles(\n combined_reads, ref_breakpoints, alt_breakpoints, get_read_stats())\n\n print(\":::::\", ref_count, alt_count)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# __ __ __ ______ __
# / | / | / | / \ / |
# $$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______
# $$ \/$$// $$ | / \ / \ / \ $$ | $$/ / \ / |/ \ / |
# $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/
# $$$$ \ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \
# $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \__/ |$$ \__$$ |$$ |$$ | $$ | $$$$$$ |
#$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/
#$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/
#made with http://patorjk.com/software/taag/
# Xtern Intern Techincal interview
# Josh Martin
# contact@cjoshmartin.com
# 2016
import json
import uuid
import random
import time
## Location of file
filename ="data.json"
def newGuess():
# makes new numbers each time alled
return random.randint(0,10)
# init guess
correctGuess = newGuess()
def newUser():
# new init of a user
userid = str(uuid.uuid1())
data={userid:{'coins':0,'guess':0}}
with open(filename,'w') as f:
json.dump(data,f)
return userid
def OpenJson():
# opens the json file satisfied at the top of the document
with open(filename,'r+') as f:
data =json.load(f)
return data
def AddACoin(userid):
# adds a coin to current user
data = OpenJson()
tmp=data[userid]['coins']
tmp+=1
data[userid]['coins']=tmp
JsonFile=open(filename,"w+")
JsonFile.write(json.dumps(data))
JsonFile.close()
def GuessCount(userid):
# keeps track of guess
data = OpenJson()
tmp=data[userid]['guess']
tmp+=1
data[userid]['guess']=tmp
JsonFile=open(filename,"w+")
JsonFile.write(json.dumps(data))
JsonFile.close()
print 'that is {} trys in total.'.format(tmp)
def GetCoins(userid):
# gets current amount of coins
getamount =OpenJson()[userid]['coins']
return getamount
def HandleGuess(userid,guess):
# returns a Boolean value based off if the guess is right or not
print 'the current user, "{}" has guessed: {}'.format(userid,guess)
if guess == correctGuess:
print 'the user,"{}" has guessed correctly and now has {} XternCoins.'.format(userid,(GetCoins(userid)+1))
return True
print 'the user has nt guessed right, please try again.'
return False
def StartGuessing():
user =newUser()
while True:
print("""
__ __ __ ______ __
/ | / | / | / \ / |
$$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______
$$ \/$$// $$ | / \ / \ / \ $$ | $$/ / \ / |/ \ / |
$$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/
$$$$ \ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \
$$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \__/ |$$ \__$$ |$$ |$$ | $$ | $$$$$$ |
$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/
$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/
""") #cheap "gui" to clear the screen a bit and look pretty
print 'the current user, "{}" has {} XternCoins'.format(user,OpenJson()[user]['coins'])
guess =HandleGuess(user,random.randint(0,10))
if guess :
AddACoin(user)
correctGuess=newGuess() # makes a new number to guess
GuessCount(user)
time.sleep(3) # makes program readable to humans not just computers
|
normal
|
{
"blob_id": "ae72d832039f36149988da02d8a4174d80a4ecfb",
"index": 2350,
"step-1": "\n # __ __ __ ______ __\n# / | / | / | / \\ / |\n# $$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______\n# $$ \\/$$// $$ | / \\ / \\ / \\ $$ | $$/ / \\ / |/ \\ / |\n # $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/\n# $$$$ \\ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \\\n# $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \\__/ |$$ \\__$$ |$$ |$$ | $$ | $$$$$$ |\n#$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/\n#$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/\n\n#made with http://patorjk.com/software/taag/\n\n# Xtern Intern Techincal interview\n# Josh Martin\n# contact@cjoshmartin.com\n# 2016\n\nimport json\nimport uuid\nimport random\nimport time\n\n## Location of file\nfilename =\"data.json\"\n\ndef newGuess():\n # makes new numbers each time alled\n return random.randint(0,10)\n\n# init guess \ncorrectGuess = newGuess()\n\ndef newUser():\n # new init of a user\n userid = str(uuid.uuid1())\n data={userid:{'coins':0,'guess':0}}\n with open(filename,'w') as f:\n json.dump(data,f)\n return userid\ndef OpenJson():\n # opens the json file satisfied at the top of the document\n with open(filename,'r+') as f:\n data =json.load(f)\n return data\n \ndef AddACoin(userid):\n # adds a coin to current user\n data = OpenJson()\n tmp=data[userid]['coins']\n tmp+=1\n data[userid]['coins']=tmp\n JsonFile=open(filename,\"w+\")\n JsonFile.write(json.dumps(data))\n JsonFile.close()\n\ndef GuessCount(userid):\n # keeps track of guess\n data = OpenJson()\n tmp=data[userid]['guess']\n tmp+=1\n data[userid]['guess']=tmp\n JsonFile=open(filename,\"w+\")\n JsonFile.write(json.dumps(data))\n JsonFile.close()\n print 'that is {} trys in total.'.format(tmp)\n\ndef GetCoins(userid):\n # gets current amount of coins\n getamount =OpenJson()[userid]['coins']\n return getamount\n\ndef HandleGuess(userid,guess):\n # returns a Boolean value based off if the guess is 
right or not\n print 'the current user, \"{}\" has guessed: {}'.format(userid,guess)\n if guess == correctGuess:\n print 'the user,\"{}\" has guessed correctly and now has {} XternCoins.'.format(userid,(GetCoins(userid)+1))\n return True\n print 'the user has nt guessed right, please try again.'\n return False\n \ndef StartGuessing():\n user =newUser()\n while True:\n print(\"\"\" \n \n \n \n \n \n __ __ __ ______ __ \n/ | / | / | / \\ / | \n$$ | $$ |_$$ |_ ______ ______ _______ /$$$$$$ | ______ $$/ _______ _______ \n$$ \\/$$// $$ | / \\ / \\ / \\ $$ | $$/ / \\ / |/ \\ / |\n $$ $$< $$$$$$/ /$$$$$$ |/$$$$$$ |$$$$$$$ | $$ | /$$$$$$ |$$ |$$$$$$$ |/$$$$$$$/ \n $$$$ \\ $$ | __ $$ $$ |$$ | $$/ $$ | $$ | $$ | __ $$ | $$ |$$ |$$ | $$ |$$ \\ \n $$ /$$ | $$ |/ |$$$$$$$$/ $$ | $$ | $$ | $$ \\__/ |$$ \\__$$ |$$ |$$ | $$ | $$$$$$ |\n$$ | $$ | $$ $$/ $$ |$$ | $$ | $$ | $$ $$/ $$ $$/ $$ |$$ | $$ |/ $$/ \n$$/ $$/ $$$$/ $$$$$$$/ $$/ $$/ $$/ $$$$$$/ $$$$$$/ $$/ $$/ $$/ $$$$$$$/\n \n \n \n \n \n \n \n \n \n \n \"\"\") #cheap \"gui\" to clear the screen a bit and look pretty \n print 'the current user, \"{}\" has {} XternCoins'.format(user,OpenJson()[user]['coins'])\n guess =HandleGuess(user,random.randint(0,10))\n if guess :\n AddACoin(user)\n correctGuess=newGuess() # makes a new number to guess\n GuessCount(user)\n time.sleep(3) # makes program readable to humans not just computers\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def is_power(n):
if n == 0:
return 'not power of two'
if n & n - 1 == 0:
return 'power of 2'
return 'not power of 2'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def is_power(n):
if n == 0:
return 'not power of two'
if n & n - 1 == 0:
return 'power of 2'
return 'not power of 2'
if __name__ == '__main__':
input_number = int(input('enter the number : '))
print(is_power(input_number))
<|reserved_special_token_1|>
# Question : determine whether given number is power of 2
# logic : every no. of the form 2^i has bit represetntaion of the form :
# 2 -> 10 1->01
# 4 -> 100 3->011
# 8 -> 1000 7->0111
# 16 -> 10000 15->01111
# 32 -> 100000 31->011111
# ... and so on
# Thus there is a pattern here, ever predecessor of power of 2 has all 0 bits flipped and so as 1 bit itself
# Complexity : using bit manipulation it can be done in O(1) time
def is_power(n):
if n==0:
return 'not power of two'
if n & (n-1) == 0 :
return 'power of 2'
return 'not power of 2'
if __name__ == "__main__":
input_number = int(input('enter the number : '))
print(is_power(input_number))
|
flexible
|
{
"blob_id": "676aec735dd7441b0c481956ad18b012b8d98ea4",
"index": 8459,
"step-1": "<mask token>\n",
"step-2": "def is_power(n):\n if n == 0:\n return 'not power of two'\n if n & n - 1 == 0:\n return 'power of 2'\n return 'not power of 2'\n\n\n<mask token>\n",
"step-3": "def is_power(n):\n if n == 0:\n return 'not power of two'\n if n & n - 1 == 0:\n return 'power of 2'\n return 'not power of 2'\n\n\nif __name__ == '__main__':\n input_number = int(input('enter the number : '))\n print(is_power(input_number))\n",
"step-4": "# Question : determine whether given number is power of 2\n\n# logic : every no. of the form 2^i has bit represetntaion of the form : \n# 2 -> 10 1->01\n# 4 -> 100 3->011\n# 8 -> 1000 7->0111\n# 16 -> 10000 15->01111\n# 32 -> 100000 31->011111\n# ... and so on\n\n# Thus there is a pattern here, ever predecessor of power of 2 has all 0 bits flipped and so as 1 bit itself\n\n# Complexity : using bit manipulation it can be done in O(1) time\n\n\ndef is_power(n):\n if n==0:\n return 'not power of two'\n if n & (n-1) == 0 :\n return 'power of 2'\n return 'not power of 2'\n\n\nif __name__ == \"__main__\":\n input_number = int(input('enter the number : '))\n print(is_power(input_number))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class PidorWeekly:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def get_top_pidor(cls, cid, date=None):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
db = cls.__get_db(monday, cid)
stats = UserStat.get_chat_stats(cid, date)
pidor_by_count = {}
for user_stat, user in stats:
count = user_stat.all_messages_count
if count < 30 or user_stat.words_count < 500:
continue
if user.uid not in db:
continue
pidor_by_count[user.uid] = db[user.uid] / count
if len(pidor_by_count) > 0:
uid, _ = cls.__sort_dict(pidor_by_count)[0]
elif len(stats) == 0:
return None
else:
_, user = random.choice(stats)
uid = user.uid
return uid
@classmethod
@run_async
def parse_message(cls, message):
msg = message.text
if msg is None:
return
uid = message.from_user.id
cid = message.chat_id
entities = message.parse_entities()
if not cls.__has_pidor(msg):
return
cls.__add(uid, cid)
if message.reply_to_message is not None:
to_uid = message.reply_to_message.from_user.id
cls.__add(to_uid, cid, replay=True)
for entity, entity_text in entities.items():
if entity.type == 'mention':
username = entity_text.lstrip('@').strip()
try:
mentioned_user_uid = UserDB.get_uid_by_username(username)
if mentioned_user_uid:
cls.__add(mentioned_user_uid, cid, replay=True)
except Exception:
pass
continue
if entity.type == 'text_mention':
cls.__add(entity.user.id, cid, replay=True)
continue
<|reserved_special_token_0|>
@classmethod
def __add(cls, uid, cid, date=None, replay=False):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
logger.debug(f'lock {cid}:{uid}')
with cls.lock:
db = cls.__get_db(monday, cid)
value = 1
if replay is True:
value = 0.4
if uid in db:
db[uid] += value
else:
db[uid] = value
cls.__set_db(db, monday, cid)
@staticmethod
def __sort_dict(d):
return sorted(d.items(), key=lambda x: x[1], reverse=True)
@staticmethod
def __get_cache_key(monday, cid):
return f"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}"
@staticmethod
def __get_date_monday(date):
monday = date - timedelta(days=date.weekday())
return monday.replace(hour=0, minute=0, second=0, microsecond=0)
@classmethod
def __get_current_monday(cls):
return cls.__get_date_monday(datetime.today())
<|reserved_special_token_0|>
@classmethod
def __set_db(cls, newdb, monday, cid):
cache.set(cls.__get_cache_key(monday, cid), newdb, time=
USER_CACHE_EXPIRE)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PidorWeekly:
lock = Lock()
re_words = re.compile(
'\\b(ге[йяи]|геев|анал|аналы|аналь\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\S*|член\\S*|пизд\\S+|гомос\\S+|гомик\\S*|\\S+сексуал\\S*|климов\\S*|педерас\\S+|пидор\\S*|пидар\\S*|педик\\S+|подвор\\S+|iphone\\S*|айфон\\S*|samsung|самсунг\\S*|смузи|барбер\\S*|рокет\\S*|хипстер\\S*|лгбт\\S*|бабочк\\S+|м[ао]к[ао]син\\S*|ахтунг\\S*|толерант\\S+|политкорр?ект\\S+|стрижк\\S+|бород\\S+|аниме\\S*|саратов\\S*|фемк\\S+|\\S+изм\\S*|dtf|дтф|в[еэ]йп\\S*|гироскутер\\S*|мизог\\S+|козел|козл\\S+|муда[кч]\\S*|сволоч\\S+|ресторан\\S*|кача[лт]\\S+|мыло|читер\\S*|читы?|культур\\S+|сра[тл]\\S+|насра[тл]\\S+|гад\\S*|блогг?ер\\S*)\\b'
, re.IGNORECASE)
re_inside = re.compile('п[еи]д[оа]р\\S*', re.IGNORECASE)
@classmethod
def get_top_pidor(cls, cid, date=None):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
db = cls.__get_db(monday, cid)
stats = UserStat.get_chat_stats(cid, date)
pidor_by_count = {}
for user_stat, user in stats:
count = user_stat.all_messages_count
if count < 30 or user_stat.words_count < 500:
continue
if user.uid not in db:
continue
pidor_by_count[user.uid] = db[user.uid] / count
if len(pidor_by_count) > 0:
uid, _ = cls.__sort_dict(pidor_by_count)[0]
elif len(stats) == 0:
return None
else:
_, user = random.choice(stats)
uid = user.uid
return uid
@classmethod
@run_async
def parse_message(cls, message):
msg = message.text
if msg is None:
return
uid = message.from_user.id
cid = message.chat_id
entities = message.parse_entities()
if not cls.__has_pidor(msg):
return
cls.__add(uid, cid)
if message.reply_to_message is not None:
to_uid = message.reply_to_message.from_user.id
cls.__add(to_uid, cid, replay=True)
for entity, entity_text in entities.items():
if entity.type == 'mention':
username = entity_text.lstrip('@').strip()
try:
mentioned_user_uid = UserDB.get_uid_by_username(username)
if mentioned_user_uid:
cls.__add(mentioned_user_uid, cid, replay=True)
except Exception:
pass
continue
if entity.type == 'text_mention':
cls.__add(entity.user.id, cid, replay=True)
continue
@classmethod
def __has_pidor(cls, msg):
msg_lower = msg.lower().replace('ё', 'е')
if cls.re_words.search(msg_lower):
return True
if cls.re_inside.search(msg_lower):
return True
return False
@classmethod
def __add(cls, uid, cid, date=None, replay=False):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
logger.debug(f'lock {cid}:{uid}')
with cls.lock:
db = cls.__get_db(monday, cid)
value = 1
if replay is True:
value = 0.4
if uid in db:
db[uid] += value
else:
db[uid] = value
cls.__set_db(db, monday, cid)
@staticmethod
def __sort_dict(d):
return sorted(d.items(), key=lambda x: x[1], reverse=True)
@staticmethod
def __get_cache_key(monday, cid):
return f"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}"
@staticmethod
def __get_date_monday(date):
monday = date - timedelta(days=date.weekday())
return monday.replace(hour=0, minute=0, second=0, microsecond=0)
@classmethod
def __get_current_monday(cls):
return cls.__get_date_monday(datetime.today())
@classmethod
def __get_db(cls, monday, cid):
cached = cache.get(cls.__get_cache_key(monday, cid))
if cached:
return cached
return {}
@classmethod
def __set_db(cls, newdb, monday, cid):
cache.set(cls.__get_cache_key(monday, cid), newdb, time=
USER_CACHE_EXPIRE)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = get_logger(__name__)
class PidorWeekly:
lock = Lock()
re_words = re.compile(
'\\b(ге[йяи]|геев|анал|аналы|аналь\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\S*|член\\S*|пизд\\S+|гомос\\S+|гомик\\S*|\\S+сексуал\\S*|климов\\S*|педерас\\S+|пидор\\S*|пидар\\S*|педик\\S+|подвор\\S+|iphone\\S*|айфон\\S*|samsung|самсунг\\S*|смузи|барбер\\S*|рокет\\S*|хипстер\\S*|лгбт\\S*|бабочк\\S+|м[ао]к[ао]син\\S*|ахтунг\\S*|толерант\\S+|политкорр?ект\\S+|стрижк\\S+|бород\\S+|аниме\\S*|саратов\\S*|фемк\\S+|\\S+изм\\S*|dtf|дтф|в[еэ]йп\\S*|гироскутер\\S*|мизог\\S+|козел|козл\\S+|муда[кч]\\S*|сволоч\\S+|ресторан\\S*|кача[лт]\\S+|мыло|читер\\S*|читы?|культур\\S+|сра[тл]\\S+|насра[тл]\\S+|гад\\S*|блогг?ер\\S*)\\b'
, re.IGNORECASE)
re_inside = re.compile('п[еи]д[оа]р\\S*', re.IGNORECASE)
@classmethod
def get_top_pidor(cls, cid, date=None):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
db = cls.__get_db(monday, cid)
stats = UserStat.get_chat_stats(cid, date)
pidor_by_count = {}
for user_stat, user in stats:
count = user_stat.all_messages_count
if count < 30 or user_stat.words_count < 500:
continue
if user.uid not in db:
continue
pidor_by_count[user.uid] = db[user.uid] / count
if len(pidor_by_count) > 0:
uid, _ = cls.__sort_dict(pidor_by_count)[0]
elif len(stats) == 0:
return None
else:
_, user = random.choice(stats)
uid = user.uid
return uid
@classmethod
@run_async
def parse_message(cls, message):
msg = message.text
if msg is None:
return
uid = message.from_user.id
cid = message.chat_id
entities = message.parse_entities()
if not cls.__has_pidor(msg):
return
cls.__add(uid, cid)
if message.reply_to_message is not None:
to_uid = message.reply_to_message.from_user.id
cls.__add(to_uid, cid, replay=True)
for entity, entity_text in entities.items():
if entity.type == 'mention':
username = entity_text.lstrip('@').strip()
try:
mentioned_user_uid = UserDB.get_uid_by_username(username)
if mentioned_user_uid:
cls.__add(mentioned_user_uid, cid, replay=True)
except Exception:
pass
continue
if entity.type == 'text_mention':
cls.__add(entity.user.id, cid, replay=True)
continue
@classmethod
def __has_pidor(cls, msg):
msg_lower = msg.lower().replace('ё', 'е')
if cls.re_words.search(msg_lower):
return True
if cls.re_inside.search(msg_lower):
return True
return False
@classmethod
def __add(cls, uid, cid, date=None, replay=False):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
logger.debug(f'lock {cid}:{uid}')
with cls.lock:
db = cls.__get_db(monday, cid)
value = 1
if replay is True:
value = 0.4
if uid in db:
db[uid] += value
else:
db[uid] = value
cls.__set_db(db, monday, cid)
@staticmethod
def __sort_dict(d):
return sorted(d.items(), key=lambda x: x[1], reverse=True)
@staticmethod
def __get_cache_key(monday, cid):
return f"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}"
@staticmethod
def __get_date_monday(date):
monday = date - timedelta(days=date.weekday())
return monday.replace(hour=0, minute=0, second=0, microsecond=0)
@classmethod
def __get_current_monday(cls):
return cls.__get_date_monday(datetime.today())
@classmethod
def __get_db(cls, monday, cid):
cached = cache.get(cls.__get_cache_key(monday, cid))
if cached:
return cached
return {}
@classmethod
def __set_db(cls, newdb, monday, cid):
cache.set(cls.__get_cache_key(monday, cid), newdb, time=
USER_CACHE_EXPIRE)
<|reserved_special_token_1|>
import random
import re
from datetime import datetime, timedelta
from threading import Lock
from telegram.ext import run_async
from src.models.user import UserDB
from src.models.user_stat import UserStat
from src.utils.cache import cache, USER_CACHE_EXPIRE
from src.utils.logger_helpers import get_logger
logger = get_logger(__name__)
class PidorWeekly:
lock = Lock()
re_words = re.compile(
'\\b(ге[йяи]|геев|анал|аналы|аналь\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\S*|член\\S*|пизд\\S+|гомос\\S+|гомик\\S*|\\S+сексуал\\S*|климов\\S*|педерас\\S+|пидор\\S*|пидар\\S*|педик\\S+|подвор\\S+|iphone\\S*|айфон\\S*|samsung|самсунг\\S*|смузи|барбер\\S*|рокет\\S*|хипстер\\S*|лгбт\\S*|бабочк\\S+|м[ао]к[ао]син\\S*|ахтунг\\S*|толерант\\S+|политкорр?ект\\S+|стрижк\\S+|бород\\S+|аниме\\S*|саратов\\S*|фемк\\S+|\\S+изм\\S*|dtf|дтф|в[еэ]йп\\S*|гироскутер\\S*|мизог\\S+|козел|козл\\S+|муда[кч]\\S*|сволоч\\S+|ресторан\\S*|кача[лт]\\S+|мыло|читер\\S*|читы?|культур\\S+|сра[тл]\\S+|насра[тл]\\S+|гад\\S*|блогг?ер\\S*)\\b'
, re.IGNORECASE)
re_inside = re.compile('п[еи]д[оа]р\\S*', re.IGNORECASE)
@classmethod
def get_top_pidor(cls, cid, date=None):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
db = cls.__get_db(monday, cid)
stats = UserStat.get_chat_stats(cid, date)
pidor_by_count = {}
for user_stat, user in stats:
count = user_stat.all_messages_count
if count < 30 or user_stat.words_count < 500:
continue
if user.uid not in db:
continue
pidor_by_count[user.uid] = db[user.uid] / count
if len(pidor_by_count) > 0:
uid, _ = cls.__sort_dict(pidor_by_count)[0]
elif len(stats) == 0:
return None
else:
_, user = random.choice(stats)
uid = user.uid
return uid
@classmethod
@run_async
def parse_message(cls, message):
msg = message.text
if msg is None:
return
uid = message.from_user.id
cid = message.chat_id
entities = message.parse_entities()
if not cls.__has_pidor(msg):
return
cls.__add(uid, cid)
if message.reply_to_message is not None:
to_uid = message.reply_to_message.from_user.id
cls.__add(to_uid, cid, replay=True)
for entity, entity_text in entities.items():
if entity.type == 'mention':
username = entity_text.lstrip('@').strip()
try:
mentioned_user_uid = UserDB.get_uid_by_username(username)
if mentioned_user_uid:
cls.__add(mentioned_user_uid, cid, replay=True)
except Exception:
pass
continue
if entity.type == 'text_mention':
cls.__add(entity.user.id, cid, replay=True)
continue
@classmethod
def __has_pidor(cls, msg):
msg_lower = msg.lower().replace('ё', 'е')
if cls.re_words.search(msg_lower):
return True
if cls.re_inside.search(msg_lower):
return True
return False
@classmethod
def __add(cls, uid, cid, date=None, replay=False):
monday = cls.__get_current_monday(
) if date is None else cls.__get_date_monday(date)
logger.debug(f'lock {cid}:{uid}')
with cls.lock:
db = cls.__get_db(monday, cid)
value = 1
if replay is True:
value = 0.4
if uid in db:
db[uid] += value
else:
db[uid] = value
cls.__set_db(db, monday, cid)
@staticmethod
def __sort_dict(d):
return sorted(d.items(), key=lambda x: x[1], reverse=True)
@staticmethod
def __get_cache_key(monday, cid):
return f"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}"
@staticmethod
def __get_date_monday(date):
monday = date - timedelta(days=date.weekday())
return monday.replace(hour=0, minute=0, second=0, microsecond=0)
@classmethod
def __get_current_monday(cls):
return cls.__get_date_monday(datetime.today())
@classmethod
def __get_db(cls, monday, cid):
cached = cache.get(cls.__get_cache_key(monday, cid))
if cached:
return cached
return {}
@classmethod
def __set_db(cls, newdb, monday, cid):
cache.set(cls.__get_cache_key(monday, cid), newdb, time=
USER_CACHE_EXPIRE)
<|reserved_special_token_1|>
import random
import re
from datetime import datetime, timedelta
from threading import Lock
from telegram.ext import run_async
from src.models.user import UserDB
from src.models.user_stat import UserStat
from src.utils.cache import cache, USER_CACHE_EXPIRE
from src.utils.logger_helpers import get_logger
logger = get_logger(__name__)
class PidorWeekly:
lock = Lock()
re_words = re.compile(
r"\b(ге[йяи]|геев|анал|аналы|аналь\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\S*|член\S*|пизд\S+|гомос\S+|гомик\S*|\S+сексуал\S*|климов\S*|педерас\S+|пидор\S*|пидар\S*|педик\S+|подвор\S+|iphone\S*|айфон\S*|samsung|самсунг\S*|смузи|барбер\S*|рокет\S*|хипстер\S*|лгбт\S*|бабочк\S+|м[ао]к[ао]син\S*|ахтунг\S*|толерант\S+|политкорр?ект\S+|стрижк\S+|бород\S+|аниме\S*|саратов\S*|фемк\S+|\S+изм\S*|dtf|дтф|в[еэ]йп\S*|гироскутер\S*|мизог\S+|козел|козл\S+|муда[кч]\S*|сволоч\S+|ресторан\S*|кача[лт]\S+|мыло|читер\S*|читы?|культур\S+|сра[тл]\S+|насра[тл]\S+|гад\S*|блогг?ер\S*)\b",
re.IGNORECASE)
re_inside = re.compile(r"п[еи]д[оа]р\S*", re.IGNORECASE)
@classmethod
def get_top_pidor(cls, cid, date=None):
monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)
db = cls.__get_db(monday, cid)
stats = UserStat.get_chat_stats(cid, date)
# подсчитаем всех по отношению пидор-слов к общему количеству слов этого участника
pidor_by_count = {}
for user_stat, user in stats:
count = user_stat.all_messages_count
# учитываем только тек, кто написал от 30 сообщений
if count < 30 or user_stat.words_count < 500:
continue
if user.uid not in db:
continue
pidor_by_count[user.uid] = db[user.uid] / count
if len(pidor_by_count) > 0:
uid, _ = cls.__sort_dict(pidor_by_count)[0]
elif len(stats) == 0:
return None
else:
_, user = random.choice(stats)
uid = user.uid
return uid
@classmethod
@run_async
def parse_message(cls, message):
msg = message.text
if msg is None:
return
uid = message.from_user.id
cid = message.chat_id
entities = message.parse_entities()
if not cls.__has_pidor(msg):
return
cls.__add(uid, cid)
if message.reply_to_message is not None:
to_uid = message.reply_to_message.from_user.id
cls.__add(to_uid, cid, replay=True)
for entity, entity_text in entities.items():
if entity.type == 'mention':
username = entity_text.lstrip('@').strip()
try:
mentioned_user_uid = UserDB.get_uid_by_username(username)
if mentioned_user_uid:
cls.__add(mentioned_user_uid, cid, replay=True)
except Exception:
pass
continue
if entity.type == 'text_mention':
cls.__add(entity.user.id, cid, replay=True)
continue
@classmethod
def __has_pidor(cls, msg):
msg_lower = msg.lower().replace('ё', 'е')
if cls.re_words.search(msg_lower):
return True
if cls.re_inside.search(msg_lower):
return True
return False
@classmethod
def __add(cls, uid, cid, date=None, replay=False):
monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)
logger.debug(f'lock {cid}:{uid}')
with cls.lock:
db = cls.__get_db(monday, cid)
value = 1
if replay is True:
value = 0.4
if uid in db:
db[uid] += value
else:
db[uid] = value
cls.__set_db(db, monday, cid)
@staticmethod
def __sort_dict(d):
return sorted(d.items(), key=lambda x: x[1], reverse=True)
@staticmethod
def __get_cache_key(monday, cid):
return f'pidorweekly:{monday.strftime("%Y%m%d")}:{cid}'
@staticmethod
def __get_date_monday(date):
monday = date - timedelta(days=date.weekday())
return monday.replace(hour=0, minute=0, second=0, microsecond=0)
@classmethod
def __get_current_monday(cls):
return cls.__get_date_monday(datetime.today())
@classmethod
def __get_db(cls, monday, cid):
cached = cache.get(cls.__get_cache_key(monday, cid))
if cached:
return cached
return {}
@classmethod
def __set_db(cls, newdb, monday, cid):
cache.set(cls.__get_cache_key(monday, cid), newdb, time=USER_CACHE_EXPIRE)
|
flexible
|
{
"blob_id": "109ca06685eece74034f77a98b1d7172a17aca21",
"index": 7469,
"step-1": "<mask token>\n\n\nclass PidorWeekly:\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n <mask token>\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return 
sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n <mask token>\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-2": "<mask token>\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n '\\\\b(ге[йяи]|геев|анал|аналы|аналь\\\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\\\S*|член\\\\S*|пизд\\\\S+|гомос\\\\S+|гомик\\\\S*|\\\\S+сексуал\\\\S*|климов\\\\S*|педерас\\\\S+|пидор\\\\S*|пидар\\\\S*|педик\\\\S+|подвор\\\\S+|iphone\\\\S*|айфон\\\\S*|samsung|самсунг\\\\S*|смузи|барбер\\\\S*|рокет\\\\S*|хипстер\\\\S*|лгбт\\\\S*|бабочк\\\\S+|м[ао]к[ао]син\\\\S*|ахтунг\\\\S*|толерант\\\\S+|политкорр?ект\\\\S+|стрижк\\\\S+|бород\\\\S+|аниме\\\\S*|саратов\\\\S*|фемк\\\\S+|\\\\S+изм\\\\S*|dtf|дтф|в[еэ]йп\\\\S*|гироскутер\\\\S*|мизог\\\\S+|козел|козл\\\\S+|муда[кч]\\\\S*|сволоч\\\\S+|ресторан\\\\S*|кача[лт]\\\\S+|мыло|читер\\\\S*|читы?|культур\\\\S+|сра[тл]\\\\S+|насра[тл]\\\\S+|гад\\\\S*|блогг?ер\\\\S*)\\\\b'\n , re.IGNORECASE)\n re_inside = re.compile('п[еи]д[оа]р\\\\S*', re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n for entity, entity_text in 
entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-3": "<mask token>\nlogger = get_logger(__name__)\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n '\\\\b(ге[йяи]|геев|анал|аналы|аналь\\\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\\\S*|член\\\\S*|пизд\\\\S+|гомос\\\\S+|гомик\\\\S*|\\\\S+сексуал\\\\S*|климов\\\\S*|педерас\\\\S+|пидор\\\\S*|пидар\\\\S*|педик\\\\S+|подвор\\\\S+|iphone\\\\S*|айфон\\\\S*|samsung|самсунг\\\\S*|смузи|барбер\\\\S*|рокет\\\\S*|хипстер\\\\S*|лгбт\\\\S*|бабочк\\\\S+|м[ао]к[ао]син\\\\S*|ахтунг\\\\S*|толерант\\\\S+|политкорр?ект\\\\S+|стрижк\\\\S+|бород\\\\S+|аниме\\\\S*|саратов\\\\S*|фемк\\\\S+|\\\\S+изм\\\\S*|dtf|дтф|в[еэ]йп\\\\S*|гироскутер\\\\S*|мизог\\\\S+|козел|козл\\\\S+|муда[кч]\\\\S*|сволоч\\\\S+|ресторан\\\\S*|кача[лт]\\\\S+|мыло|читер\\\\S*|читы?|культур\\\\S+|сра[тл]\\\\S+|насра[тл]\\\\S+|гад\\\\S*|блогг?ер\\\\S*)\\\\b'\n , re.IGNORECASE)\n re_inside = re.compile('п[еи]д[оа]р\\\\S*', re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, 
replay=True)\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-4": "import random\nimport re\nfrom datetime import datetime, timedelta\nfrom threading import Lock\nfrom telegram.ext import run_async\nfrom src.models.user import UserDB\nfrom src.models.user_stat import UserStat\nfrom src.utils.cache import cache, USER_CACHE_EXPIRE\nfrom src.utils.logger_helpers import get_logger\nlogger = get_logger(__name__)\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n '\\\\b(ге[йяи]|геев|анал|аналы|аналь\\\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\\\S*|член\\\\S*|пизд\\\\S+|гомос\\\\S+|гомик\\\\S*|\\\\S+сексуал\\\\S*|климов\\\\S*|педерас\\\\S+|пидор\\\\S*|пидар\\\\S*|педик\\\\S+|подвор\\\\S+|iphone\\\\S*|айфон\\\\S*|samsung|самсунг\\\\S*|смузи|барбер\\\\S*|рокет\\\\S*|хипстер\\\\S*|лгбт\\\\S*|бабочк\\\\S+|м[ао]к[ао]син\\\\S*|ахтунг\\\\S*|толерант\\\\S+|политкорр?ект\\\\S+|стрижк\\\\S+|бород\\\\S+|аниме\\\\S*|саратов\\\\S*|фемк\\\\S+|\\\\S+изм\\\\S*|dtf|дтф|в[еэ]йп\\\\S*|гироскутер\\\\S*|мизог\\\\S+|козел|козл\\\\S+|муда[кч]\\\\S*|сволоч\\\\S+|ресторан\\\\S*|кача[лт]\\\\S+|мыло|читер\\\\S*|читы?|культур\\\\S+|сра[тл]\\\\S+|насра[тл]\\\\S+|гад\\\\S*|блогг?ер\\\\S*)\\\\b'\n , re.IGNORECASE)\n re_inside = re.compile('п[еи]д[оа]р\\\\S*', re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if 
msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n 
cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-5": "import random\nimport re\nfrom datetime import datetime, timedelta\nfrom threading import Lock\n\nfrom telegram.ext import run_async\n\nfrom src.models.user import UserDB\nfrom src.models.user_stat import UserStat\nfrom src.utils.cache import cache, USER_CACHE_EXPIRE\nfrom src.utils.logger_helpers import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n r\"\\b(ге[йяи]|геев|анал|аналы|аналь\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\S*|член\\S*|пизд\\S+|гомос\\S+|гомик\\S*|\\S+сексуал\\S*|климов\\S*|педерас\\S+|пидор\\S*|пидар\\S*|педик\\S+|подвор\\S+|iphone\\S*|айфон\\S*|samsung|самсунг\\S*|смузи|барбер\\S*|рокет\\S*|хипстер\\S*|лгбт\\S*|бабочк\\S+|м[ао]к[ао]син\\S*|ахтунг\\S*|толерант\\S+|политкорр?ект\\S+|стрижк\\S+|бород\\S+|аниме\\S*|саратов\\S*|фемк\\S+|\\S+изм\\S*|dtf|дтф|в[еэ]йп\\S*|гироскутер\\S*|мизог\\S+|козел|козл\\S+|муда[кч]\\S*|сволоч\\S+|ресторан\\S*|кача[лт]\\S+|мыло|читер\\S*|читы?|культур\\S+|сра[тл]\\S+|насра[тл]\\S+|гад\\S*|блогг?ер\\S*)\\b\",\n re.IGNORECASE)\n re_inside = re.compile(r\"п[еи]д[оа]р\\S*\", re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n\n # подсчитаем всех по отношению пидор-слов к общему количеству слов этого участника\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n # учитываем только тек, кто написал от 30 сообщений\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def 
parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f'pidorweekly:{monday.strftime(\"%Y%m%d\")}:{cid}'\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n 
@classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=USER_CACHE_EXPIRE)\n",
"step-ids": [
9,
12,
13,
14,
15
]
}
|
[
9,
12,
13,
14,
15
] |
from django.urls import path
from . import views
app_name = 'restuarant'
urlpatterns = [path('orderplaced/', views.orderplaced), path('restaurant/',
views.restuarent, name='restuarant'), path('login/restaurant/', views.
restLogin, name='rlogin'), path('register/restaurant/', views.
restRegister, name='rregister'), path('profile/restaurant/', views.
restaurantProfile, name='rprofile'), path('restaurant/create/', views.
createRestaurant, name='rcreate'), path('restaurant/update/<int:id>/',
views.updateRestaurant, name='rupdate'), path('restaurant/orderlist/',
views.orderlist, name='orderlist'), path('restaurant/menu/', views.
menuManipulation, name='mmenu'), path('logout/', views.Logout, name=
'logout'), path('restaurant/<int:pk>/', views.restuarantMenu, name=
'menu'), path('checkout/', views.checkout, name='checkout'), path(
'profile/change_password/', views.change_password, name='change_password')]
|
normal
|
{
"blob_id": "63830a3c09a2d0a267b030a336062d5e95b9a71a",
"index": 3308,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'restuarant'\nurlpatterns = [path('orderplaced/', views.orderplaced), path('restaurant/',\n views.restuarent, name='restuarant'), path('login/restaurant/', views.\n restLogin, name='rlogin'), path('register/restaurant/', views.\n restRegister, name='rregister'), path('profile/restaurant/', views.\n restaurantProfile, name='rprofile'), path('restaurant/create/', views.\n createRestaurant, name='rcreate'), path('restaurant/update/<int:id>/',\n views.updateRestaurant, name='rupdate'), path('restaurant/orderlist/',\n views.orderlist, name='orderlist'), path('restaurant/menu/', views.\n menuManipulation, name='mmenu'), path('logout/', views.Logout, name=\n 'logout'), path('restaurant/<int:pk>/', views.restuarantMenu, name=\n 'menu'), path('checkout/', views.checkout, name='checkout'), path(\n 'profile/change_password/', views.change_password, name='change_password')]\n",
"step-3": "from django.urls import path\nfrom . import views\napp_name = 'restuarant'\nurlpatterns = [path('orderplaced/', views.orderplaced), path('restaurant/',\n views.restuarent, name='restuarant'), path('login/restaurant/', views.\n restLogin, name='rlogin'), path('register/restaurant/', views.\n restRegister, name='rregister'), path('profile/restaurant/', views.\n restaurantProfile, name='rprofile'), path('restaurant/create/', views.\n createRestaurant, name='rcreate'), path('restaurant/update/<int:id>/',\n views.updateRestaurant, name='rupdate'), path('restaurant/orderlist/',\n views.orderlist, name='orderlist'), path('restaurant/menu/', views.\n menuManipulation, name='mmenu'), path('logout/', views.Logout, name=\n 'logout'), path('restaurant/<int:pk>/', views.restuarantMenu, name=\n 'menu'), path('checkout/', views.checkout, name='checkout'), path(\n 'profile/change_password/', views.change_password, name='change_password')]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df_grade.head()
<|reserved_special_token_0|>
df_sinfo.head()
<|reserved_special_token_0|>
df_sinfo.head()
<|reserved_special_token_0|>
df_merge.head()
<|reserved_special_token_0|>
for name in ['姓名', '性别'][::-1]:
new_columns.remove(name)
new_columns.insert(new_columns.index('学号') + 1, name)
<|reserved_special_token_0|>
df_merge.head()
df_merge.to_excel('合并后的数据表.xlsx', index=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df_grade = pd.read_excel('学生成绩表.xlsx')
df_grade.head()
df_sinfo = pd.read_excel('学生信息表.xlsx')
df_sinfo.head()
df_sinfo = df_sinfo[['学号', '姓名', '性别']]
df_sinfo.head()
df_merge = pd.merge(left=df_grade, right=df_sinfo, left_on='学号', right_on='学号')
df_merge.head()
new_columns = df_merge.columns.to_list()
for name in ['姓名', '性别'][::-1]:
new_columns.remove(name)
new_columns.insert(new_columns.index('学号') + 1, name)
df_merge = df_merge.reindex(columns=new_columns)
df_merge.head()
df_merge.to_excel('合并后的数据表.xlsx', index=False)
<|reserved_special_token_1|>
import pandas as pd
df_grade = pd.read_excel('学生成绩表.xlsx')
df_grade.head()
df_sinfo = pd.read_excel('学生信息表.xlsx')
df_sinfo.head()
df_sinfo = df_sinfo[['学号', '姓名', '性别']]
df_sinfo.head()
df_merge = pd.merge(left=df_grade, right=df_sinfo, left_on='学号', right_on='学号')
df_merge.head()
new_columns = df_merge.columns.to_list()
for name in ['姓名', '性别'][::-1]:
new_columns.remove(name)
new_columns.insert(new_columns.index('学号') + 1, name)
df_merge = df_merge.reindex(columns=new_columns)
df_merge.head()
df_merge.to_excel('合并后的数据表.xlsx', index=False)
<|reserved_special_token_1|>
#coding=utf-8
import pandas as pd
# 学生成绩表
df_grade = pd.read_excel("学生成绩表.xlsx")
df_grade.head()
# 学生信息表
df_sinfo = pd.read_excel("学生信息表.xlsx")
df_sinfo.head()
# 只筛选第二个表的少量的列
df_sinfo = df_sinfo[["学号", "姓名", "性别"]]
df_sinfo.head()
# join
df_merge = pd.merge(left=df_grade, right=df_sinfo, left_on="学号", right_on="学号")
df_merge.head()
# 将columns变成python的列表形式
new_columns = df_merge.columns.to_list()
# 按逆序insert,会将"姓名"/"性别"放到"学号"的后面
for name in ["姓名", "性别"][::-1]:
new_columns.remove(name)
new_columns.insert(new_columns.index("学号")+1, name)
df_merge = df_merge.reindex(columns=new_columns)
df_merge.head()
df_merge.to_excel("合并后的数据表.xlsx", index=False)
|
flexible
|
{
"blob_id": "f6c48731b2a4e0a6f1f93034ee9d11121c2d0427",
"index": 6810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf_grade.head()\n<mask token>\ndf_sinfo.head()\n<mask token>\ndf_sinfo.head()\n<mask token>\ndf_merge.head()\n<mask token>\nfor name in ['姓名', '性别'][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index('学号') + 1, name)\n<mask token>\ndf_merge.head()\ndf_merge.to_excel('合并后的数据表.xlsx', index=False)\n",
"step-3": "<mask token>\ndf_grade = pd.read_excel('学生成绩表.xlsx')\ndf_grade.head()\ndf_sinfo = pd.read_excel('学生信息表.xlsx')\ndf_sinfo.head()\ndf_sinfo = df_sinfo[['学号', '姓名', '性别']]\ndf_sinfo.head()\ndf_merge = pd.merge(left=df_grade, right=df_sinfo, left_on='学号', right_on='学号')\ndf_merge.head()\nnew_columns = df_merge.columns.to_list()\nfor name in ['姓名', '性别'][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index('学号') + 1, name)\ndf_merge = df_merge.reindex(columns=new_columns)\ndf_merge.head()\ndf_merge.to_excel('合并后的数据表.xlsx', index=False)\n",
"step-4": "import pandas as pd\ndf_grade = pd.read_excel('学生成绩表.xlsx')\ndf_grade.head()\ndf_sinfo = pd.read_excel('学生信息表.xlsx')\ndf_sinfo.head()\ndf_sinfo = df_sinfo[['学号', '姓名', '性别']]\ndf_sinfo.head()\ndf_merge = pd.merge(left=df_grade, right=df_sinfo, left_on='学号', right_on='学号')\ndf_merge.head()\nnew_columns = df_merge.columns.to_list()\nfor name in ['姓名', '性别'][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index('学号') + 1, name)\ndf_merge = df_merge.reindex(columns=new_columns)\ndf_merge.head()\ndf_merge.to_excel('合并后的数据表.xlsx', index=False)\n",
"step-5": "#coding=utf-8\nimport pandas as pd\n\n# 学生成绩表\ndf_grade = pd.read_excel(\"学生成绩表.xlsx\") \ndf_grade.head()\n\n# 学生信息表\ndf_sinfo = pd.read_excel(\"学生信息表.xlsx\") \ndf_sinfo.head()\n\n# 只筛选第二个表的少量的列\ndf_sinfo = df_sinfo[[\"学号\", \"姓名\", \"性别\"]]\ndf_sinfo.head()\n\n# join\ndf_merge = pd.merge(left=df_grade, right=df_sinfo, left_on=\"学号\", right_on=\"学号\")\ndf_merge.head()\n\n# 将columns变成python的列表形式\nnew_columns = df_merge.columns.to_list()\n\n# 按逆序insert,会将\"姓名\"/\"性别\"放到\"学号\"的后面\nfor name in [\"姓名\", \"性别\"][::-1]:\n new_columns.remove(name)\n new_columns.insert(new_columns.index(\"学号\")+1, name)\n\n\ndf_merge = df_merge.reindex(columns=new_columns)\ndf_merge.head()\n\ndf_merge.to_excel(\"合并后的数据表.xlsx\", index=False)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Utility functions to do get frequencies of n-grams
Author: Jesus I. Ramirez Franco
December 2018
'''
import nltk
import pandas as pd
from nltk.stem.snowball import SnowballStemmer
from pycorenlp import StanfordCoreNLP
import math
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import string
nlp = StanfordCoreNLP('http://localhost:9000/')
pos_not_included = ['CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']
pos_not_included_1 = ['NN', 'NNS','NP', 'NPS','CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']
stemmer = SnowballStemmer("english")
#regex_tokenizer = RegexpTokenizer(r'\w+') # Tokenizer that removes punctuation
def clean_doc(text, language='english'):
'''
Removes unknown characters and punctuation, change capital to lower letters and remove
stop words. If stem=False
Inputs:
sentence (string): a sting to be cleaned
Returns: a string
'''
#tokens = regex_tokenizer.tokenize(text)
tokens = nltk.word_tokenize(text)
tokens = [t.lower() for t in tokens]
tokens = [t for t in tokens if t not in stopwords.words(language)+[p for p in string.punctuation]]
return ' '.join(tokens)
def csv_as_text(file_name):
'''
Opens a csv file with sentences and creates a string
Inputs:
- file_name (str): name of the file to open
Returns a string
'''
try:
df = pd.read_csv(file_name)
texts_list = set(list(df['0']))
return ' '.join(texts_list)
except:
pass
def gettin_all_text(list_of_files):
'''
Opens all csv files with sentences and returns a corpus
Inputs:
-list_of_files (list): a list with the names of the files to open
Returns a string
'''
all_text = [csv_as_text(file) for file in list_of_files]
all_text = [text for text in all_text if type(text) == str]
all_str = ' '.join(all_text)
return all_str
def all_text_list(list_of_files):
'''
Opens all csv files with sentences and returns a list of texts
Inputs:
-list_of_files (list): a list with the names of the files to open
Returns a list
'''
all_text = [csv_as_text(file) for file in list_of_files]
all_text = [text for text in all_text if type(text) == str]
return all_text
def pos_filter(list_of_texts, filter_list=pos_not_included):
'''
Removes the words identified with the Part of Speech included
in the filter list, from every text in the list of texts.
Inputs:
- list_of_texts (list of strings): list with the texts to be analyzed
- filter_list (list of strings): list with part of speech to eliminate
Returns a list of cleaned texts
'''
filtered_texts = []
for text in list_of_texts:
pos = nlp.annotate(text, properties={'annotators': 'pos', 'outputFormat': 'json'})['sentences'][0]['tokens']
filtered_words = [stemmer.stem(token['word']) for token in pos if token['pos'] not in filter_list]
filtered_str = ' '.join(filtered_words)
filtered_texts.append(filtered_str)
return filtered_texts
def pos_filter_text(text, filter_list=pos_not_included):
'''
Removes the words identified with the Part of Speech included
in the filter list, from a given text.
Inputs:
- text (str): text to be analyzed
- filter_list (list of strings): list with part of speech to eliminate
Returns a cleaned text
'''
text_list = make_chunks(text)
temp = []
for t in text_list:
pos = nlp.annotate(t, properties={'annotators': 'pos', 'outputFormat': 'json'})['sentences'][0]['tokens']
filtered_words = [stemmer.stem(token['word']) for token in pos if token['pos'] not in filter_list]
filtered_str = ' '.join(filtered_words)
temp.append(filtered_str)
final_text = ' '.join(temp)
return final_text
def pos_filter_corpus(corpus):
'''
Removes the words identified with the Part of Speech included
in the filter list, from every text in the corpus.
Inputs:
- corpus (dict): Dictionary where every key is an starting link and
and every valu is a text associated with the starting link.
Returns a dictionary with the cleaned texts.
'''
results = {}
for k, v in corpus.items():
results[k] = pos_filter_text(v)
return results
def make_chunks(text, max_size=95000):
'''
Creates chunks of text with lenght less than or equal to the
defined maximum size, from an original text.
Inputs:
- text (str):
- max_size (int):
Returns a list of chunks
'''
tokens = nltk.word_tokenize(text)
chunks = []
chunk = []
count = 0
for word in tokens:
if count < max_size-len(word):
chunk.append(word)
count += len(word)+1
else:
chunks.append(' '.join(chunk))
count = len(word)
chunk = []
chunk.append(word)
chunks.append(' '.join(chunk))
return chunks
def tokens_freq(corpus, size):
'''
Computes the frequency of n-grams according to size and
retuns an ordered data frame.
Inputs:
corpus (string): text to be analized
size (int): size of n-grams
Returns: a data frame
'''
tokens = nltk.word_tokenize(corpus)
frequencies = {}
complete = tokens + tokens[:size - 1]
n_grams = []
for i in range(len(tokens)):
l = i
h = i + size-1
n_grams.append(', '.join(complete[l:h+1]))
for ng in n_grams:
if ng not in frequencies.keys():
frequencies[ng] = 1
else:
frequencies[ng] += 1
freq_list = [(k, v) for k, v in frequencies.items()]
df = pd.DataFrame(freq_list, columns=[str(size)+'-gram', 'Frequency'])
return df.sort_values(by='Frequency', ascending=False)[:20]
|
normal
|
{
"blob_id": "367c3b4da38623e78f2853f9d3464a414ad049c2",
"index": 9596,
"step-1": "<mask token>\n\n\ndef clean_doc(text, language='english'):\n \"\"\"\n\tRemoves unknown characters and punctuation, change capital to lower letters and remove\n\tstop words. If stem=False\n\tInputs:\n\tsentence (string): a sting to be cleaned\n\tReturns: a string\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n tokens = [t.lower() for t in tokens]\n tokens = [t for t in tokens if t not in stopwords.words(language) + [p for\n p in string.punctuation]]\n return ' '.join(tokens)\n\n\n<mask token>\n\n\ndef pos_filter_text(text, filter_list=pos_not_included):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from a given text.\n\tInputs:\n\t\t- text (str): text to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a cleaned text\n\t\"\"\"\n text_list = make_chunks(text)\n temp = []\n for t in text_list:\n pos = nlp.annotate(t, properties={'annotators': 'pos',\n 'outputFormat': 'json'})['sentences'][0]['tokens']\n filtered_words = [stemmer.stem(token['word']) for token in pos if \n token['pos'] not in filter_list]\n filtered_str = ' '.join(filtered_words)\n temp.append(filtered_str)\n final_text = ' '.join(temp)\n return final_text\n\n\n<mask token>\n\n\ndef make_chunks(text, max_size=95000):\n \"\"\"\n\tCreates chunks of text with lenght less than or equal to the \n\tdefined maximum size, from an original text.\n\tInputs:\n\t\t- text (str):\n\t\t- max_size (int):\n\tReturns a list of chunks\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n chunks = []\n chunk = []\n count = 0\n for word in tokens:\n if count < max_size - len(word):\n chunk.append(word)\n count += len(word) + 1\n else:\n chunks.append(' '.join(chunk))\n count = len(word)\n chunk = []\n chunk.append(word)\n chunks.append(' '.join(chunk))\n return chunks\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef clean_doc(text, language='english'):\n \"\"\"\n\tRemoves unknown characters and punctuation, change capital to lower letters and remove\n\tstop words. If stem=False\n\tInputs:\n\tsentence (string): a sting to be cleaned\n\tReturns: a string\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n tokens = [t.lower() for t in tokens]\n tokens = [t for t in tokens if t not in stopwords.words(language) + [p for\n p in string.punctuation]]\n return ' '.join(tokens)\n\n\ndef csv_as_text(file_name):\n \"\"\"\n\tOpens a csv file with sentences and creates a string\n\tInputs:\n\t\t- file_name (str): name of the file to open\n\tReturns a string\n\t\"\"\"\n try:\n df = pd.read_csv(file_name)\n texts_list = set(list(df['0']))\n return ' '.join(texts_list)\n except:\n pass\n\n\n<mask token>\n\n\ndef all_text_list(list_of_files):\n \"\"\"\n\tOpens all csv files with sentences and returns a list of texts\n\tInputs:\n\t\t-list_of_files (list): a list with the names of the files to open\n\tReturns a list\n\t\"\"\"\n all_text = [csv_as_text(file) for file in list_of_files]\n all_text = [text for text in all_text if type(text) == str]\n return all_text\n\n\ndef pos_filter(list_of_texts, filter_list=pos_not_included):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from every text in the list of texts.\n\tInputs:\n\t\t- list_of_texts (list of strings): list with the texts to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a list of cleaned texts\n\t\"\"\"\n filtered_texts = []\n for text in list_of_texts:\n pos = nlp.annotate(text, properties={'annotators': 'pos',\n 'outputFormat': 'json'})['sentences'][0]['tokens']\n filtered_words = [stemmer.stem(token['word']) for token in pos if \n token['pos'] not in filter_list]\n filtered_str = ' '.join(filtered_words)\n filtered_texts.append(filtered_str)\n return filtered_texts\n\n\ndef pos_filter_text(text, 
filter_list=pos_not_included):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from a given text.\n\tInputs:\n\t\t- text (str): text to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a cleaned text\n\t\"\"\"\n text_list = make_chunks(text)\n temp = []\n for t in text_list:\n pos = nlp.annotate(t, properties={'annotators': 'pos',\n 'outputFormat': 'json'})['sentences'][0]['tokens']\n filtered_words = [stemmer.stem(token['word']) for token in pos if \n token['pos'] not in filter_list]\n filtered_str = ' '.join(filtered_words)\n temp.append(filtered_str)\n final_text = ' '.join(temp)\n return final_text\n\n\ndef pos_filter_corpus(corpus):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from every text in the corpus.\n\tInputs:\n\t\t- corpus (dict): Dictionary where every key is an starting link and \n\t\t and every valu is a text associated with the starting link.\n\tReturns a dictionary with the cleaned texts.\n\t\"\"\"\n results = {}\n for k, v in corpus.items():\n results[k] = pos_filter_text(v)\n return results\n\n\ndef make_chunks(text, max_size=95000):\n \"\"\"\n\tCreates chunks of text with lenght less than or equal to the \n\tdefined maximum size, from an original text.\n\tInputs:\n\t\t- text (str):\n\t\t- max_size (int):\n\tReturns a list of chunks\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n chunks = []\n chunk = []\n count = 0\n for word in tokens:\n if count < max_size - len(word):\n chunk.append(word)\n count += len(word) + 1\n else:\n chunks.append(' '.join(chunk))\n count = len(word)\n chunk = []\n chunk.append(word)\n chunks.append(' '.join(chunk))\n return chunks\n\n\ndef tokens_freq(corpus, size):\n \"\"\"\n\tComputes the frequency of n-grams according to size and\n\tretuns an ordered data frame.\n\tInputs:\n\t\tcorpus (string): text to be analized\n\t\tsize (int): size of n-grams\n\tReturns: a 
data frame\n\t\"\"\"\n tokens = nltk.word_tokenize(corpus)\n frequencies = {}\n complete = tokens + tokens[:size - 1]\n n_grams = []\n for i in range(len(tokens)):\n l = i\n h = i + size - 1\n n_grams.append(', '.join(complete[l:h + 1]))\n for ng in n_grams:\n if ng not in frequencies.keys():\n frequencies[ng] = 1\n else:\n frequencies[ng] += 1\n freq_list = [(k, v) for k, v in frequencies.items()]\n df = pd.DataFrame(freq_list, columns=[str(size) + '-gram', 'Frequency'])\n return df.sort_values(by='Frequency', ascending=False)[:20]\n",
"step-3": "<mask token>\nnlp = StanfordCoreNLP('http://localhost:9000/')\npos_not_included = ['CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP',\n 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']\npos_not_included_1 = ['NN', 'NNS', 'NP', 'NPS', 'CC', 'CD', 'DT', 'FW',\n 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``',\n '(', ')', ',', ':']\nstemmer = SnowballStemmer('english')\n\n\ndef clean_doc(text, language='english'):\n \"\"\"\n\tRemoves unknown characters and punctuation, change capital to lower letters and remove\n\tstop words. If stem=False\n\tInputs:\n\tsentence (string): a sting to be cleaned\n\tReturns: a string\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n tokens = [t.lower() for t in tokens]\n tokens = [t for t in tokens if t not in stopwords.words(language) + [p for\n p in string.punctuation]]\n return ' '.join(tokens)\n\n\ndef csv_as_text(file_name):\n \"\"\"\n\tOpens a csv file with sentences and creates a string\n\tInputs:\n\t\t- file_name (str): name of the file to open\n\tReturns a string\n\t\"\"\"\n try:\n df = pd.read_csv(file_name)\n texts_list = set(list(df['0']))\n return ' '.join(texts_list)\n except:\n pass\n\n\ndef gettin_all_text(list_of_files):\n \"\"\"\n\tOpens all csv files with sentences and returns a corpus\n\tInputs:\n\t\t-list_of_files (list): a list with the names of the files to open\n\tReturns a string\n\t\"\"\"\n all_text = [csv_as_text(file) for file in list_of_files]\n all_text = [text for text in all_text if type(text) == str]\n all_str = ' '.join(all_text)\n return all_str\n\n\ndef all_text_list(list_of_files):\n \"\"\"\n\tOpens all csv files with sentences and returns a list of texts\n\tInputs:\n\t\t-list_of_files (list): a list with the names of the files to open\n\tReturns a list\n\t\"\"\"\n all_text = [csv_as_text(file) for file in list_of_files]\n all_text = [text for text in all_text if type(text) == str]\n return all_text\n\n\ndef pos_filter(list_of_texts, 
filter_list=pos_not_included):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from every text in the list of texts.\n\tInputs:\n\t\t- list_of_texts (list of strings): list with the texts to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a list of cleaned texts\n\t\"\"\"\n filtered_texts = []\n for text in list_of_texts:\n pos = nlp.annotate(text, properties={'annotators': 'pos',\n 'outputFormat': 'json'})['sentences'][0]['tokens']\n filtered_words = [stemmer.stem(token['word']) for token in pos if \n token['pos'] not in filter_list]\n filtered_str = ' '.join(filtered_words)\n filtered_texts.append(filtered_str)\n return filtered_texts\n\n\ndef pos_filter_text(text, filter_list=pos_not_included):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from a given text.\n\tInputs:\n\t\t- text (str): text to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a cleaned text\n\t\"\"\"\n text_list = make_chunks(text)\n temp = []\n for t in text_list:\n pos = nlp.annotate(t, properties={'annotators': 'pos',\n 'outputFormat': 'json'})['sentences'][0]['tokens']\n filtered_words = [stemmer.stem(token['word']) for token in pos if \n token['pos'] not in filter_list]\n filtered_str = ' '.join(filtered_words)\n temp.append(filtered_str)\n final_text = ' '.join(temp)\n return final_text\n\n\ndef pos_filter_corpus(corpus):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from every text in the corpus.\n\tInputs:\n\t\t- corpus (dict): Dictionary where every key is an starting link and \n\t\t and every valu is a text associated with the starting link.\n\tReturns a dictionary with the cleaned texts.\n\t\"\"\"\n results = {}\n for k, v in corpus.items():\n results[k] = pos_filter_text(v)\n return results\n\n\ndef make_chunks(text, 
max_size=95000):\n \"\"\"\n\tCreates chunks of text with lenght less than or equal to the \n\tdefined maximum size, from an original text.\n\tInputs:\n\t\t- text (str):\n\t\t- max_size (int):\n\tReturns a list of chunks\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n chunks = []\n chunk = []\n count = 0\n for word in tokens:\n if count < max_size - len(word):\n chunk.append(word)\n count += len(word) + 1\n else:\n chunks.append(' '.join(chunk))\n count = len(word)\n chunk = []\n chunk.append(word)\n chunks.append(' '.join(chunk))\n return chunks\n\n\ndef tokens_freq(corpus, size):\n \"\"\"\n\tComputes the frequency of n-grams according to size and\n\tretuns an ordered data frame.\n\tInputs:\n\t\tcorpus (string): text to be analized\n\t\tsize (int): size of n-grams\n\tReturns: a data frame\n\t\"\"\"\n tokens = nltk.word_tokenize(corpus)\n frequencies = {}\n complete = tokens + tokens[:size - 1]\n n_grams = []\n for i in range(len(tokens)):\n l = i\n h = i + size - 1\n n_grams.append(', '.join(complete[l:h + 1]))\n for ng in n_grams:\n if ng not in frequencies.keys():\n frequencies[ng] = 1\n else:\n frequencies[ng] += 1\n freq_list = [(k, v) for k, v in frequencies.items()]\n df = pd.DataFrame(freq_list, columns=[str(size) + '-gram', 'Frequency'])\n return df.sort_values(by='Frequency', ascending=False)[:20]\n",
"step-4": "<mask token>\nimport nltk\nimport pandas as pd\nfrom nltk.stem.snowball import SnowballStemmer\nfrom pycorenlp import StanfordCoreNLP\nimport math\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport string\nnlp = StanfordCoreNLP('http://localhost:9000/')\npos_not_included = ['CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP',\n 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']\npos_not_included_1 = ['NN', 'NNS', 'NP', 'NPS', 'CC', 'CD', 'DT', 'FW',\n 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``',\n '(', ')', ',', ':']\nstemmer = SnowballStemmer('english')\n\n\ndef clean_doc(text, language='english'):\n \"\"\"\n\tRemoves unknown characters and punctuation, change capital to lower letters and remove\n\tstop words. If stem=False\n\tInputs:\n\tsentence (string): a sting to be cleaned\n\tReturns: a string\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n tokens = [t.lower() for t in tokens]\n tokens = [t for t in tokens if t not in stopwords.words(language) + [p for\n p in string.punctuation]]\n return ' '.join(tokens)\n\n\ndef csv_as_text(file_name):\n \"\"\"\n\tOpens a csv file with sentences and creates a string\n\tInputs:\n\t\t- file_name (str): name of the file to open\n\tReturns a string\n\t\"\"\"\n try:\n df = pd.read_csv(file_name)\n texts_list = set(list(df['0']))\n return ' '.join(texts_list)\n except:\n pass\n\n\ndef gettin_all_text(list_of_files):\n \"\"\"\n\tOpens all csv files with sentences and returns a corpus\n\tInputs:\n\t\t-list_of_files (list): a list with the names of the files to open\n\tReturns a string\n\t\"\"\"\n all_text = [csv_as_text(file) for file in list_of_files]\n all_text = [text for text in all_text if type(text) == str]\n all_str = ' '.join(all_text)\n return all_str\n\n\ndef all_text_list(list_of_files):\n \"\"\"\n\tOpens all csv files with sentences and returns a list of texts\n\tInputs:\n\t\t-list_of_files (list): a list with the names of the files 
to open\n\tReturns a list\n\t\"\"\"\n all_text = [csv_as_text(file) for file in list_of_files]\n all_text = [text for text in all_text if type(text) == str]\n return all_text\n\n\ndef pos_filter(list_of_texts, filter_list=pos_not_included):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from every text in the list of texts.\n\tInputs:\n\t\t- list_of_texts (list of strings): list with the texts to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a list of cleaned texts\n\t\"\"\"\n filtered_texts = []\n for text in list_of_texts:\n pos = nlp.annotate(text, properties={'annotators': 'pos',\n 'outputFormat': 'json'})['sentences'][0]['tokens']\n filtered_words = [stemmer.stem(token['word']) for token in pos if \n token['pos'] not in filter_list]\n filtered_str = ' '.join(filtered_words)\n filtered_texts.append(filtered_str)\n return filtered_texts\n\n\ndef pos_filter_text(text, filter_list=pos_not_included):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from a given text.\n\tInputs:\n\t\t- text (str): text to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a cleaned text\n\t\"\"\"\n text_list = make_chunks(text)\n temp = []\n for t in text_list:\n pos = nlp.annotate(t, properties={'annotators': 'pos',\n 'outputFormat': 'json'})['sentences'][0]['tokens']\n filtered_words = [stemmer.stem(token['word']) for token in pos if \n token['pos'] not in filter_list]\n filtered_str = ' '.join(filtered_words)\n temp.append(filtered_str)\n final_text = ' '.join(temp)\n return final_text\n\n\ndef pos_filter_corpus(corpus):\n \"\"\"\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from every text in the corpus.\n\tInputs:\n\t\t- corpus (dict): Dictionary where every key is an starting link and \n\t\t and every valu is a text associated with the 
starting link.\n\tReturns a dictionary with the cleaned texts.\n\t\"\"\"\n results = {}\n for k, v in corpus.items():\n results[k] = pos_filter_text(v)\n return results\n\n\ndef make_chunks(text, max_size=95000):\n \"\"\"\n\tCreates chunks of text with lenght less than or equal to the \n\tdefined maximum size, from an original text.\n\tInputs:\n\t\t- text (str):\n\t\t- max_size (int):\n\tReturns a list of chunks\n\t\"\"\"\n tokens = nltk.word_tokenize(text)\n chunks = []\n chunk = []\n count = 0\n for word in tokens:\n if count < max_size - len(word):\n chunk.append(word)\n count += len(word) + 1\n else:\n chunks.append(' '.join(chunk))\n count = len(word)\n chunk = []\n chunk.append(word)\n chunks.append(' '.join(chunk))\n return chunks\n\n\ndef tokens_freq(corpus, size):\n \"\"\"\n\tComputes the frequency of n-grams according to size and\n\tretuns an ordered data frame.\n\tInputs:\n\t\tcorpus (string): text to be analized\n\t\tsize (int): size of n-grams\n\tReturns: a data frame\n\t\"\"\"\n tokens = nltk.word_tokenize(corpus)\n frequencies = {}\n complete = tokens + tokens[:size - 1]\n n_grams = []\n for i in range(len(tokens)):\n l = i\n h = i + size - 1\n n_grams.append(', '.join(complete[l:h + 1]))\n for ng in n_grams:\n if ng not in frequencies.keys():\n frequencies[ng] = 1\n else:\n frequencies[ng] += 1\n freq_list = [(k, v) for k, v in frequencies.items()]\n df = pd.DataFrame(freq_list, columns=[str(size) + '-gram', 'Frequency'])\n return df.sort_values(by='Frequency', ascending=False)[:20]\n",
"step-5": "'''\nUtility functions to do get frequencies of n-grams\n\nAuthor: Jesus I. Ramirez Franco\nDecember 2018\n'''\nimport nltk\nimport pandas as pd\nfrom nltk.stem.snowball import SnowballStemmer\nfrom pycorenlp import StanfordCoreNLP\nimport math\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport string\n\nnlp = StanfordCoreNLP('http://localhost:9000/')\npos_not_included = ['CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']\npos_not_included_1 = ['NN', 'NNS','NP', 'NPS','CC', 'CD', 'DT', 'FW', 'IN', 'LS', 'PP', 'PP$', 'WP', 'WP$', 'WRB', 'WDT', '#', '$', '“', '``', '(', ')', ',', ':']\nstemmer = SnowballStemmer(\"english\")\n#regex_tokenizer = RegexpTokenizer(r'\\w+') # Tokenizer that removes punctuation\n\ndef clean_doc(text, language='english'):\n\t'''\n\tRemoves unknown characters and punctuation, change capital to lower letters and remove\n\tstop words. If stem=False\n\tInputs:\n\tsentence (string): a sting to be cleaned\n\tReturns: a string\n\t'''\n\t#tokens = regex_tokenizer.tokenize(text)\n\ttokens = nltk.word_tokenize(text)\n\ttokens = [t.lower() for t in tokens]\n\ttokens = [t for t in tokens if t not in stopwords.words(language)+[p for p in string.punctuation]]\n\treturn ' '.join(tokens)\n\n\ndef csv_as_text(file_name):\n\t'''\n\tOpens a csv file with sentences and creates a string\n\tInputs:\n\t\t- file_name (str): name of the file to open\n\tReturns a string\n\t'''\n\ttry:\n\t\tdf = pd.read_csv(file_name)\n\t\ttexts_list = set(list(df['0']))\n\t\treturn ' '.join(texts_list)\n\texcept:\n\t\tpass\n\n\ndef gettin_all_text(list_of_files):\n\t'''\n\tOpens all csv files with sentences and returns a corpus\n\tInputs:\n\t\t-list_of_files (list): a list with the names of the files to open\n\tReturns a string\n\t'''\n\tall_text = [csv_as_text(file) for file in list_of_files]\n\tall_text = [text for text in all_text if type(text) == str]\n\tall_str = ' 
'.join(all_text)\n\treturn all_str\n\n\ndef all_text_list(list_of_files):\n\t'''\n\tOpens all csv files with sentences and returns a list of texts\n\tInputs:\n\t\t-list_of_files (list): a list with the names of the files to open\n\tReturns a list\n\t'''\n\tall_text = [csv_as_text(file) for file in list_of_files]\n\tall_text = [text for text in all_text if type(text) == str]\n\treturn all_text\n\ndef pos_filter(list_of_texts, filter_list=pos_not_included):\n\t'''\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from every text in the list of texts.\n\tInputs:\n\t\t- list_of_texts (list of strings): list with the texts to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a list of cleaned texts\n\t'''\n\tfiltered_texts = []\n\tfor text in list_of_texts:\n\t\tpos = nlp.annotate(text, properties={'annotators': 'pos', 'outputFormat': 'json'})['sentences'][0]['tokens']\n\t\tfiltered_words = [stemmer.stem(token['word']) for token in pos if token['pos'] not in filter_list]\n\t\tfiltered_str = ' '.join(filtered_words)\n\t\tfiltered_texts.append(filtered_str)\n\treturn filtered_texts\n\n\ndef pos_filter_text(text, filter_list=pos_not_included):\n\t'''\n\tRemoves the words identified with the Part of Speech included \n\tin the filter list, from a given text.\n\tInputs:\n\t\t- text (str): text to be analyzed\n\t\t- filter_list (list of strings): list with part of speech to eliminate\n\tReturns a cleaned text\n\t'''\n\ttext_list = make_chunks(text)\n\ttemp = []\n\tfor t in text_list:\n\t\tpos = nlp.annotate(t, properties={'annotators': 'pos', 'outputFormat': 'json'})['sentences'][0]['tokens']\n\t\tfiltered_words = [stemmer.stem(token['word']) for token in pos if token['pos'] not in filter_list]\n\t\tfiltered_str = ' '.join(filtered_words)\n\t\ttemp.append(filtered_str)\n\tfinal_text = ' '.join(temp)\n\treturn final_text\n\n\ndef pos_filter_corpus(corpus):\n\t'''\n\tRemoves the words 
identified with the Part of Speech included \n\tin the filter list, from every text in the corpus.\n\tInputs:\n\t\t- corpus (dict): Dictionary where every key is an starting link and \n\t\t and every valu is a text associated with the starting link.\n\tReturns a dictionary with the cleaned texts.\n\t'''\n\tresults = {}\n\tfor k, v in corpus.items():\n\t\tresults[k] = pos_filter_text(v)\n\treturn results\n\ndef make_chunks(text, max_size=95000):\n\t'''\n\tCreates chunks of text with lenght less than or equal to the \n\tdefined maximum size, from an original text.\n\tInputs:\n\t\t- text (str):\n\t\t- max_size (int):\n\tReturns a list of chunks\n\t'''\n\ttokens = nltk.word_tokenize(text)\n\tchunks = []\n\tchunk = []\n\tcount = 0\n\tfor word in tokens:\n\t\tif count < max_size-len(word):\n\t\t\tchunk.append(word)\n\t\t\tcount += len(word)+1\n\t\telse:\n\t\t\tchunks.append(' '.join(chunk))\n\t\t\tcount = len(word)\n\t\t\tchunk = []\n\t\t\tchunk.append(word)\n\tchunks.append(' '.join(chunk))\n\treturn chunks\n\n\ndef tokens_freq(corpus, size):\n\t'''\n\tComputes the frequency of n-grams according to size and\n\tretuns an ordered data frame.\n\tInputs:\n\t\tcorpus (string): text to be analized\n\t\tsize (int): size of n-grams\n\tReturns: a data frame\n\t'''\n\t\n\ttokens = nltk.word_tokenize(corpus)\n\tfrequencies = {}\n\tcomplete = tokens + tokens[:size - 1]\n\n\tn_grams = []\n\tfor i in range(len(tokens)):\n\n\t\tl = i\n\t\th = i + size-1\n\t\tn_grams.append(', '.join(complete[l:h+1]))\n\n\tfor ng in n_grams:\n\t\tif ng not in frequencies.keys():\n\t\t\tfrequencies[ng] = 1\n\t\telse:\n\t\t\tfrequencies[ng] += 1\n\n\tfreq_list = [(k, v) for k, v in frequencies.items()]\n\tdf = pd.DataFrame(freq_list, columns=[str(size)+'-gram', 'Frequency'])\n\treturn df.sort_values(by='Frequency', ascending=False)[:20]\n",
"step-ids": [
3,
8,
10,
11,
12
]
}
|
[
3,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.create_all()
<|reserved_special_token_0|>
db.session.add(admin)
db.session.add(guest)
db.session.commit()
<|reserved_special_token_0|>
print(users)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.create_all()
admin = User('admin', 'admin@admin.com', 'admin')
guest = User('guest', 'guest@guest.com', 'guest')
db.session.add(admin)
db.session.add(guest)
db.session.commit()
users = User.query.all()
print(users)
<|reserved_special_token_1|>
from mf_app import db
from mf_app.models import User
db.create_all()
admin = User('admin', 'admin@admin.com', 'admin')
guest = User('guest', 'guest@guest.com', 'guest')
db.session.add(admin)
db.session.add(guest)
db.session.commit()
users = User.query.all()
print(users)
<|reserved_special_token_1|>
from mf_app import db
from mf_app.models import User
db.create_all()
#test input data
admin = User('admin', 'admin@admin.com', 'admin')
guest = User('guest', 'guest@guest.com', 'guest')
db.session.add(admin)
db.session.add(guest)
db.session.commit()
users = User.query.all()
print(users)
|
flexible
|
{
"blob_id": "99c2bd56deccc327faf659e91fc1fd0f6ff7a219",
"index": 3932,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.create_all()\n<mask token>\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\n<mask token>\nprint(users)\n",
"step-3": "<mask token>\ndb.create_all()\nadmin = User('admin', 'admin@admin.com', 'admin')\nguest = User('guest', 'guest@guest.com', 'guest')\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\nusers = User.query.all()\nprint(users)\n",
"step-4": "from mf_app import db\nfrom mf_app.models import User\ndb.create_all()\nadmin = User('admin', 'admin@admin.com', 'admin')\nguest = User('guest', 'guest@guest.com', 'guest')\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\nusers = User.query.all()\nprint(users)\n",
"step-5": "from mf_app import db\nfrom mf_app.models import User\n\ndb.create_all()\n\n#test input data\nadmin = User('admin', 'admin@admin.com', 'admin')\nguest = User('guest', 'guest@guest.com', 'guest')\n\ndb.session.add(admin)\ndb.session.add(guest)\n\ndb.session.commit()\n\nusers = User.query.all()\nprint(users)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
helper.greeting('Hey, dummy')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'AdrianLeo'
helper.greeting('Hey, dummy')
<|reserved_special_token_1|>
import helper
__author__ = 'AdrianLeo'
helper.greeting('Hey, dummy')
<|reserved_special_token_1|>
import helper
__author__ = 'AdrianLeo'
helper.greeting("Hey, dummy")
|
flexible
|
{
"blob_id": "03156992355a756b2ae38735a98251eb611d4245",
"index": 2611,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhelper.greeting('Hey, dummy')\n",
"step-3": "<mask token>\n__author__ = 'AdrianLeo'\nhelper.greeting('Hey, dummy')\n",
"step-4": "import helper\n__author__ = 'AdrianLeo'\nhelper.greeting('Hey, dummy')\n",
"step-5": "import helper\n\n__author__ = 'AdrianLeo'\n\nhelper.greeting(\"Hey, dummy\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('ratings.csv') as in_file:
csvreader = csv.reader(in_file)
with open('ratings_train.csv', 'w') as train_out:
with open('ratings_test.csv', 'w') as test_out:
for row in csvreader:
if not skipped_header:
skipped_header = True
continue
elif int(row[0]) <= 146541:
train_out.write(','.join(row[:-1]))
train_out.write('\n')
else:
test_out.write(','.join(row[:-1]))
test_out.write('\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
skipped_header = False
with open('ratings.csv') as in_file:
csvreader = csv.reader(in_file)
with open('ratings_train.csv', 'w') as train_out:
with open('ratings_test.csv', 'w') as test_out:
for row in csvreader:
if not skipped_header:
skipped_header = True
continue
elif int(row[0]) <= 146541:
train_out.write(','.join(row[:-1]))
train_out.write('\n')
else:
test_out.write(','.join(row[:-1]))
test_out.write('\n')
<|reserved_special_token_1|>
import csv
skipped_header = False
with open('ratings.csv') as in_file:
csvreader = csv.reader(in_file)
with open('ratings_train.csv', 'w') as train_out:
with open('ratings_test.csv', 'w') as test_out:
for row in csvreader:
if not skipped_header:
skipped_header = True
continue
elif int(row[0]) <= 146541:
train_out.write(','.join(row[:-1]))
train_out.write('\n')
else:
test_out.write(','.join(row[:-1]))
test_out.write('\n')
<|reserved_special_token_1|>
import csv
#ratings.csv must be in the same directory
skipped_header = False
with open("ratings.csv") as in_file:
csvreader = csv.reader(in_file)
#read each row of ratings.csv (userId,movieId,rating,timestamp)
with open("ratings_train.csv", 'w') as train_out:
with open("ratings_test.csv", 'w') as test_out:
for row in csvreader:
if not skipped_header:
skipped_header = True
continue
elif int(row[0]) <= 146541:
train_out.write(",".join(row[:-1]))
train_out.write("\n")
else: #rest of the data (16000 of them)
test_out.write(",".join(row[:-1]))
test_out.write("\n")
|
flexible
|
{
"blob_id": "e48a6a84268a0fe64e90714bd32712665934fc39",
"index": 2223,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n",
"step-3": "<mask token>\nskipped_header = False\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n",
"step-4": "import csv\nskipped_header = False\nwith open('ratings.csv') as in_file:\n csvreader = csv.reader(in_file)\n with open('ratings_train.csv', 'w') as train_out:\n with open('ratings_test.csv', 'w') as test_out:\n for row in csvreader:\n if not skipped_header:\n skipped_header = True\n continue\n elif int(row[0]) <= 146541:\n train_out.write(','.join(row[:-1]))\n train_out.write('\\n')\n else:\n test_out.write(','.join(row[:-1]))\n test_out.write('\\n')\n",
"step-5": "import csv\r\n\r\n#ratings.csv must be in the same directory\r\n\r\nskipped_header = False\r\nwith open(\"ratings.csv\") as in_file:\r\n csvreader = csv.reader(in_file)\r\n\t#read each row of ratings.csv (userId,movieId,rating,timestamp)\r\n with open(\"ratings_train.csv\", 'w') as train_out:\r\n with open(\"ratings_test.csv\", 'w') as test_out:\r\n for row in csvreader:\r\n if not skipped_header:\r\n skipped_header = True\r\n continue\r\n elif int(row[0]) <= 146541:\r\n train_out.write(\",\".join(row[:-1]))\r\n train_out.write(\"\\n\")\r\n else: #rest of the data (16000 of them)\r\n test_out.write(\",\".join(row[:-1]))\r\n test_out.write(\"\\n\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tkinter
import csv
import datetime
import time
root = tkinter.Tk()
root.title("Attendance")
root.geometry("+450+250")
ts = time.time()
date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
fileName = "Attendance/Attendance_"+date+".csv"
# open file
with open(fileName, newline="") as file:
reader = csv.reader(file)
# r and c tell us where to grid the labels
r = 0
for col in reader:
c = 0
for row in col:
row = row.strip("['']")
if r == 0:
label = tkinter.Label(root, width=20, height=4,
text=row, bg="#7d807e", relief=tkinter.RIDGE)
label.grid(row=r, column=c)
else:
label = tkinter.Label(root, width=20, height=4,
text=row, relief=tkinter.RIDGE)
label.grid(row=r, column=c)
c += 1
r += 1
fileName2 = "StudentDetails/StudentDetails.csv"
with open(fileName, newline="") as file:
reader = csv.reader(file)
# r and c tell us where to grid the labels
r2 = 0
for col in reader:
r2 += 1
total = r2-1
print(total)
present = r - 1
absent = total - present
label = tkinter.Label(root, width=20, height=4,
text="Present", fg="green", relief=tkinter.RIDGE)
label.grid(row=r+1, column=0)
label = tkinter.Label(root, width=20, height=4,
text=present, relief=tkinter.RIDGE)
label.grid(row=r+1, column=1)
label = tkinter.Label(root, width=20, height=4,
text="Absent", fg="red", relief=tkinter.RIDGE)
label.grid(row=r+1, column=2)
label = tkinter.Label(root, width=20, height=4,
text=absent, relief=tkinter.RIDGE)
label.grid(row=r+1, column=3)
root.mainloop()
|
normal
|
{
"blob_id": "2343a9d3e253b5a0347b5890a5d7b9c3be777669",
"index": 5958,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nroot.title('Attendance')\nroot.geometry('+450+250')\n<mask token>\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n row = row.strip(\"['']\")\n if r == 0:\n label = tkinter.Label(root, width=20, height=4, text=row,\n bg='#7d807e', relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4, text=row,\n relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\n<mask token>\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r2 = 0\n for col in reader:\n r2 += 1\n<mask token>\nprint(total)\n<mask token>\nlabel.grid(row=r + 1, column=0)\n<mask token>\nlabel.grid(row=r + 1, column=1)\n<mask token>\nlabel.grid(row=r + 1, column=2)\n<mask token>\nlabel.grid(row=r + 1, column=3)\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = tkinter.Tk()\nroot.title('Attendance')\nroot.geometry('+450+250')\nts = time.time()\ndate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\nfileName = 'Attendance/Attendance_' + date + '.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n row = row.strip(\"['']\")\n if r == 0:\n label = tkinter.Label(root, width=20, height=4, text=row,\n bg='#7d807e', relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4, text=row,\n relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\nfileName2 = 'StudentDetails/StudentDetails.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r2 = 0\n for col in reader:\n r2 += 1\ntotal = r2 - 1\nprint(total)\npresent = r - 1\nabsent = total - present\nlabel = tkinter.Label(root, width=20, height=4, text='Present', fg='green',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=0)\nlabel = tkinter.Label(root, width=20, height=4, text=present, relief=\n tkinter.RIDGE)\nlabel.grid(row=r + 1, column=1)\nlabel = tkinter.Label(root, width=20, height=4, text='Absent', fg='red',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=2)\nlabel = tkinter.Label(root, width=20, height=4, text=absent, relief=tkinter\n .RIDGE)\nlabel.grid(row=r + 1, column=3)\nroot.mainloop()\n",
"step-4": "import tkinter\nimport csv\nimport datetime\nimport time\nroot = tkinter.Tk()\nroot.title('Attendance')\nroot.geometry('+450+250')\nts = time.time()\ndate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\nfileName = 'Attendance/Attendance_' + date + '.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r = 0\n for col in reader:\n c = 0\n for row in col:\n row = row.strip(\"['']\")\n if r == 0:\n label = tkinter.Label(root, width=20, height=4, text=row,\n bg='#7d807e', relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4, text=row,\n relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n c += 1\n r += 1\nfileName2 = 'StudentDetails/StudentDetails.csv'\nwith open(fileName, newline='') as file:\n reader = csv.reader(file)\n r2 = 0\n for col in reader:\n r2 += 1\ntotal = r2 - 1\nprint(total)\npresent = r - 1\nabsent = total - present\nlabel = tkinter.Label(root, width=20, height=4, text='Present', fg='green',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=0)\nlabel = tkinter.Label(root, width=20, height=4, text=present, relief=\n tkinter.RIDGE)\nlabel.grid(row=r + 1, column=1)\nlabel = tkinter.Label(root, width=20, height=4, text='Absent', fg='red',\n relief=tkinter.RIDGE)\nlabel.grid(row=r + 1, column=2)\nlabel = tkinter.Label(root, width=20, height=4, text=absent, relief=tkinter\n .RIDGE)\nlabel.grid(row=r + 1, column=3)\nroot.mainloop()\n",
"step-5": "import tkinter\nimport csv\nimport datetime\nimport time\n\nroot = tkinter.Tk()\nroot.title(\"Attendance\")\nroot.geometry(\"+450+250\")\n\nts = time.time()\ndate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\nfileName = \"Attendance/Attendance_\"+date+\".csv\"\n# open file\nwith open(fileName, newline=\"\") as file:\n reader = csv.reader(file)\n # r and c tell us where to grid the labels\n r = 0\n for col in reader:\n c = 0\n for row in col:\n\n row = row.strip(\"['']\")\n\n if r == 0:\n label = tkinter.Label(root, width=20, height=4,\n text=row, bg=\"#7d807e\", relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n else:\n label = tkinter.Label(root, width=20, height=4,\n text=row, relief=tkinter.RIDGE)\n label.grid(row=r, column=c)\n\n c += 1\n r += 1\n\nfileName2 = \"StudentDetails/StudentDetails.csv\"\nwith open(fileName, newline=\"\") as file:\n reader = csv.reader(file)\n # r and c tell us where to grid the labels\n r2 = 0\n for col in reader:\n r2 += 1\n\ntotal = r2-1\nprint(total)\npresent = r - 1\nabsent = total - present\nlabel = tkinter.Label(root, width=20, height=4,\n text=\"Present\", fg=\"green\", relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=0)\nlabel = tkinter.Label(root, width=20, height=4,\n text=present, relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=1)\nlabel = tkinter.Label(root, width=20, height=4,\n text=\"Absent\", fg=\"red\", relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=2)\nlabel = tkinter.Label(root, width=20, height=4,\n text=absent, relief=tkinter.RIDGE)\nlabel.grid(row=r+1, column=3)\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(len(x)):
ans += abs(x[i] - y[i])
for i in range(1, len(y)):
ans += abs(x[i - 1] - y[i])
if n % 2 == 1:
ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))
print(ans)
<|reserved_special_token_1|>
n = int(input())
a = sorted([int(input()) for _ in range(n)])
x = a[:n // 2]
y = a[(n + 1) // 2:]
ans = 0
for i in range(len(x)):
ans += abs(x[i] - y[i])
for i in range(1, len(y)):
ans += abs(x[i - 1] - y[i])
if n % 2 == 1:
ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))
print(ans)
<|reserved_special_token_1|>
n = int(input())
a = sorted([int(input()) for _ in range(n)])
x = a[:n//2]
y = a[(n + 1)//2:]
ans = 0
for i in range(len(x)):
ans += abs(x[i] - y[i])
for i in range(1, len(y)):
ans += abs(x[i - 1] - y[i])
if n % 2 == 1:
ans += max(
abs(a[n // 2] - x[-1]),
abs(a[n // 2] - y[0]),
)
print(ans)
|
flexible
|
{
"blob_id": "0e9d0927e8d69b0c0fad98479d47f2409c95a751",
"index": 794,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(x)):\n ans += abs(x[i] - y[i])\nfor i in range(1, len(y)):\n ans += abs(x[i - 1] - y[i])\nif n % 2 == 1:\n ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))\nprint(ans)\n",
"step-3": "n = int(input())\na = sorted([int(input()) for _ in range(n)])\nx = a[:n // 2]\ny = a[(n + 1) // 2:]\nans = 0\nfor i in range(len(x)):\n ans += abs(x[i] - y[i])\nfor i in range(1, len(y)):\n ans += abs(x[i - 1] - y[i])\nif n % 2 == 1:\n ans += max(abs(a[n // 2] - x[-1]), abs(a[n // 2] - y[0]))\nprint(ans)\n",
"step-4": "n = int(input())\na = sorted([int(input()) for _ in range(n)])\n\nx = a[:n//2]\ny = a[(n + 1)//2:]\n\nans = 0\nfor i in range(len(x)):\n ans += abs(x[i] - y[i])\nfor i in range(1, len(y)):\n ans += abs(x[i - 1] - y[i])\nif n % 2 == 1:\n ans += max(\n abs(a[n // 2] - x[-1]),\n abs(a[n // 2] - y[0]),\n )\nprint(ans)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='ckanext-MYEXTENSION', version=version, description=
'description', long_description='\t', classifiers=[], keywords='',
author='ldhspace', author_email='ldhspace@yahoo.co.kr', url=
'www.naver.com', license='free', packages=find_packages(exclude=[
'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',
'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,
install_requires=[], entry_points=
"""
[ckan.plugins]
# Add plugins here, eg
usmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin
"""
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
version = '0.1'
setup(name='ckanext-MYEXTENSION', version=version, description=
'description', long_description='\t', classifiers=[], keywords='',
author='ldhspace', author_email='ldhspace@yahoo.co.kr', url=
'www.naver.com', license='free', packages=find_packages(exclude=[
'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',
'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,
install_requires=[], entry_points=
"""
[ckan.plugins]
# Add plugins here, eg
usmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin
"""
)
<|reserved_special_token_1|>
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='ckanext-MYEXTENSION', version=version, description=
'description', long_description='\t', classifiers=[], keywords='',
author='ldhspace', author_email='ldhspace@yahoo.co.kr', url=
'www.naver.com', license='free', packages=find_packages(exclude=[
'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',
'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,
install_requires=[], entry_points=
"""
[ckan.plugins]
# Add plugins here, eg
usmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin
"""
)
<|reserved_special_token_1|>
from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(
name='ckanext-MYEXTENSION',
version=version,
description="description",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='',
author='ldhspace',
author_email='ldhspace@yahoo.co.kr',
url='www.naver.com',
license='free',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=['ckanext', 'ckanext.MYEXTENSION'],
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points=\
"""
[ckan.plugins]
# Add plugins here, eg
usmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin
""",
)
|
flexible
|
{
"blob_id": "9d2c0d59b0b2b4e4fca942e648059738053c53d0",
"index": 9376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='ckanext-MYEXTENSION', version=version, description=\n 'description', long_description='\\t', classifiers=[], keywords='',\n author='ldhspace', author_email='ldhspace@yahoo.co.kr', url=\n 'www.naver.com', license='free', packages=find_packages(exclude=[\n 'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',\n 'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,\n install_requires=[], entry_points=\n \"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\"\n )\n",
"step-3": "<mask token>\nversion = '0.1'\nsetup(name='ckanext-MYEXTENSION', version=version, description=\n 'description', long_description='\\t', classifiers=[], keywords='',\n author='ldhspace', author_email='ldhspace@yahoo.co.kr', url=\n 'www.naver.com', license='free', packages=find_packages(exclude=[\n 'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',\n 'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,\n install_requires=[], entry_points=\n \"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\"\n )\n",
"step-4": "from setuptools import setup, find_packages\nimport sys, os\nversion = '0.1'\nsetup(name='ckanext-MYEXTENSION', version=version, description=\n 'description', long_description='\\t', classifiers=[], keywords='',\n author='ldhspace', author_email='ldhspace@yahoo.co.kr', url=\n 'www.naver.com', license='free', packages=find_packages(exclude=[\n 'ez_setup', 'examples', 'tests']), namespace_packages=['ckanext',\n 'ckanext.MYEXTENSION'], include_package_data=True, zip_safe=False,\n install_requires=[], entry_points=\n \"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\"\n )\n",
"step-5": "from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.1'\n\nsetup(\n\tname='ckanext-MYEXTENSION',\n\tversion=version,\n\tdescription=\"description\",\n\tlong_description=\"\"\"\\\n\t\"\"\",\n\tclassifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n\tkeywords='',\n\tauthor='ldhspace',\n\tauthor_email='ldhspace@yahoo.co.kr',\n\turl='www.naver.com',\n\tlicense='free',\n\tpackages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n\tnamespace_packages=['ckanext', 'ckanext.MYEXTENSION'],\n\tinclude_package_data=True,\n\tzip_safe=False,\n\tinstall_requires=[\n\t\t# -*- Extra requirements: -*-\n\t],\n\tentry_points=\\\n\t\"\"\"\n [ckan.plugins]\n\t# Add plugins here, eg\n\tusmetadata=ckanext.MYEXTENSION.plugin:USMetadataPlugin\n\t\"\"\",\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import csv
from matplotlib import pyplot as plt
from datetime import datetime
# NOTE(review): parsing is hard-coded to a single album; see the TODO at the
# bottom about generalising this to every album found in the CSV.

file_one = 'data/dwifh_all_sales.csv'
file_two = 'data/dwifh_bc_sales.csv'

with open(file_one) as fo:
    reader = csv.reader(fo)
    header = next(reader)

    album = {}
    dates = []
    cd_income = []
    dd_income = []
    total_profit = []
    artist_payout = []

    # Keep only the rows for this one album; columns used are
    # 0=date, 2=title, 4=CD income, 5=digital income, 7=total profit,
    # 8=artist payout.
    for record in reader:
        if record[2].strip() != 'Harm\'s Way':
            continue
        dates.append(float(record[0].strip()))
        cd_income.append(int(float(record[4].strip())))
        dd_income.append(int(float(record[5].strip())))
        total_profit.append(int(float(record[7].strip())))
        artist_payout.append(int(float(record[8].strip())))

album_alltime_profit = sum(total_profit)
artist_alltime_payout = sum(artist_payout)

# Assemble everything about this album into a single dictionary.
album['title'] = 'Harm\'s Way'
album['period of sales'] = dates
album['cd_income_data'] = cd_income
album['dd_income_data'] = dd_income
album['all_time_profit'] = album_alltime_profit
album['all_time_payout'] = artist_alltime_payout

for key, value in album.items():
    print(f'{key}: {value}')

# Plot digital (red) and CD (blue) income over the sales period.
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.plot(album['period of sales'], album['dd_income_data'], c='red')
ax.plot(album['period of sales'], album['cd_income_data'], c='blue')

plt.title('{} Sales - All Time'.format(album['title']))
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel('CD (blue) and DD (red)', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)

# plt.show()

# TODO: generalise to every album in the CSV:
#   1. collect the distinct album names from column 2 (no duplicates);
#   2. loop `for album_name in album_list:` and build one dict per album
#      (same fields as above) from the rows matching that name;
#   3. append each dict to a list of dicts, one per album, each holding
#      that album's own data.
|
normal
|
{
"blob_id": "53380810a3d9787fe7c373cf1829f2d849a91c3c",
"index": 8456,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\n<mask token>\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\n<mask token>\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n",
"step-3": "<mask token>\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\nalbum['title'] = \"Harm's Way\"\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n",
"step-4": "import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [\n ], []\n for row in reader:\n if row[2].strip() == \"Harm's Way\":\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\nalbum['title'] = \"Harm's Way\"\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\nfor key, value in album.items():\n print(f'{key}: {value}')\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c='blue')\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n",
"step-5": "import csv\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\n\nfile_one = 'data/dwifh_all_sales.csv'\nfile_two = 'data/dwifh_bc_sales.csv'\n\n# create code to automatically build a dictionary for each album?\n\nwith open(file_one) as fo:\n reader = csv.reader(fo)\n header = next(reader)\n\n album = {}\n dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [], []\n\n for row in reader:\n if row[2].strip() == 'Harm\\'s Way':\n dates.append(float(row[0].strip()))\n cd_income.append(int(float(row[4].strip())))\n dd_income.append(int(float(row[5].strip())))\n total_profit.append(int(float(row[7].strip())))\n artist_payout.append(int(float(row[8].strip())))\n else:\n pass\n\nalbum_alltime_profit = sum(total_profit)\nartist_alltime_payout = sum(artist_payout)\n\n# complete the dictionary for this album\nalbum['title'] = 'Harm\\'s Way'\nalbum['period of sales'] = dates\nalbum['cd_income_data'] = cd_income\nalbum['dd_income_data'] = dd_income\nalbum['all_time_profit'] = album_alltime_profit\nalbum['all_time_payout'] = artist_alltime_payout\n\nfor key, value in album.items():\n print(f'{key}: {value}')\n\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(album['period of sales'], album['dd_income_data'], c='red')\nax.plot(album['period of sales'], album['cd_income_data'], c = 'blue')\n\nplt.title('{} Sales - All Time'.format(album['title']))\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel('CD (blue) and DD (red)', fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n\n#plt.show()\n\n# TASK:\n# 1. get the names of the albums from the .csv file and store\n# them in a list. 
make sure there are no duplicates.\n\n# parse the csv file and create a dictionary for each album,\n# assigning it the name taken from the name list.\n# use: for album in album_list: so the process is done once\n# for each album name.\n# the dict created for each album contains all the data pulled\n# from the csv file. create the dict, then append it to\n# a list of dicts. this list will, when done, contain four\n# dictionaries, one for each album.\n# but since it's done in a loop, all four dicts get created\n# automatically, but they contain different data, respective to\n# each album.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while a == 1:
b = source()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
a = 2
while a == 1:
b = source()
c = function(b)
|
flexible
|
{
"blob_id": "56cae7b7a0338bd4a405cdc3cdcd9945a9df8823",
"index": 5839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile a == 1:\n b = source()\n<mask token>\n",
"step-3": "a = 2\nwhile a == 1:\n b = source()\nc = function(b)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import time
from datetime import datetime, timedelta
from git import Repo
class CommitAnalyzer():

	"""
	Inspect a git repository's history for merge-conflict commits.

	Takes the filesystem path of a non-bare repository.
	"""
	def __init__(self, repo_path):
		self.repo_path = repo_path
		self.repo = Repo(self.repo_path)
		# A bare repo has no working tree; the scan below expects a clone.
		assert not self.repo.bare

	def get_conflict_commits(self):
		"""Return commits on 'master' that are merges whose message
		mentions a conflict in a .java file."""
		conflict_commits = []
		for commit in self.repo.iter_commits('master'):
			# Lower-case once; the original recomputed it per check.
			message = commit.message.lower()
			# A merge commit has more than one parent.
			if len(commit.parents) > 1 and "conflict" in message and ".java" in message:
				conflict_commits.append(commit)

		return conflict_commits
# Run this script from inside a cloned repository: scan its history for
# merge commits mentioning Java conflicts and print each with its date.
commit_analyzer = CommitAnalyzer(os.getcwd())
for conflict_commit in commit_analyzer.get_conflict_commits():
	print (conflict_commit, time.asctime(time.gmtime(conflict_commit.committed_date)))
|
normal
|
{
"blob_id": "8479c70fed36dc6f1e6094c832fb22d8c2e53e3a",
"index": 920,
"step-1": "<mask token>\n\n\nclass CommitAnalyzer:\n <mask token>\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CommitAnalyzer:\n \"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\n<mask token>\nfor commit in commit_analyzer.get_conflict_commits():\n print(commit, time.asctime(time.gmtime(commit.committed_date)))\n",
"step-3": "<mask token>\n\n\nclass CommitAnalyzer:\n \"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\ncommit_analyzer = CommitAnalyzer(os.getcwd())\nfor commit in commit_analyzer.get_conflict_commits():\n print(commit, time.asctime(time.gmtime(commit.committed_date)))\n",
"step-4": "import os\nimport time\nfrom datetime import datetime, timedelta\nfrom git import Repo\n\n\nclass CommitAnalyzer:\n \"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\ncommit_analyzer = CommitAnalyzer(os.getcwd())\nfor commit in commit_analyzer.get_conflict_commits():\n print(commit, time.asctime(time.gmtime(commit.committed_date)))\n",
"step-5": "import os\nimport time\nfrom datetime import datetime, timedelta\nfrom git import Repo\n\nclass CommitAnalyzer():\n\n\t\"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\tdef __init__(self, repo_path):\n\t\tself.repo_path = repo_path\n\t\tself.repo = Repo(self.repo_path)\n\t\tassert not self.repo.bare\n\n\tdef get_conflict_commits(self):\n\t\tconflict_commits = []\n\t\tcurrent_date = datetime.now()\n\t\tfor commit in self.repo.iter_commits('master'):\n\t\t\tparents = commit.parents\n\t\t\tif len(parents) > 1 and \"conflict\" in commit.message.lower() and \".java\" in commit.message.lower():\n\t\t\t\t#if datetime.fromtimestamp(commit.committed_date) >= current_date - timedelta(5):\n\t\t\t\tconflict_commits.append(commit)\n\n\t\treturn conflict_commits\n\n#run script in cloned repo\ncommit_analyzer = CommitAnalyzer(os.getcwd())\nfor commit in commit_analyzer.get_conflict_commits():\n\tprint (commit, time.asctime(time.gmtime(commit.committed_date)))",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
def non_dupulicates_lette(word):
    """Print the list of characters in *word*, then each distinct
    character once, in order of first appearance.

    The original body referenced an undefined name ``c`` inside its
    nested loops and raised NameError on every call; it now prints
    each letter a single time, skipping duplicates.
    """
    text = list(word)
    print(text)
    seen = []
    for letter in text:
        if letter not in seen:
            seen.append(letter)
            print(letter)
def has_dupulicates(word):
    """Tally character frequencies in *word*.

    Prints each character occurring exactly once on its own line, and
    each repeated character together with its count, then returns the
    full character -> count mapping.
    """
    counts = dict()
    for character in word:
        counts[character] = counts.get(character, 0) + 1

    for character in counts:
        if counts[character] == 1:
            print(character)
        else:
            print(character, counts[character])

    return counts
# Sample input: exercise the duplicate-letter printer.
A = 'bccata'
non_dupulicates_lette(A)
|
normal
|
{
"blob_id": "8cd234c2ec1b36abd992cc1a46147376cc241ede",
"index": 3276,
"step-1": "<mask token>\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\n",
"step-2": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\n",
"step-3": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\n<mask token>\nnon_dupulicates_lette(A)\n",
"step-4": "def non_dupulicates_lette(word):\n text = list(word)\n print(text)\n i = 0\n for i in range(len(text)):\n for k in text:\n print(c)\n\n\ndef has_dupulicates(word):\n d = dict()\n for c in word:\n if c not in d:\n d[c] = 1\n else:\n d[c] += 1\n for k in d:\n if d[k] == 1:\n print(k)\n else:\n print(k, d[k])\n return d\n\n\nA = 'bccata'\nnon_dupulicates_lette(A)\n",
"step-5": "def non_dupulicates_lette(word):\n text = list(word);\n print(text)\n i=0\n for i in range(len(text)):\n for k in text:\n print(c)\n \ndef has_dupulicates(word):\n d= dict()\n for c in word:\n if c not in d:\n d[c]=1\n \n else:\n d[c]+=1\n\n\n for k in d:\n if d[k]==1:\n print(k)\n \n else:\n print(k,d[k])\n \n \n \n return d\n #count=0\n #othercount=1\n #sizeword=len(word)-1\n #while count<sizeword:\n #letter=word[count]\n #while othercount<sizeword:\n #if letter == word[othercount]:\n #return True\n #othercount= othercount+1\n\n #count+=1\n\n\n #return False\nA='bccata'#['a','b','b','c']\nnon_dupulicates_lette(A)\n#result=has_dupulicates(A)\n#print(result)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
"""
#####################################################################
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#See the license text in license.txt
#copyright 2008, University of Tennessee
######################################################################
import numpy as np
import os
from sas.sascalc.dataloader.data_info import Data1D
from sas.sascalc.dataloader.data_info import Detector
has_converter = True
try:
from sas.sascalc.data_util.nxsunit import Converter
except:
has_converter = False
class Reader:
"""
Class to load IGOR reduced .ABS files
"""
## File type
type_name = "IGOR 1D"
## Wildcards
type = ["IGOR 1D files (*.abs)|*.abs"]
## List of allowed extensions
ext = ['.abs', '.ABS']
def read(self, path):
"""
Load data file.
:param path: file path
:return: Data1D object, or None
:raise RuntimeError: when the file can't be opened
:raise ValueError: when the length of the data vectors are inconsistent
"""
if os.path.isfile(path):
basename = os.path.basename(path)
root, extension = os.path.splitext(basename)
if extension.lower() in self.ext:
try:
input_f = open(path,'r')
except:
raise RuntimeError, "abs_reader: cannot open %s" % path
buff = input_f.read()
lines = buff.split('\n')
x = np.zeros(0)
y = np.zeros(0)
dy = np.zeros(0)
dx = np.zeros(0)
output = Data1D(x, y, dy=dy, dx=dx)
detector = Detector()
output.detector.append(detector)
output.filename = basename
is_info = False
is_center = False
is_data_started = False
data_conv_q = None
data_conv_i = None
if has_converter == True and output.x_unit != '1/A':
data_conv_q = Converter('1/A')
# Test it
data_conv_q(1.0, output.x_unit)
if has_converter == True and output.y_unit != '1/cm':
data_conv_i = Converter('1/cm')
# Test it
data_conv_i(1.0, output.y_unit)
for line in lines:
# Information line 1
if is_info == True:
is_info = False
line_toks = line.split()
# Wavelength in Angstrom
try:
value = float(line_toks[1])
if has_converter == True and \
output.source.wavelength_unit != 'A':
conv = Converter('A')
output.source.wavelength = conv(value,
units=output.source.wavelength_unit)
else:
output.source.wavelength = value
except:
#goes to ASC reader
msg = "abs_reader: cannot open %s" % path
raise RuntimeError, msg
# Distance in meters
try:
value = float(line_toks[3])
if has_converter == True and \
detector.distance_unit != 'm':
conv = Converter('m')
detector.distance = conv(value,
units=detector.distance_unit)
else:
detector.distance = value
except:
#goes to ASC reader
msg = "abs_reader: cannot open %s" % path
raise RuntimeError, msg
# Transmission
try:
output.sample.transmission = float(line_toks[4])
except:
# Transmission is not a mandatory entry
pass
# Thickness in mm
try:
value = float(line_toks[5])
if has_converter == True and \
output.sample.thickness_unit != 'cm':
conv = Converter('cm')
output.sample.thickness = conv(value,
units=output.sample.thickness_unit)
else:
output.sample.thickness = value
except:
# Thickness is not a mandatory entry
pass
#MON CNT LAMBDA DET ANG DET DIST TRANS THICK
# AVE STEP
if line.count("LAMBDA") > 0:
is_info = True
# Find center info line
if is_center == True:
is_center = False
line_toks = line.split()
# Center in bin number
center_x = float(line_toks[0])
center_y = float(line_toks[1])
# Bin size
if has_converter == True and \
detector.pixel_size_unit != 'mm':
conv = Converter('mm')
detector.pixel_size.x = conv(5.0,
units=detector.pixel_size_unit)
detector.pixel_size.y = conv(5.0,
units=detector.pixel_size_unit)
else:
detector.pixel_size.x = 5.0
detector.pixel_size.y = 5.0
# Store beam center in distance units
# Det 640 x 640 mm
if has_converter == True and \
detector.beam_center_unit != 'mm':
conv = Converter('mm')
detector.beam_center.x = conv(center_x * 5.0,
units=detector.beam_center_unit)
detector.beam_center.y = conv(center_y * 5.0,
units=detector.beam_center_unit)
else:
detector.beam_center.x = center_x * 5.0
detector.beam_center.y = center_y * 5.0
# Detector type
try:
detector.name = line_toks[7]
except:
# Detector name is not a mandatory entry
pass
#BCENT(X,Y) A1(mm) A2(mm) A1A2DIST(m) DL/L
# BSTOP(mm) DET_TYP
if line.count("BCENT") > 0:
is_center = True
# Parse the data
if is_data_started == True:
toks = line.split()
try:
_x = float(toks[0])
_y = float(toks[1])
_dy = float(toks[2])
_dx = float(toks[3])
if data_conv_q is not None:
_x = data_conv_q(_x, units=output.x_unit)
_dx = data_conv_i(_dx, units=output.x_unit)
if data_conv_i is not None:
_y = data_conv_i(_y, units=output.y_unit)
_dy = data_conv_i(_dy, units=output.y_unit)
x = np.append(x, _x)
y = np.append(y, _y)
dy = np.append(dy, _dy)
dx = np.append(dx, _dx)
except:
# Could not read this data line. If we are here
# it is because we are in the data section. Just
# skip it.
pass
#The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. dev.
# I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor|
if line.count("The 6 columns") > 0:
is_data_started = True
# Sanity check
if not len(y) == len(dy):
msg = "abs_reader: y and dy have different length"
raise ValueError, msg
# If the data length is zero, consider this as
# though we were not able to read the file.
if len(x) == 0:
raise ValueError, "ascii_reader: could not load file"
output.x = x[x != 0]
output.y = y[x != 0]
output.dy = dy[x != 0]
output.dx = dx[x != 0]
if data_conv_q is not None:
output.xaxis("\\rm{Q}", output.x_unit)
else:
output.xaxis("\\rm{Q}", 'A^{-1}')
if data_conv_i is not None:
output.yaxis("\\rm{Intensity}", output.y_unit)
else:
output.yaxis("\\rm{Intensity}", "cm^{-1}")
# Store loading process information
output.meta_data['loader'] = self.type_name
return output
else:
raise RuntimeError, "%s is not a file" % path
return None
|
normal
|
{
"blob_id": "3cdb39e201983e672f6c22c25492a120be3d0d48",
"index": 9937,
"step-1": "\"\"\"\n\"\"\"\n#####################################################################\n#This software was developed by the University of Tennessee as part of the\n#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)\n#project funded by the US National Science Foundation.\n#See the license text in license.txt\n#copyright 2008, University of Tennessee\n######################################################################\n\nimport numpy as np\nimport os\nfrom sas.sascalc.dataloader.data_info import Data1D\nfrom sas.sascalc.dataloader.data_info import Detector\n\nhas_converter = True\ntry:\n from sas.sascalc.data_util.nxsunit import Converter\nexcept:\n has_converter = False\n \n \nclass Reader:\n \"\"\"\n Class to load IGOR reduced .ABS files\n \"\"\"\n ## File type\n type_name = \"IGOR 1D\"\n ## Wildcards\n type = [\"IGOR 1D files (*.abs)|*.abs\"]\n ## List of allowed extensions\n ext = ['.abs', '.ABS']\n \n def read(self, path):\n \"\"\" \n Load data file.\n \n :param path: file path\n \n :return: Data1D object, or None\n \n :raise RuntimeError: when the file can't be opened\n :raise ValueError: when the length of the data vectors are inconsistent\n \"\"\"\n if os.path.isfile(path):\n basename = os.path.basename(path)\n root, extension = os.path.splitext(basename)\n if extension.lower() in self.ext:\n try:\n input_f = open(path,'r')\n except:\n raise RuntimeError, \"abs_reader: cannot open %s\" % path\n buff = input_f.read()\n lines = buff.split('\\n')\n x = np.zeros(0)\n y = np.zeros(0)\n dy = np.zeros(0)\n dx = np.zeros(0)\n output = Data1D(x, y, dy=dy, dx=dx)\n detector = Detector()\n output.detector.append(detector)\n output.filename = basename\n \n is_info = False\n is_center = False\n is_data_started = False\n \n data_conv_q = None\n data_conv_i = None\n \n if has_converter == True and output.x_unit != '1/A':\n data_conv_q = Converter('1/A')\n # Test it\n data_conv_q(1.0, output.x_unit)\n \n if has_converter == True and 
output.y_unit != '1/cm':\n data_conv_i = Converter('1/cm')\n # Test it\n data_conv_i(1.0, output.y_unit)\n \n for line in lines:\n \n # Information line 1\n if is_info == True:\n is_info = False\n line_toks = line.split()\n \n # Wavelength in Angstrom\n try:\n value = float(line_toks[1])\n if has_converter == True and \\\n output.source.wavelength_unit != 'A':\n conv = Converter('A')\n output.source.wavelength = conv(value,\n units=output.source.wavelength_unit)\n else:\n output.source.wavelength = value\n except:\n #goes to ASC reader\n msg = \"abs_reader: cannot open %s\" % path\n raise RuntimeError, msg\n \n # Distance in meters\n try:\n value = float(line_toks[3])\n if has_converter == True and \\\n detector.distance_unit != 'm':\n conv = Converter('m')\n detector.distance = conv(value,\n units=detector.distance_unit)\n else:\n detector.distance = value\n except:\n #goes to ASC reader\n msg = \"abs_reader: cannot open %s\" % path\n raise RuntimeError, msg\n # Transmission\n try:\n output.sample.transmission = float(line_toks[4])\n except:\n # Transmission is not a mandatory entry\n pass\n \n # Thickness in mm\n try:\n value = float(line_toks[5])\n if has_converter == True and \\\n output.sample.thickness_unit != 'cm':\n conv = Converter('cm')\n output.sample.thickness = conv(value,\n units=output.sample.thickness_unit)\n else:\n output.sample.thickness = value\n except:\n # Thickness is not a mandatory entry\n pass\n \n #MON CNT LAMBDA DET ANG DET DIST TRANS THICK \n # AVE STEP\n if line.count(\"LAMBDA\") > 0:\n is_info = True\n \n # Find center info line\n if is_center == True:\n is_center = False\n line_toks = line.split()\n # Center in bin number\n center_x = float(line_toks[0])\n center_y = float(line_toks[1])\n \n # Bin size\n if has_converter == True and \\\n detector.pixel_size_unit != 'mm':\n conv = Converter('mm')\n detector.pixel_size.x = conv(5.0,\n units=detector.pixel_size_unit)\n detector.pixel_size.y = conv(5.0,\n 
units=detector.pixel_size_unit)\n else:\n detector.pixel_size.x = 5.0\n detector.pixel_size.y = 5.0\n \n # Store beam center in distance units\n # Det 640 x 640 mm\n if has_converter == True and \\\n detector.beam_center_unit != 'mm':\n conv = Converter('mm')\n detector.beam_center.x = conv(center_x * 5.0,\n units=detector.beam_center_unit)\n detector.beam_center.y = conv(center_y * 5.0,\n units=detector.beam_center_unit)\n else:\n detector.beam_center.x = center_x * 5.0\n detector.beam_center.y = center_y * 5.0\n \n # Detector type\n try:\n detector.name = line_toks[7]\n except:\n # Detector name is not a mandatory entry\n pass\n \n #BCENT(X,Y) A1(mm) A2(mm) A1A2DIST(m) DL/L\n # BSTOP(mm) DET_TYP\n if line.count(\"BCENT\") > 0:\n is_center = True\n \n # Parse the data\n if is_data_started == True:\n toks = line.split()\n\n try:\n _x = float(toks[0])\n _y = float(toks[1])\n _dy = float(toks[2])\n _dx = float(toks[3])\n \n if data_conv_q is not None:\n _x = data_conv_q(_x, units=output.x_unit)\n _dx = data_conv_i(_dx, units=output.x_unit)\n \n if data_conv_i is not None:\n _y = data_conv_i(_y, units=output.y_unit)\n _dy = data_conv_i(_dy, units=output.y_unit)\n \n x = np.append(x, _x)\n y = np.append(y, _y)\n dy = np.append(dy, _dy)\n dx = np.append(dx, _dx)\n \n except:\n # Could not read this data line. If we are here\n # it is because we are in the data section. Just\n # skip it.\n pass\n \n #The 6 columns are | Q (1/A) | I(Q) (1/cm) | std. 
dev.\n # I(Q) (1/cm) | sigmaQ | meanQ | ShadowFactor|\n if line.count(\"The 6 columns\") > 0:\n is_data_started = True\n \n # Sanity check\n if not len(y) == len(dy):\n msg = \"abs_reader: y and dy have different length\"\n raise ValueError, msg\n # If the data length is zero, consider this as\n # though we were not able to read the file.\n if len(x) == 0:\n raise ValueError, \"ascii_reader: could not load file\"\n \n output.x = x[x != 0]\n output.y = y[x != 0]\n output.dy = dy[x != 0]\n output.dx = dx[x != 0]\n if data_conv_q is not None:\n output.xaxis(\"\\\\rm{Q}\", output.x_unit)\n else:\n output.xaxis(\"\\\\rm{Q}\", 'A^{-1}')\n if data_conv_i is not None:\n output.yaxis(\"\\\\rm{Intensity}\", output.y_unit)\n else:\n output.yaxis(\"\\\\rm{Intensity}\", \"cm^{-1}\")\n \n # Store loading process information\n output.meta_data['loader'] = self.type_name\n return output\n else:\n raise RuntimeError, \"%s is not a file\" % path\n return None\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""Application configuration.
See https://github.com/sloria/cookiecutter-flask for configuration options with other flask-extensions
"""
import os
class Config(object):
    """Base configuration inherited by the Prod/Dev/Test variants."""
    # Session/signing key; read from the environment, with an insecure
    # fallback for local development.
    SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key') # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
    # One level above the application package (the repository root).
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    # Flask-Assistant Integrations
    ASSIST_ACTIONS_ON_GOOGLE = True
    # Placeholder tokens -- replace with the real api.ai agent credentials.
    CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'
    DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'
class ProdConfig(Config):
    """Production configuration: debugging disabled."""
    ENV = 'prod'
    # Never run with debug enabled in production.
    DEBUG = False
class DevConfig(Config):
    """Development configuration: debugging enabled."""
    ENV = 'dev'
    DEBUG = True
class TestConfig(Config):
    """Test configuration: testing mode and debugging enabled."""
    TESTING = True
    DEBUG = True
|
normal
|
{
"blob_id": "4cc1c8668a84cc6faadf60053568d155b8852c5f",
"index": 5643,
"step-1": "<mask token>\n\n\nclass DevConfig(Config):\n <mask token>\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-2": "<mask token>\n\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n <mask token>\n SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key')\n APP_DIR = os.path.abspath(os.path.dirname(__file__))\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n ASSIST_ACTIONS_ON_GOOGLE = True\n CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'\n DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'\n\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-4": "<mask token>\nimport os\n\n\nclass Config(object):\n \"\"\"Base configuration.\"\"\"\n SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key')\n APP_DIR = os.path.abspath(os.path.dirname(__file__))\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n ASSIST_ACTIONS_ON_GOOGLE = True\n CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'\n DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'\n\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Application configuration.\n\nSee https://github.com/sloria/cookiecutter-flask for configuration options with other flask-extensions\n\"\"\"\nimport os\n\n\nclass Config(object):\n \"\"\"Base configuration.\"\"\"\n\n SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key') # TODO: Change me\n APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n\n # Flask-Assistant Integrations\n ASSIST_ACTIONS_ON_GOOGLE = True\n CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'\n DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n\n ENV = 'dev'\n DEBUG = True\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n\n TESTING = True\n DEBUG = True\n",
"step-ids": [
5,
9,
11,
13,
14
]
}
|
[
5,
9,
11,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
url_arg = sys.argv[1]
email = sys.argv[2]
params = {'email': email}
response = requests.post(url_arg, data=params)
print(response.text)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import requests
import sys
if __name__ == '__main__':
url_arg = sys.argv[1]
email = sys.argv[2]
params = {'email': email}
response = requests.post(url_arg, data=params)
print(response.text)
<|reserved_special_token_1|>
#!/usr/bin/python3
"""takes in a URL and an email address, sends a POST request to the passed
URL with the email as a parameter, and finally
displays the body of the response.
"""
import requests
import sys
if __name__ == "__main__":
url_arg = sys.argv[1]
email = sys.argv[2]
params = {'email': email}
response = requests.post(url_arg, data=params)
print(response.text)
|
flexible
|
{
"blob_id": "0d9c50e55df5aa5614bd5a9679729cf7fa69c5df",
"index": 1461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n url_arg = sys.argv[1]\n email = sys.argv[2]\n params = {'email': email}\n response = requests.post(url_arg, data=params)\n print(response.text)\n",
"step-3": "<mask token>\nimport requests\nimport sys\nif __name__ == '__main__':\n url_arg = sys.argv[1]\n email = sys.argv[2]\n params = {'email': email}\n response = requests.post(url_arg, data=params)\n print(response.text)\n",
"step-4": "#!/usr/bin/python3\n\"\"\"takes in a URL and an email address, sends a POST request to the passed\nURL with the email as a parameter, and finally\ndisplays the body of the response.\n\"\"\"\nimport requests\nimport sys\n\n\nif __name__ == \"__main__\":\n url_arg = sys.argv[1]\n email = sys.argv[2]\n params = {'email': email}\n response = requests.post(url_arg, data=params)\n print(response.text)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
kubectl = local['kubectl']
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
<|reserved_special_token_1|>
from plumbum import local, FG, ProcessExecutionError
import logging
import os.path
from task import app
kubectl = local['kubectl']
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info(kubectl['delete', name]())
return True
except ProcessExecutionError:
return False
<|reserved_special_token_1|>
from plumbum import local, FG, ProcessExecutionError
import logging
import os.path
from task import app
kubectl = local["kubectl"]
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl["create", "-f", "-", "--logtostderr"] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info((kubectl["delete", name])())
return True
except ProcessExecutionError:
return False
|
flexible
|
{
"blob_id": "137e80b3bfdc0dba33a3108b37d21d298a8f251d",
"index": 1544,
"step-1": "<mask token>\n\n\n@app.task\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-2": "<mask token>\n\n\n@app.task\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())\n\n\n@app.task\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-3": "<mask token>\nkubectl = local['kubectl']\n\n\n@app.task\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())\n\n\n@app.task\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-4": "from plumbum import local, FG, ProcessExecutionError\nimport logging\nimport os.path\nfrom task import app\nkubectl = local['kubectl']\n\n\n@app.task\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())\n\n\n@app.task\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-5": "from plumbum import local, FG, ProcessExecutionError\nimport logging\nimport os.path\n\nfrom task import app\n\nkubectl = local[\"kubectl\"]\n\n@app.task\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl[\"create\", \"-f\", \"-\", \"--logtostderr\"] << template)())\n\n@app.task\ndef delete_kube_by_name(name):\n try:\n logging.info((kubectl[\"delete\", name])())\n return True\n except ProcessExecutionError:\n return False\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Node:
object_id = 0
weight = 0
value = 0
def __init__(self, object_id, weight, value):
self.object_id = object_id
self.weight = weight
self.value = value
<|reserved_special_token_0|>
def read_file(file):
f = open(file, 'r')
f.seek(0)
queue = Queue(maxsize=0)
list_elements = []
nodes = []
for line in f:
id = int(line.split('.', 1)[0])
value = int(line.split(' ', 3)[1])
weight = int(line.split(' ', 3)[2].split('\n', 2)[0])
nodes.append(Node(id, weight, value))
list_elements.append(-1)
list_elements.append(0)
list_elements.append(0)
queue.put(list_elements)
res = go_explore(queue, nodes)
for i in range(0, len(res) - 2):
if res[i] == 1:
node = nodes[i]
res[i] = {'id': node.object_id, 'weight': node.weight, 'value':
node.value}
res = list(filter(lambda x: x != 0, res))
value = len(res) - 1
weight = len(res) - 2
res[value] = {'total value': res[value]}
res[weight] = {'total weight': res[weight]}
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node:
object_id = 0
weight = 0
value = 0
def __init__(self, object_id, weight, value):
self.object_id = object_id
self.weight = weight
self.value = value
<|reserved_special_token_0|>
def read_file(file):
f = open(file, 'r')
f.seek(0)
queue = Queue(maxsize=0)
list_elements = []
nodes = []
for line in f:
id = int(line.split('.', 1)[0])
value = int(line.split(' ', 3)[1])
weight = int(line.split(' ', 3)[2].split('\n', 2)[0])
nodes.append(Node(id, weight, value))
list_elements.append(-1)
list_elements.append(0)
list_elements.append(0)
queue.put(list_elements)
res = go_explore(queue, nodes)
for i in range(0, len(res) - 2):
if res[i] == 1:
node = nodes[i]
res[i] = {'id': node.object_id, 'weight': node.weight, 'value':
node.value}
res = list(filter(lambda x: x != 0, res))
value = len(res) - 1
weight = len(res) - 2
res[value] = {'total value': res[value]}
res[weight] = {'total weight': res[weight]}
return res
def go_explore(queue, nodes):
best_value = 0
res = []
while not queue.empty():
q = copy(queue.get())
for i in range(len(q)):
if q[i] is -1:
weight = q[len(q) - 2]
value = q[len(q) - 1]
if weight <= 420:
if value > best_value:
res = q
best_value = value
q[i] = 0
queue.put(q)
q_positive = copy(q)
q_positive[len(q_positive) - 1] = value + nodes[i].value
q_positive[len(q_positive) - 2] = weight + nodes[i].weight
q_positive[i] = 1
queue.put(q_positive)
break
elif i == len(q) - 1:
weight = q[len(q) - 2]
value = q[len(q) - 1]
if weight <= 420:
if value > best_value:
res = q
best_value = value
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
start = time.time()
<|reserved_special_token_0|>
process = psutil.Process(os.getpid())
class Node:
object_id = 0
weight = 0
value = 0
def __init__(self, object_id, weight, value):
self.object_id = object_id
self.weight = weight
self.value = value
<|reserved_special_token_0|>
def read_file(file):
f = open(file, 'r')
f.seek(0)
queue = Queue(maxsize=0)
list_elements = []
nodes = []
for line in f:
id = int(line.split('.', 1)[0])
value = int(line.split(' ', 3)[1])
weight = int(line.split(' ', 3)[2].split('\n', 2)[0])
nodes.append(Node(id, weight, value))
list_elements.append(-1)
list_elements.append(0)
list_elements.append(0)
queue.put(list_elements)
res = go_explore(queue, nodes)
for i in range(0, len(res) - 2):
if res[i] == 1:
node = nodes[i]
res[i] = {'id': node.object_id, 'weight': node.weight, 'value':
node.value}
res = list(filter(lambda x: x != 0, res))
value = len(res) - 1
weight = len(res) - 2
res[value] = {'total value': res[value]}
res[weight] = {'total weight': res[weight]}
return res
def go_explore(queue, nodes):
best_value = 0
res = []
while not queue.empty():
q = copy(queue.get())
for i in range(len(q)):
if q[i] is -1:
weight = q[len(q) - 2]
value = q[len(q) - 1]
if weight <= 420:
if value > best_value:
res = q
best_value = value
q[i] = 0
queue.put(q)
q_positive = copy(q)
q_positive[len(q_positive) - 1] = value + nodes[i].value
q_positive[len(q_positive) - 2] = weight + nodes[i].weight
q_positive[i] = 1
queue.put(q_positive)
break
elif i == len(q) - 1:
weight = q[len(q) - 2]
value = q[len(q) - 1]
if weight <= 420:
if value > best_value:
res = q
best_value = value
return res
solution = read_file('Knapsack/data_knapsack')
for data in solution:
print(data)
end = time.time()
print(end - start)
print(process.memory_info().rss)
<|reserved_special_token_1|>
import time
import os
import psutil
start = time.time()
from queue import Queue
from copy import copy
process = psutil.Process(os.getpid())
class Node:
object_id = 0
weight = 0
value = 0
def __init__(self, object_id, weight, value):
self.object_id = object_id
self.weight = weight
self.value = value
<|reserved_special_token_0|>
def read_file(file):
f = open(file, 'r')
f.seek(0)
queue = Queue(maxsize=0)
list_elements = []
nodes = []
for line in f:
id = int(line.split('.', 1)[0])
value = int(line.split(' ', 3)[1])
weight = int(line.split(' ', 3)[2].split('\n', 2)[0])
nodes.append(Node(id, weight, value))
list_elements.append(-1)
list_elements.append(0)
list_elements.append(0)
queue.put(list_elements)
res = go_explore(queue, nodes)
for i in range(0, len(res) - 2):
if res[i] == 1:
node = nodes[i]
res[i] = {'id': node.object_id, 'weight': node.weight, 'value':
node.value}
res = list(filter(lambda x: x != 0, res))
value = len(res) - 1
weight = len(res) - 2
res[value] = {'total value': res[value]}
res[weight] = {'total weight': res[weight]}
return res
def go_explore(queue, nodes):
best_value = 0
res = []
while not queue.empty():
q = copy(queue.get())
for i in range(len(q)):
if q[i] is -1:
weight = q[len(q) - 2]
value = q[len(q) - 1]
if weight <= 420:
if value > best_value:
res = q
best_value = value
q[i] = 0
queue.put(q)
q_positive = copy(q)
q_positive[len(q_positive) - 1] = value + nodes[i].value
q_positive[len(q_positive) - 2] = weight + nodes[i].weight
q_positive[i] = 1
queue.put(q_positive)
break
elif i == len(q) - 1:
weight = q[len(q) - 2]
value = q[len(q) - 1]
if weight <= 420:
if value > best_value:
res = q
best_value = value
return res
solution = read_file('Knapsack/data_knapsack')
for data in solution:
print(data)
end = time.time()
print(end - start)
print(process.memory_info().rss)
<|reserved_special_token_1|>
import time
import os
import psutil
start = time.time()
from queue import Queue
from copy import copy
process = psutil.Process(os.getpid())
class Node:
object_id = 0
weight = 0
value = 0
def __init__(self,object_id,weight,value):
self.object_id=object_id
self.weight=weight
self.value=value
""" First we need to extract values from the file"""
def read_file(file):
f = open(file, "r")
f.seek(0)
queue=Queue(maxsize=0)
list_elements=[]
nodes=[]
for line in f:
id=int(line.split(".", 1)[0])
value= int(line.split(" ", 3)[1])
weight=int(line.split(" ", 3)[2].split('\n', 2)[0])
nodes.append(Node(id,weight,value))
list_elements.append(-1)
list_elements.append(0)
list_elements.append(0)
queue.put(list_elements)
res=go_explore(queue,nodes)
for i in range(0,len(res)-2):
if(res[i]==1):
node=nodes[i]
res[i]={"id":node.object_id,"weight":node.weight,"value":node.value}
res=list(filter(lambda x: x != 0, res))
value=len(res)-1
weight=len(res)-2
res[value]={"total value":res[value]}
res[weight]={"total weight":res[weight]}
return res
def go_explore(queue,nodes):
best_value = 0
res=[]
while not queue.empty():
q=copy(queue.get())
for i in range(len(q)):
if q[i] is -1:
weight = q[len(q)-2]
value = q[len(q)-1]
if weight<=420:
if value > best_value:
res = q
best_value=value
q[i]=0
queue.put(q)
q_positive= copy(q)
q_positive[len(q_positive)-1]=value+nodes[i].value
q_positive[len(q_positive)-2]=weight+nodes[i].weight
q_positive[i]=1
queue.put(q_positive)
break
elif i == len(q)-1:
weight = q[len(q)-2]
value = q[len(q)-1]
if weight<=420:
if value > best_value:
res = q
best_value=value
return res
solution=read_file('Knapsack/data_knapsack')
for data in solution:
print(data)
end = time.time()
print(end - start)
print(process.memory_info().rss)
|
flexible
|
{
"blob_id": "be408b349e2795101b525ad8d948dbf52cab81bf",
"index": 4281,
"step-1": "<mask token>\n\n\nclass Node:\n object_id = 0\n weight = 0\n value = 0\n\n def __init__(self, object_id, weight, value):\n self.object_id = object_id\n self.weight = weight\n self.value = value\n\n\n<mask token>\n\n\ndef read_file(file):\n f = open(file, 'r')\n f.seek(0)\n queue = Queue(maxsize=0)\n list_elements = []\n nodes = []\n for line in f:\n id = int(line.split('.', 1)[0])\n value = int(line.split(' ', 3)[1])\n weight = int(line.split(' ', 3)[2].split('\\n', 2)[0])\n nodes.append(Node(id, weight, value))\n list_elements.append(-1)\n list_elements.append(0)\n list_elements.append(0)\n queue.put(list_elements)\n res = go_explore(queue, nodes)\n for i in range(0, len(res) - 2):\n if res[i] == 1:\n node = nodes[i]\n res[i] = {'id': node.object_id, 'weight': node.weight, 'value':\n node.value}\n res = list(filter(lambda x: x != 0, res))\n value = len(res) - 1\n weight = len(res) - 2\n res[value] = {'total value': res[value]}\n res[weight] = {'total weight': res[weight]}\n return res\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node:\n object_id = 0\n weight = 0\n value = 0\n\n def __init__(self, object_id, weight, value):\n self.object_id = object_id\n self.weight = weight\n self.value = value\n\n\n<mask token>\n\n\ndef read_file(file):\n f = open(file, 'r')\n f.seek(0)\n queue = Queue(maxsize=0)\n list_elements = []\n nodes = []\n for line in f:\n id = int(line.split('.', 1)[0])\n value = int(line.split(' ', 3)[1])\n weight = int(line.split(' ', 3)[2].split('\\n', 2)[0])\n nodes.append(Node(id, weight, value))\n list_elements.append(-1)\n list_elements.append(0)\n list_elements.append(0)\n queue.put(list_elements)\n res = go_explore(queue, nodes)\n for i in range(0, len(res) - 2):\n if res[i] == 1:\n node = nodes[i]\n res[i] = {'id': node.object_id, 'weight': node.weight, 'value':\n node.value}\n res = list(filter(lambda x: x != 0, res))\n value = len(res) - 1\n weight = len(res) - 2\n res[value] = {'total value': res[value]}\n res[weight] = {'total weight': res[weight]}\n return res\n\n\ndef go_explore(queue, nodes):\n best_value = 0\n res = []\n while not queue.empty():\n q = copy(queue.get())\n for i in range(len(q)):\n if q[i] is -1:\n weight = q[len(q) - 2]\n value = q[len(q) - 1]\n if weight <= 420:\n if value > best_value:\n res = q\n best_value = value\n q[i] = 0\n queue.put(q)\n q_positive = copy(q)\n q_positive[len(q_positive) - 1] = value + nodes[i].value\n q_positive[len(q_positive) - 2] = weight + nodes[i].weight\n q_positive[i] = 1\n queue.put(q_positive)\n break\n elif i == len(q) - 1:\n weight = q[len(q) - 2]\n value = q[len(q) - 1]\n if weight <= 420:\n if value > best_value:\n res = q\n best_value = value\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\nstart = time.time()\n<mask token>\nprocess = psutil.Process(os.getpid())\n\n\nclass Node:\n object_id = 0\n weight = 0\n value = 0\n\n def __init__(self, object_id, weight, value):\n self.object_id = object_id\n self.weight = weight\n self.value = value\n\n\n<mask token>\n\n\ndef read_file(file):\n f = open(file, 'r')\n f.seek(0)\n queue = Queue(maxsize=0)\n list_elements = []\n nodes = []\n for line in f:\n id = int(line.split('.', 1)[0])\n value = int(line.split(' ', 3)[1])\n weight = int(line.split(' ', 3)[2].split('\\n', 2)[0])\n nodes.append(Node(id, weight, value))\n list_elements.append(-1)\n list_elements.append(0)\n list_elements.append(0)\n queue.put(list_elements)\n res = go_explore(queue, nodes)\n for i in range(0, len(res) - 2):\n if res[i] == 1:\n node = nodes[i]\n res[i] = {'id': node.object_id, 'weight': node.weight, 'value':\n node.value}\n res = list(filter(lambda x: x != 0, res))\n value = len(res) - 1\n weight = len(res) - 2\n res[value] = {'total value': res[value]}\n res[weight] = {'total weight': res[weight]}\n return res\n\n\ndef go_explore(queue, nodes):\n best_value = 0\n res = []\n while not queue.empty():\n q = copy(queue.get())\n for i in range(len(q)):\n if q[i] is -1:\n weight = q[len(q) - 2]\n value = q[len(q) - 1]\n if weight <= 420:\n if value > best_value:\n res = q\n best_value = value\n q[i] = 0\n queue.put(q)\n q_positive = copy(q)\n q_positive[len(q_positive) - 1] = value + nodes[i].value\n q_positive[len(q_positive) - 2] = weight + nodes[i].weight\n q_positive[i] = 1\n queue.put(q_positive)\n break\n elif i == len(q) - 1:\n weight = q[len(q) - 2]\n value = q[len(q) - 1]\n if weight <= 420:\n if value > best_value:\n res = q\n best_value = value\n return res\n\n\nsolution = read_file('Knapsack/data_knapsack')\nfor data in solution:\n print(data)\nend = time.time()\nprint(end - start)\nprint(process.memory_info().rss)\n",
"step-4": "import time\nimport os\nimport psutil\nstart = time.time()\nfrom queue import Queue\nfrom copy import copy\nprocess = psutil.Process(os.getpid())\n\n\nclass Node:\n object_id = 0\n weight = 0\n value = 0\n\n def __init__(self, object_id, weight, value):\n self.object_id = object_id\n self.weight = weight\n self.value = value\n\n\n<mask token>\n\n\ndef read_file(file):\n f = open(file, 'r')\n f.seek(0)\n queue = Queue(maxsize=0)\n list_elements = []\n nodes = []\n for line in f:\n id = int(line.split('.', 1)[0])\n value = int(line.split(' ', 3)[1])\n weight = int(line.split(' ', 3)[2].split('\\n', 2)[0])\n nodes.append(Node(id, weight, value))\n list_elements.append(-1)\n list_elements.append(0)\n list_elements.append(0)\n queue.put(list_elements)\n res = go_explore(queue, nodes)\n for i in range(0, len(res) - 2):\n if res[i] == 1:\n node = nodes[i]\n res[i] = {'id': node.object_id, 'weight': node.weight, 'value':\n node.value}\n res = list(filter(lambda x: x != 0, res))\n value = len(res) - 1\n weight = len(res) - 2\n res[value] = {'total value': res[value]}\n res[weight] = {'total weight': res[weight]}\n return res\n\n\ndef go_explore(queue, nodes):\n best_value = 0\n res = []\n while not queue.empty():\n q = copy(queue.get())\n for i in range(len(q)):\n if q[i] is -1:\n weight = q[len(q) - 2]\n value = q[len(q) - 1]\n if weight <= 420:\n if value > best_value:\n res = q\n best_value = value\n q[i] = 0\n queue.put(q)\n q_positive = copy(q)\n q_positive[len(q_positive) - 1] = value + nodes[i].value\n q_positive[len(q_positive) - 2] = weight + nodes[i].weight\n q_positive[i] = 1\n queue.put(q_positive)\n break\n elif i == len(q) - 1:\n weight = q[len(q) - 2]\n value = q[len(q) - 1]\n if weight <= 420:\n if value > best_value:\n res = q\n best_value = value\n return res\n\n\nsolution = read_file('Knapsack/data_knapsack')\nfor data in solution:\n print(data)\nend = time.time()\nprint(end - start)\nprint(process.memory_info().rss)\n",
"step-5": "import time\nimport os\nimport psutil\nstart = time.time()\nfrom queue import Queue\nfrom copy import copy\n\nprocess = psutil.Process(os.getpid())\n\nclass Node:\n object_id = 0\n weight = 0\n value = 0\n \n def __init__(self,object_id,weight,value):\n self.object_id=object_id\n self.weight=weight\n self.value=value\n\n\n\n\"\"\" First we need to extract values from the file\"\"\"\ndef read_file(file):\n f = open(file, \"r\")\n f.seek(0)\n queue=Queue(maxsize=0) \n list_elements=[]\n nodes=[]\n for line in f:\n id=int(line.split(\".\", 1)[0])\n value= int(line.split(\" \", 3)[1])\n weight=int(line.split(\" \", 3)[2].split('\\n', 2)[0])\n nodes.append(Node(id,weight,value))\n list_elements.append(-1)\n list_elements.append(0)\n list_elements.append(0)\n\n queue.put(list_elements)\n res=go_explore(queue,nodes) \n for i in range(0,len(res)-2):\n if(res[i]==1):\n node=nodes[i]\n res[i]={\"id\":node.object_id,\"weight\":node.weight,\"value\":node.value}\n res=list(filter(lambda x: x != 0, res))\n \n value=len(res)-1\n weight=len(res)-2\n res[value]={\"total value\":res[value]}\n res[weight]={\"total weight\":res[weight]}\n return res\n\n\n\ndef go_explore(queue,nodes):\n best_value = 0\n res=[]\n while not queue.empty():\n q=copy(queue.get())\n for i in range(len(q)):\n if q[i] is -1:\n weight = q[len(q)-2]\n value = q[len(q)-1]\n if weight<=420:\n if value > best_value:\n res = q\n best_value=value\n q[i]=0\n queue.put(q)\n q_positive= copy(q)\n q_positive[len(q_positive)-1]=value+nodes[i].value\n q_positive[len(q_positive)-2]=weight+nodes[i].weight\n q_positive[i]=1\n queue.put(q_positive)\n break\n elif i == len(q)-1:\n weight = q[len(q)-2]\n value = q[len(q)-1]\n if weight<=420:\n if value > best_value:\n res = q\n best_value=value\n \n return res \n\n\n \n \nsolution=read_file('Knapsack/data_knapsack')\n\n\n\nfor data in solution:\n print(data)\n\nend = time.time()\nprint(end - start)\nprint(process.memory_info().rss)\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
def main():
root = tk.Tk()
root.title('DailyFudan')
set_win_center(root, 700, 350)
root.resizable(0, 0)
lblid = tk.Label(root, text='学号:')
lblid.grid(row=0, column=0)
entID = tk.Entry(root)
entID.grid(row=0, column=1, padx=25, pady=0)
lblPW = tk.Label(root, text='Ehall密码:')
lblPW.grid(row=1, column=0)
entPW = tk.Entry(root, show='*')
entPW.grid(row=1, column=1)
lblArea = tk.Label(root, text='区域:')
lblArea.grid(row=2, column=0)
varArea = tk.StringVar(value='上海市 杨浦区')
entArea = tk.Entry(root, textvariable=varArea, width=20)
entArea.grid(row=2, column=1)
lblProv = tk.Label(root, text='省份:')
lblProv.grid(row=3, column=0)
varProv = tk.StringVar(value='上海')
entProv = tk.Entry(root, textvariable=varProv, width=20)
entProv.grid(row=3, column=1)
lblCity = tk.Label(root, text='城市:')
lblCity.grid(row=4, column=0)
varCity = tk.StringVar(value='上海市')
entCity = tk.Entry(root, textvariable=varCity, width=20)
entCity.grid(row=4, column=1)
scroll = tk.Scrollbar()
textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')
textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)
scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.
N, ipadx=0)
scroll.config(command=textlog.yview)
textlog.config(yscrollcommand=scroll.set)
def submit_btn_cmd():
id = entID.get().strip()
pw = entPW.get().strip()
config = {'id': id, 'pw': pw}
ehall = Ehall(config)
ehall.login()
if ehall.username:
address_info = {'area': varArea.get(), 'province': varProv.get(
), 'city': varCity.get()}
data = sign_up(ehall, address_info)
print(data)
if data['e'] == 0:
log = '>>填报成功!%s %s\n' % (ehall.username, time.ctime())
else:
log = '>>今日已填报!%s %s\n' % (ehall.username, time.ctime())
else:
log = '>>登录失败!%s %s\n' % (ehall.username, time.ctime())
textlog.config(state=tk.NORMAL)
textlog.insert('insert', log)
textlog.config(state=tk.DISABLED)
btuExit = tk.Button(root, text='退出', command=root.quit, width=10)
btuExit.grid(row=5, column=1, pady=2)
btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)
btuSub.grid(row=5, column=0, pady=2, padx=20)
root.mainloop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def set_win_center(root, curWidth='', curHight=''):
"""
设置窗口大小,并居中显示
:param root:主窗体实例
:param curWidth:窗口宽度,非必填,默认200
:param curHight:窗口高度,非必填,默认200
:return:无
"""
if not curWidth:
"""获取窗口宽度,默认200"""
curWidth = root.winfo_width()
if not curHight:
"""获取窗口高度,默认200"""
curHight = root.winfo_height()
scn_w, scn_h = root.maxsize()
cen_x = (scn_w - curWidth) / 2
cen_y = (scn_h - curHight) / 2
size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)
root.geometry(size_xy)
def sign_up(ehall, address_info):
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'
response = ehall.session.get(url, headers=ehall.headers, verify=False)
data = response.json()
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'
data['d']['info'].update(address_info)
post_data = data['d']['info']
response = ehall.session.post(url, data=post_data, verify=False,
headers=ehall.headers)
return response.json()
def main():
root = tk.Tk()
root.title('DailyFudan')
set_win_center(root, 700, 350)
root.resizable(0, 0)
lblid = tk.Label(root, text='学号:')
lblid.grid(row=0, column=0)
entID = tk.Entry(root)
entID.grid(row=0, column=1, padx=25, pady=0)
lblPW = tk.Label(root, text='Ehall密码:')
lblPW.grid(row=1, column=0)
entPW = tk.Entry(root, show='*')
entPW.grid(row=1, column=1)
lblArea = tk.Label(root, text='区域:')
lblArea.grid(row=2, column=0)
varArea = tk.StringVar(value='上海市 杨浦区')
entArea = tk.Entry(root, textvariable=varArea, width=20)
entArea.grid(row=2, column=1)
lblProv = tk.Label(root, text='省份:')
lblProv.grid(row=3, column=0)
varProv = tk.StringVar(value='上海')
entProv = tk.Entry(root, textvariable=varProv, width=20)
entProv.grid(row=3, column=1)
lblCity = tk.Label(root, text='城市:')
lblCity.grid(row=4, column=0)
varCity = tk.StringVar(value='上海市')
entCity = tk.Entry(root, textvariable=varCity, width=20)
entCity.grid(row=4, column=1)
scroll = tk.Scrollbar()
textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')
textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)
scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.
N, ipadx=0)
scroll.config(command=textlog.yview)
textlog.config(yscrollcommand=scroll.set)
def submit_btn_cmd():
id = entID.get().strip()
pw = entPW.get().strip()
config = {'id': id, 'pw': pw}
ehall = Ehall(config)
ehall.login()
if ehall.username:
address_info = {'area': varArea.get(), 'province': varProv.get(
), 'city': varCity.get()}
data = sign_up(ehall, address_info)
print(data)
if data['e'] == 0:
log = '>>填报成功!%s %s\n' % (ehall.username, time.ctime())
else:
log = '>>今日已填报!%s %s\n' % (ehall.username, time.ctime())
else:
log = '>>登录失败!%s %s\n' % (ehall.username, time.ctime())
textlog.config(state=tk.NORMAL)
textlog.insert('insert', log)
textlog.config(state=tk.DISABLED)
btuExit = tk.Button(root, text='退出', command=root.quit, width=10)
btuExit.grid(row=5, column=1, pady=2)
btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)
btuSub.grid(row=5, column=0, pady=2, padx=20)
root.mainloop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def set_win_center(root, curWidth='', curHight=''):
"""
设置窗口大小,并居中显示
:param root:主窗体实例
:param curWidth:窗口宽度,非必填,默认200
:param curHight:窗口高度,非必填,默认200
:return:无
"""
if not curWidth:
"""获取窗口宽度,默认200"""
curWidth = root.winfo_width()
if not curHight:
"""获取窗口高度,默认200"""
curHight = root.winfo_height()
scn_w, scn_h = root.maxsize()
cen_x = (scn_w - curWidth) / 2
cen_y = (scn_h - curHight) / 2
size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)
root.geometry(size_xy)
def sign_up(ehall, address_info):
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'
response = ehall.session.get(url, headers=ehall.headers, verify=False)
data = response.json()
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'
data['d']['info'].update(address_info)
post_data = data['d']['info']
response = ehall.session.post(url, data=post_data, verify=False,
headers=ehall.headers)
return response.json()
def main():
root = tk.Tk()
root.title('DailyFudan')
set_win_center(root, 700, 350)
root.resizable(0, 0)
lblid = tk.Label(root, text='学号:')
lblid.grid(row=0, column=0)
entID = tk.Entry(root)
entID.grid(row=0, column=1, padx=25, pady=0)
lblPW = tk.Label(root, text='Ehall密码:')
lblPW.grid(row=1, column=0)
entPW = tk.Entry(root, show='*')
entPW.grid(row=1, column=1)
lblArea = tk.Label(root, text='区域:')
lblArea.grid(row=2, column=0)
varArea = tk.StringVar(value='上海市 杨浦区')
entArea = tk.Entry(root, textvariable=varArea, width=20)
entArea.grid(row=2, column=1)
lblProv = tk.Label(root, text='省份:')
lblProv.grid(row=3, column=0)
varProv = tk.StringVar(value='上海')
entProv = tk.Entry(root, textvariable=varProv, width=20)
entProv.grid(row=3, column=1)
lblCity = tk.Label(root, text='城市:')
lblCity.grid(row=4, column=0)
varCity = tk.StringVar(value='上海市')
entCity = tk.Entry(root, textvariable=varCity, width=20)
entCity.grid(row=4, column=1)
scroll = tk.Scrollbar()
textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')
textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)
scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.
N, ipadx=0)
scroll.config(command=textlog.yview)
textlog.config(yscrollcommand=scroll.set)
def submit_btn_cmd():
id = entID.get().strip()
pw = entPW.get().strip()
config = {'id': id, 'pw': pw}
ehall = Ehall(config)
ehall.login()
if ehall.username:
address_info = {'area': varArea.get(), 'province': varProv.get(
), 'city': varCity.get()}
data = sign_up(ehall, address_info)
print(data)
if data['e'] == 0:
log = '>>填报成功!%s %s\n' % (ehall.username, time.ctime())
else:
log = '>>今日已填报!%s %s\n' % (ehall.username, time.ctime())
else:
log = '>>登录失败!%s %s\n' % (ehall.username, time.ctime())
textlog.config(state=tk.NORMAL)
textlog.insert('insert', log)
textlog.config(state=tk.DISABLED)
btuExit = tk.Button(root, text='退出', command=root.quit, width=10)
btuExit.grid(row=5, column=1, pady=2)
btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)
btuSub.grid(row=5, column=0, pady=2, padx=20)
root.mainloop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import time
import requests
import tkinter as tk
from login import Ehall
def set_win_center(root, curWidth='', curHight=''):
"""
设置窗口大小,并居中显示
:param root:主窗体实例
:param curWidth:窗口宽度,非必填,默认200
:param curHight:窗口高度,非必填,默认200
:return:无
"""
if not curWidth:
"""获取窗口宽度,默认200"""
curWidth = root.winfo_width()
if not curHight:
"""获取窗口高度,默认200"""
curHight = root.winfo_height()
scn_w, scn_h = root.maxsize()
cen_x = (scn_w - curWidth) / 2
cen_y = (scn_h - curHight) / 2
size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)
root.geometry(size_xy)
def sign_up(ehall, address_info):
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'
response = ehall.session.get(url, headers=ehall.headers, verify=False)
data = response.json()
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'
data['d']['info'].update(address_info)
post_data = data['d']['info']
response = ehall.session.post(url, data=post_data, verify=False,
headers=ehall.headers)
return response.json()
def main():
    """Build and run the DailyFudan GUI: credential/location inputs, a log
    pane, and a submit button that logs in and files the daily report."""
    root = tk.Tk()
    root.title('DailyFudan')
    set_win_center(root, 700, 350)
    root.resizable(0, 0)  # fixed-size window
    # Row 0: student ID.
    lblid = tk.Label(root, text='学号:')
    lblid.grid(row=0, column=0)
    entID = tk.Entry(root)
    entID.grid(row=0, column=1, padx=25, pady=0)
    # Row 1: Ehall password (masked).
    lblPW = tk.Label(root, text='Ehall密码:')
    lblPW.grid(row=1, column=0)
    entPW = tk.Entry(root, show='*')
    entPW.grid(row=1, column=1)
    # Rows 2-4: location fields, prefilled with campus defaults.
    lblArea = tk.Label(root, text='区域:')
    lblArea.grid(row=2, column=0)
    varArea = tk.StringVar(value='上海市 杨浦区')
    entArea = tk.Entry(root, textvariable=varArea, width=20)
    entArea.grid(row=2, column=1)
    lblProv = tk.Label(root, text='省份:')
    lblProv.grid(row=3, column=0)
    varProv = tk.StringVar(value='上海')
    entProv = tk.Entry(root, textvariable=varProv, width=20)
    entProv.grid(row=3, column=1)
    lblCity = tk.Label(root, text='城市:')
    lblCity.grid(row=4, column=0)
    varCity = tk.StringVar(value='上海市')
    entCity = tk.Entry(root, textvariable=varCity, width=20)
    entCity.grid(row=4, column=1)
    # Right-hand read-only log pane with an attached scrollbar.
    scroll = tk.Scrollbar()
    textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')
    textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)
    scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.
        N, ipadx=0)
    scroll.config(command=textlog.yview)
    textlog.config(yscrollcommand=scroll.set)

    def submit_btn_cmd():
        # Log in with the entered credentials and submit today's report;
        # append the outcome to the log pane.
        id = entID.get().strip()
        pw = entPW.get().strip()
        config = {'id': id, 'pw': pw}
        ehall = Ehall(config)
        ehall.login()
        # A username is only set on successful login.
        if ehall.username:
            address_info = {'area': varArea.get(), 'province': varProv.get(
                ), 'city': varCity.get()}
            data = sign_up(ehall, address_info)
            print(data)
            # e == 0 is the API's success code; otherwise already reported today.
            if data['e'] == 0:
                log = '>>填报成功!%s %s\n' % (ehall.username, time.ctime())
            else:
                log = '>>今日已填报!%s %s\n' % (ehall.username, time.ctime())
        else:
            log = '>>登录失败!%s %s\n' % (ehall.username, time.ctime())
        # Temporarily enable the read-only log widget to append the message.
        textlog.config(state=tk.NORMAL)
        textlog.insert('insert', log)
        textlog.config(state=tk.DISABLED)
    btuExit = tk.Button(root, text='退出', command=root.quit, width=10)
    btuExit.grid(row=5, column=1, pady=2)
    btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)
    btuSub.grid(row=5, column=0, pady=2, padx=20)
    root.mainloop()
if __name__ == '__main__':
    # Launch the GUI only when run as a script, not on import.
    main()
<|reserved_special_token_1|>
#!/usr/bin/env python
# ! -*- coding: utf-8 -*-
'''
@Time : 2020/6/4 16:33
@Author : MaohuaYang
@Contact : maohuay@hotmail.com
@File : pinganFudan-GUI.py
@Software: PyCharm
'''
import time
import requests
import tkinter as tk
from login import Ehall
def set_win_center(root, curWidth='', curHight=''):
"""
设置窗口大小,并居中显示
:param root:主窗体实例
:param curWidth:窗口宽度,非必填,默认200
:param curHight:窗口高度,非必填,默认200
:return:无
"""
if not curWidth:
'''获取窗口宽度,默认200'''
curWidth = root.winfo_width()
if not curHight:
'''获取窗口高度,默认200'''
curHight = root.winfo_height()
# print(curWidth, curHight)
# 获取屏幕宽度和高度
scn_w, scn_h = root.maxsize()
# print(scn_w, scn_h)
# 计算中心坐标
cen_x = (scn_w - curWidth) / 2
cen_y = (scn_h - curHight) / 2
# print(cen_x, cen_y)
# 设置窗口初始大小和位置
size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)
root.geometry(size_xy)
def sign_up(ehall, address_info):
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'
response = ehall.session.get(url, headers=ehall.headers, verify=False)
data = response.json()
url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'
data['d']['info'].update(address_info)
post_data = data['d']['info']
response = ehall.session.post(url, data=post_data, verify=False, headers=ehall.headers)
return response.json()
def main():
root = tk.Tk()
root.title("DailyFudan")
set_win_center(root, 700, 350)
root.resizable(0, 0)
# user ID
lblid = tk.Label(root, text="学号:")
lblid.grid(row=0, column=0)
#lid.pack()
entID = tk.Entry(root)
entID.grid(row=0, column=1, padx=25, pady=0)
#entID.pack()
# password
lblPW = tk.Label(root, text="Ehall密码:")
lblPW.grid(row=1, column=0)
#lPW.pack()
entPW = tk.Entry(root, show="*")
entPW.grid(row=1, column=1)
#entPW.pack()
# location information
lblArea = tk.Label(root, text='区域:')
lblArea.grid(row=2, column=0)
varArea = tk.StringVar(value="上海市 杨浦区")
entArea = tk.Entry(root, textvariable=varArea, width=20)
entArea.grid(row=2, column=1)
#entArea.pack()
lblProv = tk.Label(root, text='省份:')
lblProv.grid(row=3, column=0)
varProv = tk.StringVar(value="上海")
entProv = tk.Entry(root, textvariable=varProv, width=20)
entProv.grid(row=3, column=1)
#entProv.pack()
lblCity = tk.Label(root, text='城市:')
lblCity.grid(row=4, column=0)
varCity = tk.StringVar(value="上海市")
entCity = tk.Entry(root, textvariable=varCity, width=20)
entCity.grid(row=4, column=1)
#entCity.pack()
# auto submit
# to be continue
# log area
scroll = tk.Scrollbar()
textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')
textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S+tk.W+tk.E+tk.N)
scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.N, ipadx=0)
scroll.config(command=textlog.yview)
textlog.config(yscrollcommand=scroll.set)
def submit_btn_cmd():
id = entID.get().strip()
pw = entPW.get().strip()
config = {
'id': id,
'pw': pw
}
ehall = Ehall(config)
ehall.login()
if ehall.username:
address_info = {
"area": varArea.get(),
"province": varProv.get(),
"city": varCity.get()
}
data = sign_up(ehall, address_info)
print(data)
if data['e'] == 0:
log = ">>填报成功!%s %s\n" % (ehall.username, time.ctime())
else:
log = ">>今日已填报!%s %s\n" % (ehall.username, time.ctime())
else:
log = ">>登录失败!%s %s\n" % (ehall.username, time.ctime())
textlog.config(state=tk.NORMAL)
textlog.insert("insert", log)
textlog.config(state=tk.DISABLED)
btuExit = tk.Button(root, text='退出', command=root.quit, width=10)
btuExit.grid(row=5, column=1, pady=2)
btuSub = tk.Button(root, text="提交", command=submit_btn_cmd, width=10)
btuSub.grid(row=5, column=0, pady=2, padx=20)
root.mainloop()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "d133a07f69d2dadb5559d881b01050abb2a9602b",
"index": 3891,
"step-1": "<mask token>\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n 
textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n \"\"\"获取窗口宽度,默认200\"\"\"\n curWidth = root.winfo_width()\n if not curHight:\n \"\"\"获取窗口高度,默认200\"\"\"\n curHight = root.winfo_height()\n scn_w, scn_h = root.maxsize()\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False,\n headers=ehall.headers)\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n 
scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n \"\"\"获取窗口宽度,默认200\"\"\"\n curWidth = root.winfo_width()\n if not curHight:\n \"\"\"获取窗口高度,默认200\"\"\"\n curHight = root.winfo_height()\n scn_w, scn_h = root.maxsize()\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False,\n headers=ehall.headers)\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n 
scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport time\nimport requests\nimport tkinter as tk\nfrom login import Ehall\n\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n \"\"\"获取窗口宽度,默认200\"\"\"\n curWidth = root.winfo_width()\n if not curHight:\n \"\"\"获取窗口高度,默认200\"\"\"\n curHight = root.winfo_height()\n scn_w, scn_h = root.maxsize()\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False,\n headers=ehall.headers)\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title('DailyFudan')\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n lblid = tk.Label(root, text='学号:')\n lblid.grid(row=0, column=0)\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n lblPW = tk.Label(root, text='Ehall密码:')\n lblPW.grid(row=1, column=0)\n entPW = tk.Entry(root, show='*')\n entPW.grid(row=1, column=1)\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value='上海市 杨浦区')\n entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value='上海')\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value='上海市')\n entCity = 
tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n scroll = tk.Scrollbar()\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S + tk.W + tk.E + tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.\n N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {'id': id, 'pw': pw}\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {'area': varArea.get(), 'province': varProv.get(\n ), 'city': varCity.get()}\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = '>>填报成功!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>今日已填报!%s %s\\n' % (ehall.username, time.ctime())\n else:\n log = '>>登录失败!%s %s\\n' % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert('insert', log)\n textlog.config(state=tk.DISABLED)\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text='提交', command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n# ! -*- coding: utf-8 -*-\n'''\n@Time : 2020/6/4 16:33\n@Author : MaohuaYang\n@Contact : maohuay@hotmail.com\n@File : pinganFudan-GUI.py\n@Software: PyCharm\n'''\n\nimport time\nimport requests\nimport tkinter as tk\nfrom login import Ehall\n\ndef set_win_center(root, curWidth='', curHight=''):\n \"\"\"\n 设置窗口大小,并居中显示\n :param root:主窗体实例\n :param curWidth:窗口宽度,非必填,默认200\n :param curHight:窗口高度,非必填,默认200\n :return:无\n \"\"\"\n if not curWidth:\n '''获取窗口宽度,默认200'''\n curWidth = root.winfo_width()\n if not curHight:\n '''获取窗口高度,默认200'''\n curHight = root.winfo_height()\n # print(curWidth, curHight)\n\n # 获取屏幕宽度和高度\n scn_w, scn_h = root.maxsize()\n # print(scn_w, scn_h)\n\n # 计算中心坐标\n cen_x = (scn_w - curWidth) / 2\n cen_y = (scn_h - curHight) / 2\n # print(cen_x, cen_y)\n\n # 设置窗口初始大小和位置\n size_xy = '%dx%d+%d+%d' % (curWidth, curHight, cen_x, cen_y)\n root.geometry(size_xy)\n\ndef sign_up(ehall, address_info):\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/get-info'\n response = ehall.session.get(url, headers=ehall.headers, verify=False)\n data = response.json()\n\n url = 'https://zlapp.fudan.edu.cn/ncov/wap/fudan/save'\n data['d']['info'].update(address_info)\n post_data = data['d']['info']\n response = ehall.session.post(url, data=post_data, verify=False, headers=ehall.headers)\n\n return response.json()\n\n\ndef main():\n root = tk.Tk()\n root.title(\"DailyFudan\")\n set_win_center(root, 700, 350)\n root.resizable(0, 0)\n\n # user ID\n lblid = tk.Label(root, text=\"学号:\")\n lblid.grid(row=0, column=0)\n #lid.pack()\n\n entID = tk.Entry(root)\n entID.grid(row=0, column=1, padx=25, pady=0)\n #entID.pack()\n # password\n lblPW = tk.Label(root, text=\"Ehall密码:\")\n lblPW.grid(row=1, column=0)\n #lPW.pack()\n entPW = tk.Entry(root, show=\"*\")\n entPW.grid(row=1, column=1)\n #entPW.pack()\n\n # location information\n lblArea = tk.Label(root, text='区域:')\n lblArea.grid(row=2, column=0)\n varArea = tk.StringVar(value=\"上海市 杨浦区\")\n 
entArea = tk.Entry(root, textvariable=varArea, width=20)\n entArea.grid(row=2, column=1)\n #entArea.pack()\n lblProv = tk.Label(root, text='省份:')\n lblProv.grid(row=3, column=0)\n varProv = tk.StringVar(value=\"上海\")\n entProv = tk.Entry(root, textvariable=varProv, width=20)\n entProv.grid(row=3, column=1)\n #entProv.pack()\n lblCity = tk.Label(root, text='城市:')\n lblCity.grid(row=4, column=0)\n varCity = tk.StringVar(value=\"上海市\")\n entCity = tk.Entry(root, textvariable=varCity, width=20)\n entCity.grid(row=4, column=1)\n #entCity.pack()\n\n # auto submit\n # to be continue\n\n # log area\n scroll = tk.Scrollbar()\n\n textlog = tk.Text(root, state=tk.DISABLED, width=50, bg='lightgray')\n textlog.grid(row=0, rowspan=6, column=2, sticky=tk.S+tk.W+tk.E+tk.N)\n scroll.grid(row=0, rowspan=6, column=3, sticky=tk.S + tk.W + tk.E + tk.N, ipadx=0)\n scroll.config(command=textlog.yview)\n textlog.config(yscrollcommand=scroll.set)\n\n def submit_btn_cmd():\n id = entID.get().strip()\n pw = entPW.get().strip()\n config = {\n 'id': id,\n 'pw': pw\n }\n ehall = Ehall(config)\n ehall.login()\n if ehall.username:\n address_info = {\n \"area\": varArea.get(),\n \"province\": varProv.get(),\n \"city\": varCity.get()\n }\n data = sign_up(ehall, address_info)\n print(data)\n if data['e'] == 0:\n log = \">>填报成功!%s %s\\n\" % (ehall.username, time.ctime())\n else:\n log = \">>今日已填报!%s %s\\n\" % (ehall.username, time.ctime())\n else:\n log = \">>登录失败!%s %s\\n\" % (ehall.username, time.ctime())\n textlog.config(state=tk.NORMAL)\n textlog.insert(\"insert\", log)\n textlog.config(state=tk.DISABLED)\n\n btuExit = tk.Button(root, text='退出', command=root.quit, width=10)\n btuExit.grid(row=5, column=1, pady=2)\n btuSub = tk.Button(root, text=\"提交\", command=submit_btn_cmd, width=10)\n btuSub.grid(row=5, column=0, pady=2, padx=20)\n\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
def test():
while True:
usr = input('Enter the string to convert\n\n\t')
if usr != '':
return usr
else:
print('\nNo string entered.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
def test():
while True:
usr = input('Enter the string to convert\n\n\t')
if usr != '':
return usr
else:
print('\nNo string entered.')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import time
import binascii
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
def test():
while True:
usr = input('Enter the string to convert\n\n\t')
if usr != '':
return usr
else:
print('\nNo string entered.')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/python3
'''
generator.py
This program inputs a strings, and outputs the corresponding hex
Creator: Ethan Knight
Email: ethantknight@gmail.com
Published: 20181116
'''
import sys
import time
import binascii
def main():
    """Interactive loop: read strings and print their hex and base-10 encodings.

    Runs until the user presses Ctrl+C, then exits with status 0.
    """
    print("\n", sys.version_info)
    try:
        while True:
            print("\n\nPress Ctrl+C to exit.")
            text = test()
            # UTF-8 encode, then render the bytes as a hex string.
            hexed = binascii.hexlify(text.encode("utf8"))
            print("\nHex:\t\t", hexed)
            print("Base 10:\t", int(hexed, 16))
            time.sleep(0.5)
    except KeyboardInterrupt:
        print("\tProgram Terminated\n\n")
        sys.exit(0)
def test():
while True:
usr=input("Enter the string to convert\n\n\t")
if usr!="":
return usr
else:
print("\nNo string entered.")
if __name__=="__main__":
main()
|
flexible
|
{
"blob_id": "a52cbe6dbf4b4fc82d09e5f34e6e135933f3af38",
"index": 1418,
"step-1": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nimport time\nimport binascii\n\n\ndef main():\n print('\\n', sys.version_info)\n try:\n while True:\n print('\\n\\nPress Ctrl+C to exit.')\n usr = test()\n out = binascii.hexlify(bytes(usr, encoding='utf8'))\n print('\\nHex:\\t\\t', out)\n print('Base 10:\\t', int(out, 16))\n time.sleep(0.5)\n except KeyboardInterrupt:\n print('\\tProgram Terminated\\n\\n')\n sys.exit(0)\n\n\ndef test():\n while True:\n usr = input('Enter the string to convert\\n\\n\\t')\n if usr != '':\n return usr\n else:\n print('\\nNo string entered.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python3\n\n'''\n generator.py\n This program inputs a strings, and outputs the corresponding hex\n Creator: Ethan Knight\n Email: ethantknight@gmail.com\n Published: 20181116\n'''\n\nimport sys\nimport time\nimport binascii\n\ndef main():\n print(\"\\n\", sys.version_info)\n try:\n while True:\n print(\"\\n\\nPress Ctrl+C to exit.\")\n usr=test()\n out=binascii.hexlify(bytes(usr, encoding=\"utf8\"))\n print(\"\\nHex:\\t\\t\", out)\n print(\"Base 10:\\t\", int(out,16))\n time.sleep(.5)\n except KeyboardInterrupt:\n print(\"\\tProgram Terminated\\n\\n\")\n sys.exit(0)\n\ndef test():\n while True:\n usr=input(\"Enter the string to convert\\n\\n\\t\")\n if usr!=\"\":\n return usr\n else:\n print(\"\\nNo string entered.\")\n\nif __name__==\"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from checkio.home.long_repeat import long_repeat
def test_long_repeat():
    """Longest single-character run in typical mixed strings."""
    assert long_repeat('sdsffffse') == 4, 'First'
    assert long_repeat('ddvvrwwwrggg') == 3, 'Second'
def test_fails_1():
    """An empty string has no run at all."""
    assert long_repeat('') == 0, 'Empty String'
def test_fails_2():
    """A run may span the entire string."""
    assert long_repeat('aa') == 2
|
normal
|
{
"blob_id": "b459919e779063247c176e127368c687c903cf0f",
"index": 7869,
"step-1": "<mask token>\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\ndef test_fails_2():\n assert long_repeat('aa') == 2\n",
"step-3": "<mask token>\n\n\ndef test_long_repeat():\n assert long_repeat('sdsffffse') == 4, 'First'\n assert long_repeat('ddvvrwwwrggg') == 3, 'Second'\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\ndef test_fails_2():\n assert long_repeat('aa') == 2\n",
"step-4": "from checkio.home.long_repeat import long_repeat\n\n\ndef test_long_repeat():\n assert long_repeat('sdsffffse') == 4, 'First'\n assert long_repeat('ddvvrwwwrggg') == 3, 'Second'\n\n\ndef test_fails_1():\n assert long_repeat('') == 0, 'Empty String'\n\n\ndef test_fails_2():\n assert long_repeat('aa') == 2\n",
"step-5": "from checkio.home.long_repeat import long_repeat\n\n\ndef test_long_repeat():\n assert long_repeat(\"sdsffffse\") == 4, \"First\"\n assert long_repeat(\"ddvvrwwwrggg\") == 3, \"Second\"\n\n\ndef test_fails_1():\n assert long_repeat(\"\") == 0, \"Empty String\"\n\n\ndef test_fails_2():\n assert long_repeat(\"aa\") == 2\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Solution:

    def longestConsecutive(self, nums) -> int:
        """Return the length of the longest run of consecutive integers.

        O(n) expected time: a scan is started only at values that begin a
        run (i.e. value - 1 is absent), so each element is examined O(1)
        times overall.
        """
        values = set(nums)
        best = 0
        for start in values:
            if start - 1 in values:
                continue  # not the beginning of a run
            end = start
            while end in values:
                end += 1
            best = max(best, end - start)
        return best
|
normal
|
{
"blob_id": "9cb5573fada7a1529507da1d031f836044c10066",
"index": 2474,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def longestConsecutive(self, nums) ->int:\n s = set(nums)\n answer = 0\n for value in s:\n if value - 1 not in s:\n j = value\n while j in s:\n j = j + 1\n answer = max(answer, j - value)\n return answer\n",
"step-4": "class Solution:\n def longestConsecutive(self, nums) -> int:\n\n s = set(nums)\n answer = 0\n # n = len(s)\n\n for value in s:\n\n if value - 1 not in s:\n j = value\n while (j in s):\n j = j + 1\n\n answer = max(answer, j - value)\n\n return answer",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
# from django.contrib.admin import AdminSite
# class MyAdminSite(AdminSite):
# site_header = 'Finder Administration'
# admin_site = MyAdminSite(name='Finder Admin')
from finder.models import Database, Column, GpsData, Alarm, System
class ColumnInline(admin.TabularInline):
    # Inline table editor for the Column rows attached to a Database.
    model = Column
class GPSInline(admin.TabularInline):
    # Inline GPS rows for a Database; collapsed by default in the change form.
    model = GpsData
    classes= ('collapse',)
class DatabaseAdmin(admin.ModelAdmin):
    """Admin configuration for profiled Socrata databases.

    The change list surfaces profiling/Socrata status alongside basic
    metadata; columns are edited inline on the detail page.
    """
    # NOTE(review): a large commented-out ``fieldsets`` definition was
    # removed here; restore from version control if a grouped detail
    # layout is ever wanted again.
    list_display = (
        'database_id', 'name', 'category', 'short_profiler_status',
        'socrata_status', 'source_agency', 'has_bounding_box',
    )
    search_fields = (
        'profiler_status', 'database_id', 'category', 'name',
        'description', 'owner', 'tags',
    )
    list_filter = [
        'profiler_status', 'category', 'owner', 'author', 'socrata_status',
    ]
    # Default the editable slug-like name to the Socrata database id.
    prepopulated_fields = {'name': ('database_id',)}
    inlines = [ColumnInline]


admin.site.register(Database, DatabaseAdmin)
class AlarmAdmin(admin.ModelAdmin):
    """Admin configuration for Alarm records; change list is filterable by severity."""
    list_display = ['name', 'severity', 'query']
    list_filter = ['severity']
admin.site.register(Alarm, AlarmAdmin)
class SystemAdmin(admin.ModelAdmin):
    """Admin configuration for System records (update time and source file)."""
    list_display = ['update_time', 'source_file']
admin.site.register(System, SystemAdmin)
|
normal
|
{
"blob_id": "e1968e0d6146ce7656505eeed8e9f31daa4b558a",
"index": 5447,
"step-1": "<mask token>\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GPSInline(admin.TabularInline):\n <mask token>\n <mask token>\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes = 'collapse',\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\n<mask token>\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\n<mask token>\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes = 'collapse',\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n list_display = ('database_id', 'name', 'category',\n 'short_profiler_status', 'socrata_status', 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status', 'database_id', 'category', 'name',\n 'description', 'owner', 'tags')\n list_filter = ['profiler_status', 'category', 'owner', 'author',\n 'socrata_status']\n prepopulated_fields = {'name': ('database_id',)}\n inlines = [ColumnInline]\n\n\nadmin.site.register(Database, DatabaseAdmin)\n\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\n\nadmin.site.register(Alarm, AlarmAdmin)\n\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\n\nadmin.site.register(System, SystemAdmin)\n",
"step-5": "from django.contrib import admin\n\n# from django.contrib.admin import AdminSite\n# class MyAdminSite(AdminSite):\n# site_header = 'Finder Administration'\n# admin_site = MyAdminSite(name='Finder Admin')\n\n\nfrom finder.models import Database, Column, GpsData, Alarm, System\n\nclass ColumnInline(admin.TabularInline):\n model = Column\n\nclass GPSInline(admin.TabularInline):\n model = GpsData\n classes= ('collapse',)\n\n\nclass DatabaseAdmin(admin.ModelAdmin):\n # fieldsets = [\n # \t\t\t\t(None, {'fields': ['database_id']}),\n # \t\t\t\t('Database Info', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['rows',\n # \t\t\t\t\t\t\t\t\t\t\t 'missing_rows', \n # \t\t\t\t\t\t\t\t\t\t\t 'columns_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_geo_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_numeric_count', \n # \t\t\t\t\t\t\t\t\t\t\t 'columns_temporal_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'columns_text_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'values',\n # \t\t\t\t\t\t\t\t\t\t\t 'values_missing']}\n # \t\t\t),\n # \t\t\t('Profiler Info', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['profiler_input_file',\n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_status', \n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_time_begin',\n # \t\t\t\t\t\t\t\t\t\t\t 'profiler_time_end',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_author', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_download_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_view_count']}\n # \t\t\t),\n # \t\t\t('Socrata Metadata', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': ['socrata_status',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_description', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_category',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_owner',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_author', \n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_download_count',\n # \t\t\t\t\t\t\t\t\t\t\t 'socrata_view_count']}\n # \t\t\t),\n # \t\t\t('GPS Data', {#'classes': ('collapse',),\n # \t\t\t\t\t\t\t\t'fields': [ 'gps_values', 'lat_min', 'lat_max', 'long_min', 
'long_max']}\n # \t\t\t),\n # \t\t\t]\n\n list_display = ('database_id', 'name', 'category', 'short_profiler_status', 'socrata_status', \n #'socrata_primary', 'rows', 'columns_count', 'missing_percent', \n 'source_agency',\n 'has_bounding_box')\n search_fields = ('profiler_status','database_id','category','name', 'description','owner','tags',)\n list_filter = ['profiler_status', 'category', 'owner', 'author', 'socrata_status']\n\n prepopulated_fields = {'name': ('database_id',)}\n\n inlines = [ColumnInline\n #, GPSInline\n ]\n \nadmin.site.register(Database, DatabaseAdmin)\n\nclass AlarmAdmin(admin.ModelAdmin):\n list_display = ['name', 'severity', 'query']\n list_filter = ['severity']\n\nadmin.site.register(Alarm, AlarmAdmin)\n\nclass SystemAdmin(admin.ModelAdmin):\n list_display = ['update_time', 'source_file']\n\nadmin.site.register(System, SystemAdmin)\n\n",
"step-ids": [
5,
7,
10,
11,
13
]
}
|
[
5,
7,
10,
11,
13
] |
import discord
from collections import Counter
from db import readDB, writeDB
# User-facing status and error messages returned to the invoking channel.
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
# Messages with a %s placeholder for the offending player name(s).
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
# Messages prepended to the stats output when sorting fails or is invalid.
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
    """Return the index of the first inner list of *searchList* containing *name*.

    args: name - the value to search for
          searchList - a list of lists to search
    retn: index of the first inner list containing name, or -1 if not found
    """
    # enumerate() is the idiomatic replacement for range(len(...)) indexing.
    for i, sublist in enumerate(searchList):
        if name in sublist:
            return i
    return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
    """Round *num* up to the next multiple of *multiple* (e.g. 11, 2 -> 12)."""
    remainder = num % multiple
    if not remainder:
        # Already an exact multiple; nothing to do.
        return num
    return num + multiple - remainder
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
    """Return the values that occur more than once in *inputList*.

    Order follows each duplicated value's first appearance in the input.
    """
    counts = Counter(inputList)
    return [value for value, occurrences in counts.items() if occurrences > 1]
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
    """Record one game result: +1 win for the winner, +1 loss for each loser.

    args: msgChannel - the channel the invoking message was sent from (unused here)
          statsFile - the name of the database file
          winner - a string containing the winner's name
          losers - a list of strings containing the losers' names
    retn: a user-facing string indicating success or the specific failure
    """
    data = readDB(statsFile)
    # Return an error if the database was not found.
    if data == 0:
        return ERROR_DB_NOT_FOUND
    rows = data.rows

    # Validate the winner and resolve its row index once (the original code
    # looked it up twice).
    winnerIndex = getIndex(winner, rows)
    if winnerIndex < 0:
        print('[ERROR] Winner "%s" not found in database' % winner)
        return (ERROR_PLAYER_NOT_FOUND % winner)

    # Validate every loser, resolving each row index exactly once.
    loserIndexes = []
    for loser in losers:
        # Check against the winner to see if the name was duplicated.
        if loser == winner:
            print('[ERROR] Winner duplicated in losers field')
            return (ERROR_WIN_IN_LOSE % loser)
        loserIndex = getIndex(loser, rows)
        if loserIndex < 0:
            print('[ERROR] Loser "%s" not found in database' % loser)
            return (ERROR_PLAYER_NOT_FOUND % loser)
        loserIndexes.append(loserIndex)

    # Reject duplicated loser names.
    dupList = findDuplicates(losers)
    if len(dupList) > 0:
        print('[ERROR] Duplicate losers found')
        return (ERROR_DUP_LOSER % dupList)

    # All names validated: bump the winner's win count (stored as a string)...
    rows[winnerIndex][1] = str(int(rows[winnerIndex][1]) + 1)
    # ...and each loser's loss count.
    for loserIndex in loserIndexes:
        rows[loserIndex][2] = str(int(rows[loserIndex][2]) + 1)

    # Write the new data back to the database file.
    if writeDB(statsFile, data.headers, rows):
        return INFO_DB_SUCCESS
    else:
        print('[INFO] Database not updated')
        return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
    """Add, edit, or remove a player record in the stats database.

    args: msgChannel - the channel the invoking message was sent from (unused here)
          statsFile - the name of the database file
          player - the name of the player to add, edit, or remove
          editType - 'ADD', 'EDIT', or 'REMOVE'
          wins - the win count to store (ADD/EDIT), as a string
          losses - the loss count to store (ADD/EDIT), as a string
    retn: a user-facing string indicating success or the specific failure;
          an unrecognized editType falls through and implicitly returns None
          (pre-existing behavior, preserved for callers)
    """
    data = readDB(statsFile)
    # Return an error if the database was not found.
    if data == 0:
        return ERROR_DB_NOT_FOUND
    rows = data.rows
    playerIndex = getIndex(player, rows)

    if editType == 'ADD':
        if playerIndex > -1:
            print('[ERROR] "%s" already in database' % player)
            print('[INFO] Database not updated')
            return (ERROR_IN_DB % player)
        # Append the new player and keep rows sorted by capitalized name.
        rows.append([player, wins, losses])
        rows.sort(key=lambda name: name[0].capitalize())
        return _writePlayerDB(statsFile, data, rows,
                              '[INFO] "%s" added to database' % player)
    elif editType == 'EDIT':
        if playerIndex < 0:
            print('[ERROR] "%s" not found in database' % player)
            print('[INFO] Database not updated')
            return (ERROR_PLAYER_NOT_FOUND % player)
        # Replace the win/loss counts, keeping the stored name unchanged.
        rows[playerIndex] = [rows[playerIndex][0], wins, losses]
        return _writePlayerDB(statsFile, data, rows,
                              "[INFO] %s's data changed" % player)
    elif editType == 'REMOVE':
        if playerIndex < 0:
            print('[ERROR] "%s" not found in database' % player)
            print('[INFO] Database not updated')
            return (ERROR_PLAYER_NOT_FOUND % player)
        del rows[playerIndex]
        return _writePlayerDB(statsFile, data, rows,
                              '[INFO] "%s" removed from database' % player)


def _writePlayerDB(statsFile, data, rows, successLog):
    """Write rows back to the database, log the outcome, and map it to a message.

    Factored out of editPlayer's three branches, which all ended with the
    same write/log/return sequence.
    """
    if writeDB(statsFile, data.headers, rows):
        print(successLog)
        return INFO_DB_SUCCESS
    print('[INFO] Database not updated')
    return ERROR_DB_ERROR
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
    """Build a Markdown-formatted stats table from the database.

    args: msgChannel - the channel the invoking message was sent from (unused here)
          statsFile - the name of the database file
          sortType - 'WINRATE', 'WINS', 'LOSSES', or 'NAME'; invalid values
                     fall back to stored (name) order with a warning prefix
          player - only 'ALL' is implemented; any other value falls through
                   and implicitly returns None
    retn: the formatted table string (possibly prefixed with a warning), or
          an error string if the database is missing
    """
    # read database
    data = readDB(statsFile)
    # return an error if database not found
    if data == 0:
        return ERROR_DB_NOT_FOUND
    rows = data.rows
    print('[INFO] Sort type is %s' % sortType)
    returnMsg = ''
    if sortType == 'WINRATE' or sortType == 'NONE':
        # sort data by win rate (wins / total games); a player with zero
        # recorded games makes the key raise ZeroDivisionError
        try:
            rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
        except ZeroDivisionError:
            print('[ERROR] Tried to divide by zero because of blank player data')
            returnMsg = ERROR_SORT_ERROR
    elif sortType == 'WINS':
        # sort by number of wins and reverse so max is first
        rows.sort(key=lambda wins: float(wins[1]), reverse=True)
    elif sortType == 'LOSSES':
        # sort by number of losses and reverse so max is first
        rows.sort(key=lambda losses: float(losses[2]), reverse=True)
    elif sortType == 'NAME':
        # database is stored sorted by name so dont do anything
        pass
    else:
        print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
        returnMsg = ERROR_INVALID_SORT
    if player == 'ALL':
        # get max player length so the name column fits the longest name
        maxPlayerLen = 0
        for player in rows:
            if len(player[0]) > maxPlayerLen:
                maxPlayerLen = len(player[0])
        # construct a string with all the player info
        playerString = ''
        # adjust start spacing if player length is odd or even to align with pipe
        startSpace = 4 if maxPlayerLen % 2 else 3
        for player in rows:
            playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
            winCount = player[1].rjust(7)
            loseCount = player[2].rjust(9)
            # calculate win rate; special-case all-loss and all-win records
            # to avoid dividing by zero
            if float(winCount) <= 0:
                winRate = '0'
            elif float(loseCount) <= 0:
                winRate = ' 100'
            else:
                winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
            # truncate win rate to 4 chars (e.g. '66.6') and pad the column
            winRate = winRate[0:4].rjust(9)
            playerString += playerName + winCount + loseCount + winRate + ' %\n'
        # calculate padding for name field and create header final strings
        namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
        header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
        divider = ('-' * len(header)) + '\n'
        sendString = '```md\n' + header + divider + playerString + '```'
        # prepend any sort warning collected above before returning
        if len(returnMsg) > 0:
            returnMsg = returnMsg + sendString
            return returnMsg
        return sendString
|
normal
|
{
"blob_id": "5869669f1e3f648c0ddc68683f0b1d2754b40169",
"index": 8714,
"step-1": "<mask token>\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\n<mask token>\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex 
< 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n print(\n '[ERROR] Invalid sorting type specified. 
Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-2": "<mask token>\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n 
print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n print(\n '[ERROR] Invalid sorting type specified. 
Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-3": "<mask token>\n\n\ndef getIndex(name, searchList):\n for i in range(0, len(searchList)):\n if name in searchList[i]:\n return i\n return -1\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if 
writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n print(\n '[ERROR] Invalid sorting type specified. 
Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-4": "<mask token>\nINFO_DB_SUCCESS = 'Database updated successfully!'\nERROR_DB_ERROR = 'Error: Unable to open database for writing'\nERROR_DB_NOT_FOUND = (\n 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'\n )\nERROR_PLAYER_NOT_FOUND = (\n 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'\n )\nERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'\nERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'\nERROR_IN_DB = 'Error: \"%s\" is already in the database'\nERROR_SORT_ERROR = \"\"\"Error while sorting list. Make sure all players have at least one win or loss.\n\"\"\"\nERROR_INVALID_SORT = (\n 'Error: Invalid sorting type. Displaying stats as stored.\\n')\n\n\ndef getIndex(name, searchList):\n for i in range(0, len(searchList)):\n if name in searchList[i]:\n return i\n return -1\n\n\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - num % multiple)\n return num\n\n\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\ndef incrementStats(msgChannel, statsFile, winner, losers):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \"%s\" not found in database' % winner)\n return ERROR_PLAYER_NOT_FOUND % winner\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return ERROR_WIN_IN_LOSE % loser\n if loserIndex < 0:\n print('[ERROR] Loser \"%s\" not found in database' % loser)\n return ERROR_PLAYER_NOT_FOUND % loser\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return ERROR_DUP_LOSER % dupList\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n for loser in losers:\n 
loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \"%s\" already in database' % player)\n print('[INFO] Database not updated')\n return ERROR_IN_DB % player\n else:\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n if writeDB(statsFile, data.headers, rows):\n print(\"[INFO] %s's data changed\" % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \"%s\" not found in database' % player)\n print('[INFO] Database not updated')\n return ERROR_PLAYER_NOT_FOUND % player\n else:\n del rows[playerIndex]\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \"%s\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n data = readDB(statsFile)\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n print('[INFO] Sort type is %s' % 
sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) +\n float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print(\n '[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n pass\n else:\n print(\n '[ERROR] Invalid sorting type specified. Displaying stats as stored'\n )\n returnMsg = ERROR_INVALID_SORT\n if player == 'ALL':\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n playerString = ''\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace\n )\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str(float(winCount) / (float(winCount) + float(\n loseCount)) * 100)\n winRate = winRate[0:4].rjust(9)\n playerString += (playerName + winCount + loseCount + winRate +\n ' %\\n')\n namePaddingLen = roundMultiple(maxPlayerLen + 2, 2)\n header = ' |' + 'Name'.center(namePaddingLen\n ) + '| Wins | Losses | Win Rate |\\n'\n divider = '-' * len(header) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-5": "import discord\nfrom collections import Counter\nfrom db import readDB, writeDB\n\n\nINFO_DB_SUCCESS = 'Database updated successfully!'\nERROR_DB_ERROR = 'Error: Unable to open database for writing'\nERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'\n\nERROR_PLAYER_NOT_FOUND = 'Error: \\\"%s\\\" not found in database. Check your spelling or use !addplayer first.'\nERROR_WIN_IN_LOSE = 'Error: \\\"%s\\\" already specified as winner.'\nERROR_DUP_LOSER = 'Error: \\\"%s\\\" duplicated in losers list'\n\nERROR_IN_DB = 'Error: \\\"%s\\\" is already in the database'\n\nERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\\n'\nERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\\n'\n\n\n# desc: function to search a list of lists for a name\n# args: name - the name to search the lists for\n# searchList - a list of lists to search for a name\n# retn: the index of the list containing the name or -1 if not found\ndef getIndex(name, searchList):\n for i in range(0, len(searchList)):\n if name in searchList[i]:\n return i\n return -1\n\n\n# desc: function to round a number up to a specific increment. 
for example,\n# rounding 11 to the nearest multiple of 2 would result in 12\n# args: num - the number to round up\n# multiple - the increment to round to\n# retn: the rounded number\ndef roundMultiple(num, multiple):\n if num % multiple:\n return num + (multiple - (num % multiple))\n return num\n\n\n# desc: function to find duplicate items in a list\n# args: inputList - a list to search for duplicates\n# retn: a list containing the duplicates\ndef findDuplicates(inputList):\n dupList = [k for k, v in Counter(inputList).items() if v > 1]\n return dupList\n\n\n# desc: function to update the database\n# args: msgChannel - the channel the invoking message was sent from\n# statsFile - the name of the database file\n# winner - a string containing the winner's name\n# losers - a list of strings containing the losers' names\n# retn: a string indicating success or failure\ndef incrementStats(msgChannel, statsFile, winner, losers):\n # read the database\n data = readDB(statsFile)\n # return an error if database not found\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n\n # check if the winner is actually in the database\n if getIndex(winner, rows) < 0:\n print('[ERROR] Winner \\\"%s\\\" not found in database' % winner)\n return (ERROR_PLAYER_NOT_FOUND % winner)\n\n # check if losers are in database\n for loser in losers:\n # get loser index\n loserIndex = getIndex(loser, rows)\n\n # check against winner to see if the name was duplicated\n if loser == winner:\n print('[ERROR] Winner duplicated in losers field')\n return (ERROR_WIN_IN_LOSE % loser)\n # check if loser was not found in database\n if loserIndex < 0:\n print('[ERROR] Loser \\\"%s\\\" not found in database' % loser)\n return (ERROR_PLAYER_NOT_FOUND % loser)\n\n # check for duplicate losers\n dupList = findDuplicates(losers)\n if len(dupList) > 0:\n print('[ERROR] Duplicate losers found')\n return (ERROR_DUP_LOSER % dupList)\n\n # update stats if we found the winner and all losers\n # get index, get 
win count, increment and update\n winnerIndex = getIndex(winner, rows)\n winnerVal = int(rows[winnerIndex][1])\n rows[winnerIndex][1] = str(winnerVal + 1)\n\n # same as winner for each loser\n for loser in losers:\n loserIndex = getIndex(loser, rows)\n loserVal = int(rows[loserIndex][2])\n rows[loserIndex][2] = str(loserVal + 1)\n\n # write the new data to the database file\n if writeDB(statsFile, data.headers, rows):\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\n# desc: function to add a player to the database or edit an existing player\n# args: msgChannel - the channel the invoking message was sent from\n# statsFile - the name of the database file\n# player - the name of the player to either add to the db or edit\n# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening\n# wins - the number of wins to assign the player\n# losses - the number of losses to assign the player\n# retn: a string indicating success or failure\ndef editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):\n # open up the database\n data = readDB(statsFile)\n # return an error if database not found\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n playerIndex = getIndex(player, rows)\n\n # check if player is already in database\n if editType == 'ADD':\n if playerIndex > -1:\n print('[ERROR] \\\"%s\\\" already in database' % player)\n print('[INFO] Database not updated')\n return (ERROR_IN_DB % player)\n else:\n # add player to list and resort\n rows.append([player, wins, losses])\n rows.sort(key=lambda name: name[0].capitalize())\n\n # write the new data to the database file\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \\\"%s\\\" added to database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'EDIT':\n if playerIndex < 0:\n print('[ERROR] \\\"%s\\\" not found in database' % player)\n 
print('[INFO] Database not updated')\n return (ERROR_PLAYER_NOT_FOUND % player)\n else:\n rows[playerIndex] = [rows[playerIndex][0], wins, losses]\n\n # write the new data to the database file\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] %s\\'s data changed' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n elif editType == 'REMOVE':\n if playerIndex < 0:\n print('[ERROR] \\\"%s\\\" not found in database' % player)\n print('[INFO] Database not updated')\n return (ERROR_PLAYER_NOT_FOUND % player)\n else:\n # delete player from list\n del(rows[playerIndex])\n # write the new data to the database\n if writeDB(statsFile, data.headers, rows):\n print('[INFO] \\\"%s\\\" removed from database' % player)\n return INFO_DB_SUCCESS\n else:\n print('[INFO] Database not updated')\n return ERROR_DB_ERROR\n\n\n# desc: function to display the stats\n# args: msgChannel - the channel the invoking message was sent from\n# statsFile - the name of the database file\n# sortType - the order in which the results should be sorted.\n# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.\n# will revert to 'NAME' if invalid\n# player - NOT IMPLEMENTED - the player to display stats for\n# retn: a string formatted with the database stats\ndef dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):\n # read database\n data = readDB(statsFile)\n # return an error if database not found\n if data == 0:\n return ERROR_DB_NOT_FOUND\n rows = data.rows\n\n print('[INFO] Sort type is %s' % sortType)\n returnMsg = ''\n if sortType == 'WINRATE' or sortType == 'NONE':\n # sort data by win rate\n try:\n rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)\n except ZeroDivisionError:\n print('[ERROR] Tried to divide by zero because of blank player data')\n returnMsg = ERROR_SORT_ERROR\n elif sortType == 'WINS':\n # sort by number of wins and reverse so max is first\n 
rows.sort(key=lambda wins: float(wins[1]), reverse=True)\n elif sortType == 'LOSSES':\n # sort by number of losses and reverse so max is first\n rows.sort(key=lambda losses: float(losses[2]), reverse=True)\n elif sortType == 'NAME':\n # database is stored sorted by name so dont do anything\n pass\n else:\n print('[ERROR] Invalid sorting type specified. Displaying stats as stored')\n returnMsg = ERROR_INVALID_SORT\n\n if player == 'ALL':\n # get max player length\n maxPlayerLen = 0\n for player in rows:\n if len(player[0]) > maxPlayerLen:\n maxPlayerLen = len(player[0])\n\n # construct a string with all the player info\n playerString = ''\n # adjust start spacing if player length is odd or even to align with pipe\n startSpace = 4 if maxPlayerLen % 2 else 3\n for player in rows:\n playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)\n winCount = player[1].rjust(7)\n loseCount = player[2].rjust(9)\n # calculate win rate\n if float(winCount) <= 0:\n winRate = '0'\n elif float(loseCount) <= 0:\n winRate = ' 100'\n else:\n winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)\n\n # truncate win rate and create string with player info\n winRate = winRate[0:4].rjust(9)\n playerString += playerName + winCount + loseCount + winRate + ' %\\n'\n\n # calculate padding for name field and create header final strings\n namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)\n header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\\n'\n divider = ('-' * len(header)) + '\\n'\n sendString = '```md\\n' + header + divider + playerString + '```'\n\n # return the constructed string\n if len(returnMsg) > 0:\n returnMsg = returnMsg + sendString\n return returnMsg\n return sendString\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
"""
common tests
"""
from django.test import TestCase
from src.core.common import get_method_config
from src.predictive_model.classification.models import ClassificationMethods
from src.predictive_model.models import PredictiveModels
from src.utils.tests_utils import create_test_job, create_test_predictive_model
class TestCommon(TestCase):
def test_get_method_config(self):
job = create_test_job(
predictive_model=create_test_predictive_model(
predictive_model=PredictiveModels.CLASSIFICATION.value,
prediction_method=ClassificationMethods.RANDOM_FOREST.value
)
)
method, config = get_method_config(job)
self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)
self.assertEqual({
'max_depth': None,
'max_features': 'auto',
'n_estimators': 10,
}, config)
|
normal
|
{
"blob_id": "824038a56e8aaf4adf6ec813a5728ab318547582",
"index": 1638,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCommon(TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCommon(TestCase):\n\n def test_get_method_config(self):\n job = create_test_job(predictive_model=create_test_predictive_model\n (predictive_model=PredictiveModels.CLASSIFICATION.value,\n prediction_method=ClassificationMethods.RANDOM_FOREST.value))\n method, config = get_method_config(job)\n self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)\n self.assertEqual({'max_depth': None, 'max_features': 'auto',\n 'n_estimators': 10}, config)\n",
"step-4": "<mask token>\nfrom django.test import TestCase\nfrom src.core.common import get_method_config\nfrom src.predictive_model.classification.models import ClassificationMethods\nfrom src.predictive_model.models import PredictiveModels\nfrom src.utils.tests_utils import create_test_job, create_test_predictive_model\n\n\nclass TestCommon(TestCase):\n\n def test_get_method_config(self):\n job = create_test_job(predictive_model=create_test_predictive_model\n (predictive_model=PredictiveModels.CLASSIFICATION.value,\n prediction_method=ClassificationMethods.RANDOM_FOREST.value))\n method, config = get_method_config(job)\n self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)\n self.assertEqual({'max_depth': None, 'max_features': 'auto',\n 'n_estimators': 10}, config)\n",
"step-5": "\"\"\"\ncommon tests\n\"\"\"\n\nfrom django.test import TestCase\n\nfrom src.core.common import get_method_config\nfrom src.predictive_model.classification.models import ClassificationMethods\nfrom src.predictive_model.models import PredictiveModels\nfrom src.utils.tests_utils import create_test_job, create_test_predictive_model\n\n\nclass TestCommon(TestCase):\n def test_get_method_config(self):\n job = create_test_job(\n predictive_model=create_test_predictive_model(\n predictive_model=PredictiveModels.CLASSIFICATION.value,\n prediction_method=ClassificationMethods.RANDOM_FOREST.value\n )\n )\n\n method, config = get_method_config(job)\n\n self.assertEqual(ClassificationMethods.RANDOM_FOREST.value, method)\n self.assertEqual({\n 'max_depth': None,\n 'max_features': 'auto',\n 'n_estimators': 10,\n }, config)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log.setLevel(logging.DEBUG)
<|reserved_special_token_0|>
stream_hander.setFormatter(formatter)
log.addHandler(stream_hander)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
formatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')
log = logging.getLogger('othello')
log.setLevel(logging.DEBUG)
stream_hander = logging.StreamHandler()
stream_hander.setFormatter(formatter)
log.addHandler(stream_hander)
<|reserved_special_token_1|>
import logging
formatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')
log = logging.getLogger('othello')
log.setLevel(logging.DEBUG)
stream_hander = logging.StreamHandler()
stream_hander.setFormatter(formatter)
log.addHandler(stream_hander)
<|reserved_special_token_1|>
import logging
formatter = logging.Formatter("%(asctime)s [%(levelname)s] : %(message)s")
log = logging.getLogger("othello")
log.setLevel(logging.DEBUG)
stream_hander = logging.StreamHandler()
stream_hander.setFormatter(formatter)
log.addHandler(stream_hander)
|
flexible
|
{
"blob_id": "675fbdfd519d00ab10bf613e8abb7338e484fe65",
"index": 57,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlog.setLevel(logging.DEBUG)\n<mask token>\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n",
"step-3": "<mask token>\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n",
"step-4": "import logging\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n",
"step-5": "import logging\n\n\nformatter = logging.Formatter(\"%(asctime)s [%(levelname)s] : %(message)s\")\n\nlog = logging.getLogger(\"othello\")\nlog.setLevel(logging.DEBUG)\n\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#str
owog="Delger"
# len()- urt
# lower()- jijigruuleh
# upper()- tomruulah
# capitalize()- ehnii useg tomruulah
# replace()- temdegt solih
print(owog.find("e"))
print(owog.count("e"))
print(owog[2:10])
a=21
b=21
if a>b:
print("a too ih")
elif a==b:
print("tentsuu")
else:
print("b too ih")
a, b = input().split()
for i in range(a, b+1):
print(i)
|
normal
|
{
"blob_id": "c4ca4b5c77c3c912b44a4853be30298ec845c4fd",
"index": 243,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(owog.find('e'))\nprint(owog.count('e'))\nprint(owog[2:10])\n<mask token>\nif a > b:\n print('a too ih')\nelif a == b:\n print('tentsuu')\nelse:\n print('b too ih')\n<mask token>\nfor i in range(a, b + 1):\n print(i)\n",
"step-3": "owog = 'Delger'\nprint(owog.find('e'))\nprint(owog.count('e'))\nprint(owog[2:10])\na = 21\nb = 21\nif a > b:\n print('a too ih')\nelif a == b:\n print('tentsuu')\nelse:\n print('b too ih')\na, b = input().split()\nfor i in range(a, b + 1):\n print(i)\n",
"step-4": "#str\r\nowog=\"Delger\"\r\n# len()- urt\r\n# lower()- jijigruuleh\r\n# upper()- tomruulah\r\n# capitalize()- ehnii useg tomruulah\r\n# replace()- temdegt solih\r\nprint(owog.find(\"e\"))\r\nprint(owog.count(\"e\"))\r\nprint(owog[2:10])\r\n\r\na=21\r\nb=21\r\nif a>b:\r\n print(\"a too ih\")\r\nelif a==b:\r\n print(\"tentsuu\")\r\nelse:\r\n print(\"b too ih\")\r\n\r\na, b = input().split()\r\nfor i in range(a, b+1):\r\n print(i)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_offset_date(modifed_date, offset_in_days):
return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))
def get_trending_repositories(start_search_date, number_of_results=20):
github_api_uri = 'https://api.github.com'
query_search_url = '{}/search/repositories'.format(github_api_uri)
query_parameters = {'q': 'created:>{}'.format(start_search_date),
'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}
trending_repositories_json_list = requests.get(query_search_url,
query_parameters).json()['items']
result_trending_list = []
for repository in trending_repositories_json_list:
repository_name = repository['name']
repoditory_owner = repository['owner']['login']
result_trending_list.append({'repo_name': str(repository_name),
'repo_owner': str(repoditory_owner), 'stars': repository[
'stargazers_count'], 'issues': repository['open_issues'], 'url':
repository['html_url']})
return result_trending_list
<|reserved_special_token_0|>
def print_result_to_console():
print('Program prints {} most popular repositories since {}\n'.format(
number_of_results, week_earlier_date))
for index, repo in enumerate(top_repositories_list):
good_choice_label = ''
if not repo['issues']:
good_choice_label = 'Try it!'
print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index +
1, repo['url'], repo['stars'], repo['issues'], good_choice_label))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_offset_date(modifed_date, offset_in_days):
return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))
def get_trending_repositories(start_search_date, number_of_results=20):
github_api_uri = 'https://api.github.com'
query_search_url = '{}/search/repositories'.format(github_api_uri)
query_parameters = {'q': 'created:>{}'.format(start_search_date),
'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}
trending_repositories_json_list = requests.get(query_search_url,
query_parameters).json()['items']
result_trending_list = []
for repository in trending_repositories_json_list:
repository_name = repository['name']
repoditory_owner = repository['owner']['login']
result_trending_list.append({'repo_name': str(repository_name),
'repo_owner': str(repoditory_owner), 'stars': repository[
'stargazers_count'], 'issues': repository['open_issues'], 'url':
repository['html_url']})
return result_trending_list
def get_open_issues_amount(repo_owner, repo_name):
github_api_uri = 'https://api.github.com'
query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,
repo_owner, repo_name)
issues_json_data = requests.get(query_search_url).json()
number_of_open_issues = len([x for x in issues_json_data if x['state'] ==
'open'])
return number_of_open_issues
def print_result_to_console():
print('Program prints {} most popular repositories since {}\n'.format(
number_of_results, week_earlier_date))
for index, repo in enumerate(top_repositories_list):
good_choice_label = ''
if not repo['issues']:
good_choice_label = 'Try it!'
print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index +
1, repo['url'], repo['stars'], repo['issues'], good_choice_label))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_offset_date(modifed_date, offset_in_days):
return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))
def get_trending_repositories(start_search_date, number_of_results=20):
github_api_uri = 'https://api.github.com'
query_search_url = '{}/search/repositories'.format(github_api_uri)
query_parameters = {'q': 'created:>{}'.format(start_search_date),
'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}
trending_repositories_json_list = requests.get(query_search_url,
query_parameters).json()['items']
result_trending_list = []
for repository in trending_repositories_json_list:
repository_name = repository['name']
repoditory_owner = repository['owner']['login']
result_trending_list.append({'repo_name': str(repository_name),
'repo_owner': str(repoditory_owner), 'stars': repository[
'stargazers_count'], 'issues': repository['open_issues'], 'url':
repository['html_url']})
return result_trending_list
def get_open_issues_amount(repo_owner, repo_name):
github_api_uri = 'https://api.github.com'
query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,
repo_owner, repo_name)
issues_json_data = requests.get(query_search_url).json()
number_of_open_issues = len([x for x in issues_json_data if x['state'] ==
'open'])
return number_of_open_issues
def print_result_to_console():
print('Program prints {} most popular repositories since {}\n'.format(
number_of_results, week_earlier_date))
for index, repo in enumerate(top_repositories_list):
good_choice_label = ''
if not repo['issues']:
good_choice_label = 'Try it!'
print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index +
1, repo['url'], repo['stars'], repo['issues'], good_choice_label))
if __name__ == '__main__':
date_offset_in_days = -7
week_earlier_date = get_offset_date(date.today(), date_offset_in_days)
number_of_results = 20
top_repositories_list = get_trending_repositories(week_earlier_date,
number_of_results)
print_result_to_console()
<|reserved_special_token_1|>
import requests
from datetime import date
from datetime import timedelta
def get_offset_date(modifed_date, offset_in_days):
return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))
def get_trending_repositories(start_search_date, number_of_results=20):
github_api_uri = 'https://api.github.com'
query_search_url = '{}/search/repositories'.format(github_api_uri)
query_parameters = {'q': 'created:>{}'.format(start_search_date),
'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}
trending_repositories_json_list = requests.get(query_search_url,
query_parameters).json()['items']
result_trending_list = []
for repository in trending_repositories_json_list:
repository_name = repository['name']
repoditory_owner = repository['owner']['login']
result_trending_list.append({'repo_name': str(repository_name),
'repo_owner': str(repoditory_owner), 'stars': repository[
'stargazers_count'], 'issues': repository['open_issues'], 'url':
repository['html_url']})
return result_trending_list
def get_open_issues_amount(repo_owner, repo_name):
github_api_uri = 'https://api.github.com'
query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,
repo_owner, repo_name)
issues_json_data = requests.get(query_search_url).json()
number_of_open_issues = len([x for x in issues_json_data if x['state'] ==
'open'])
return number_of_open_issues
def print_result_to_console():
print('Program prints {} most popular repositories since {}\n'.format(
number_of_results, week_earlier_date))
for index, repo in enumerate(top_repositories_list):
good_choice_label = ''
if not repo['issues']:
good_choice_label = 'Try it!'
print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index +
1, repo['url'], repo['stars'], repo['issues'], good_choice_label))
if __name__ == '__main__':
date_offset_in_days = -7
week_earlier_date = get_offset_date(date.today(), date_offset_in_days)
number_of_results = 20
top_repositories_list = get_trending_repositories(week_earlier_date,
number_of_results)
print_result_to_console()
|
flexible
|
{
"blob_id": "8a7536b998a6d122e2e7529af1ebe2a0f025303f",
"index": 5620,
"step-1": "<mask token>\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\n<mask token>\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,\n repo_owner, repo_name)\n issues_json_data = requests.get(query_search_url).json()\n number_of_open_issues = len([x for x in issues_json_data if x['state'] ==\n 'open'])\n return number_of_open_issues\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,\n repo_owner, repo_name)\n issues_json_data = requests.get(query_search_url).json()\n number_of_open_issues = len([x for x in issues_json_data if x['state'] ==\n 'open'])\n return number_of_open_issues\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\nif __name__ == '__main__':\n date_offset_in_days = -7\n week_earlier_date = get_offset_date(date.today(), date_offset_in_days)\n number_of_results = 20\n top_repositories_list = 
get_trending_repositories(week_earlier_date,\n number_of_results)\n print_result_to_console()\n",
"step-4": "import requests\nfrom datetime import date\nfrom datetime import timedelta\n\n\ndef get_offset_date(modifed_date, offset_in_days):\n return date.isoformat(modifed_date + timedelta(days=int(offset_in_days)))\n\n\ndef get_trending_repositories(start_search_date, number_of_results=20):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{}/search/repositories'.format(github_api_uri)\n query_parameters = {'q': 'created:>{}'.format(start_search_date),\n 'sort': 'stars', 'order': 'desc', 'per_page': number_of_results}\n trending_repositories_json_list = requests.get(query_search_url,\n query_parameters).json()['items']\n result_trending_list = []\n for repository in trending_repositories_json_list:\n repository_name = repository['name']\n repoditory_owner = repository['owner']['login']\n result_trending_list.append({'repo_name': str(repository_name),\n 'repo_owner': str(repoditory_owner), 'stars': repository[\n 'stargazers_count'], 'issues': repository['open_issues'], 'url':\n repository['html_url']})\n return result_trending_list\n\n\ndef get_open_issues_amount(repo_owner, repo_name):\n github_api_uri = 'https://api.github.com'\n query_search_url = '{0}/repos/{1}/{2}/issues'.format(github_api_uri,\n repo_owner, repo_name)\n issues_json_data = requests.get(query_search_url).json()\n number_of_open_issues = len([x for x in issues_json_data if x['state'] ==\n 'open'])\n return number_of_open_issues\n\n\ndef print_result_to_console():\n print('Program prints {} most popular repositories since {}\\n'.format(\n number_of_results, week_earlier_date))\n for index, repo in enumerate(top_repositories_list):\n good_choice_label = ''\n if not repo['issues']:\n good_choice_label = 'Try it!'\n print('{0:2} {4:7} {1:70} {2:5} stars {3:2} issues'.format(index + \n 1, repo['url'], repo['stars'], repo['issues'], good_choice_label))\n\n\nif __name__ == '__main__':\n date_offset_in_days = -7\n week_earlier_date = get_offset_date(date.today(), date_offset_in_days)\n 
number_of_results = 20\n top_repositories_list = get_trending_repositories(week_earlier_date,\n number_of_results)\n print_result_to_console()\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def skip_func(list):
cnt = 0
for i in list:
padd = [0] * 200
try:
got = api.friends_ids(i, count=200)
except:
print('========NG=============', cnt)
follower_list.append(padd)
else:
print('==========OK==========', cnt)
follower_list.append(got)
cnt += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
<|reserved_special_token_0|>
df.head()
<|reserved_special_token_0|>
list_real
<|reserved_special_token_0|>
def skip_func(list):
cnt = 0
for i in list:
padd = [0] * 200
try:
got = api.friends_ids(i, count=200)
except:
print('========NG=============', cnt)
follower_list.append(padd)
else:
print('==========OK==========', cnt)
follower_list.append(got)
cnt += 1
skip_func(list_real)
pd.set_option('display.max_rows', 250)
<|reserved_special_token_0|>
df2.head()
df2.to_csv('ANetwork_moto_2020.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CONSUMER_KEY = config2.CONSUMER_KEY
CONSUMER_SECRET = config2.CONSUMER_SECRET
ACCESS_TOKEN = config2.ACCESS_TOKEN
ACCESS_TOKEN_SECRET = config2.ACCESS_TOKEN_SECRET
<|reserved_special_token_0|>
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)
<|reserved_special_token_0|>
df = pd.read_csv('df_mix2.csv')
df = df.drop('Unnamed: 0', axis=1)[1:]
df = df.reset_index()
df = df.drop('index', axis=1)
df.columns = list(range(199))
df.head()
list_real = []
list_real = df[0]
list_real = list_real[:1440]
list_real
follower_list = []
def skip_func(list):
cnt = 0
for i in list:
padd = [0] * 200
try:
got = api.friends_ids(i, count=200)
except:
print('========NG=============', cnt)
follower_list.append(padd)
else:
print('==========OK==========', cnt)
follower_list.append(got)
cnt += 1
skip_func(list_real)
pd.set_option('display.max_rows', 250)
df = pd.DataFrame(follower_list)
df1 = df.fillna(0)
df1 = df1.astype(int)
df0 = pd.DataFrame(list_real)
df0.columns = ['ID']
df2 = pd.concat([df0, df1], axis=1)
df2 = df2.fillna(0)
df2 = df2.astype(int)
df2.head()
df2.to_csv('ANetwork_moto_2020.csv')
<|reserved_special_token_1|>
import numpy as np
import sys, os
import config2
CONSUMER_KEY = config2.CONSUMER_KEY
CONSUMER_SECRET = config2.CONSUMER_SECRET
ACCESS_TOKEN = config2.ACCESS_TOKEN
ACCESS_TOKEN_SECRET = config2.ACCESS_TOKEN_SECRET
import tweepy
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)
import pandas as pd
df = pd.read_csv('df_mix2.csv')
df = df.drop('Unnamed: 0', axis=1)[1:]
df = df.reset_index()
df = df.drop('index', axis=1)
df.columns = list(range(199))
df.head()
list_real = []
list_real = df[0]
list_real = list_real[:1440]
list_real
follower_list = []
def skip_func(list):
cnt = 0
for i in list:
padd = [0] * 200
try:
got = api.friends_ids(i, count=200)
except:
print('========NG=============', cnt)
follower_list.append(padd)
else:
print('==========OK==========', cnt)
follower_list.append(got)
cnt += 1
skip_func(list_real)
pd.set_option('display.max_rows', 250)
df = pd.DataFrame(follower_list)
df1 = df.fillna(0)
df1 = df1.astype(int)
df0 = pd.DataFrame(list_real)
df0.columns = ['ID']
df2 = pd.concat([df0, df1], axis=1)
df2 = df2.fillna(0)
df2 = df2.astype(int)
df2.head()
df2.to_csv('ANetwork_moto_2020.csv')
<|reserved_special_token_1|>
#これは明日20200106に走らせましょう!
import numpy as np
import sys,os
import config2
CONSUMER_KEY = config2.CONSUMER_KEY
CONSUMER_SECRET = config2.CONSUMER_SECRET
ACCESS_TOKEN = config2.ACCESS_TOKEN
ACCESS_TOKEN_SECRET = config2.ACCESS_TOKEN_SECRET
import tweepy
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth,wait_on_rate_limit = True)#,wait_on_rate_limit_notify= True #PLOT TIME
#user = sys.argv[1]
import pandas as pd
df=pd.read_csv("df_mix2.csv")
df=df.drop("Unnamed: 0",axis=1)[1:]
df=df.reset_index()
df=df.drop("index",axis=1)
df.columns = list(range(199))
df.head()
list_real=[]
list_real=df[0]
list_real=list_real[:1440]
list_real
follower_list=[]
def skip_func(list):
cnt=0
for i in list:
padd= [0] * 200
try :
#got=api.followers_ids(i,count=200)
got=api.friends_ids(i,count=200)
except :
print('========NG=============',cnt)# pass (Real code == pass ,Practice code == print(NG))
follower_list.append(padd)
else :
print('==========OK==========',cnt)
#followerData["Follower_id"] = str(a)
#followerDatas.append(followerData)
#followerDatas.append(a)
follower_list.append(got)
#print(str(i))
#print(followerData)
cnt+=1
skip_func(list_real)
pd.set_option("display.max_rows", 250)
df=pd.DataFrame(follower_list)
df1=df.fillna(0)
df1=df1.astype(int)
df0=pd.DataFrame(list_real)
df0.columns=["ID"]
#print(df0.head())# core user's follow user
#pd.options.display.precision = 21
df2=pd.concat([df0, df1],axis=1)
df2=df2.fillna(0)
df2=df2.astype(int)
df2.head()
df2.to_csv("ANetwork_moto_2020.csv")
|
flexible
|
{
"blob_id": "8fac4571a3a1559e297754e89375be06d6c45c2d",
"index": 4795,
"step-1": "<mask token>\n\n\ndef skip_func(list):\n cnt = 0\n for i in list:\n padd = [0] * 200\n try:\n got = api.friends_ids(i, count=200)\n except:\n print('========NG=============', cnt)\n follower_list.append(padd)\n else:\n print('==========OK==========', cnt)\n follower_list.append(got)\n cnt += 1\n\n\n<mask token>\n",
"step-2": "<mask token>\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n<mask token>\ndf.head()\n<mask token>\nlist_real\n<mask token>\n\n\ndef skip_func(list):\n cnt = 0\n for i in list:\n padd = [0] * 200\n try:\n got = api.friends_ids(i, count=200)\n except:\n print('========NG=============', cnt)\n follower_list.append(padd)\n else:\n print('==========OK==========', cnt)\n follower_list.append(got)\n cnt += 1\n\n\nskip_func(list_real)\npd.set_option('display.max_rows', 250)\n<mask token>\ndf2.head()\ndf2.to_csv('ANetwork_moto_2020.csv')\n",
"step-3": "<mask token>\nCONSUMER_KEY = config2.CONSUMER_KEY\nCONSUMER_SECRET = config2.CONSUMER_SECRET\nACCESS_TOKEN = config2.ACCESS_TOKEN\nACCESS_TOKEN_SECRET = config2.ACCESS_TOKEN_SECRET\n<mask token>\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit=True)\n<mask token>\ndf = pd.read_csv('df_mix2.csv')\ndf = df.drop('Unnamed: 0', axis=1)[1:]\ndf = df.reset_index()\ndf = df.drop('index', axis=1)\ndf.columns = list(range(199))\ndf.head()\nlist_real = []\nlist_real = df[0]\nlist_real = list_real[:1440]\nlist_real\nfollower_list = []\n\n\ndef skip_func(list):\n cnt = 0\n for i in list:\n padd = [0] * 200\n try:\n got = api.friends_ids(i, count=200)\n except:\n print('========NG=============', cnt)\n follower_list.append(padd)\n else:\n print('==========OK==========', cnt)\n follower_list.append(got)\n cnt += 1\n\n\nskip_func(list_real)\npd.set_option('display.max_rows', 250)\ndf = pd.DataFrame(follower_list)\ndf1 = df.fillna(0)\ndf1 = df1.astype(int)\ndf0 = pd.DataFrame(list_real)\ndf0.columns = ['ID']\ndf2 = pd.concat([df0, df1], axis=1)\ndf2 = df2.fillna(0)\ndf2 = df2.astype(int)\ndf2.head()\ndf2.to_csv('ANetwork_moto_2020.csv')\n",
"step-4": "import numpy as np\nimport sys, os\nimport config2\nCONSUMER_KEY = config2.CONSUMER_KEY\nCONSUMER_SECRET = config2.CONSUMER_SECRET\nACCESS_TOKEN = config2.ACCESS_TOKEN\nACCESS_TOKEN_SECRET = config2.ACCESS_TOKEN_SECRET\nimport tweepy\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit=True)\nimport pandas as pd\ndf = pd.read_csv('df_mix2.csv')\ndf = df.drop('Unnamed: 0', axis=1)[1:]\ndf = df.reset_index()\ndf = df.drop('index', axis=1)\ndf.columns = list(range(199))\ndf.head()\nlist_real = []\nlist_real = df[0]\nlist_real = list_real[:1440]\nlist_real\nfollower_list = []\n\n\ndef skip_func(list):\n cnt = 0\n for i in list:\n padd = [0] * 200\n try:\n got = api.friends_ids(i, count=200)\n except:\n print('========NG=============', cnt)\n follower_list.append(padd)\n else:\n print('==========OK==========', cnt)\n follower_list.append(got)\n cnt += 1\n\n\nskip_func(list_real)\npd.set_option('display.max_rows', 250)\ndf = pd.DataFrame(follower_list)\ndf1 = df.fillna(0)\ndf1 = df1.astype(int)\ndf0 = pd.DataFrame(list_real)\ndf0.columns = ['ID']\ndf2 = pd.concat([df0, df1], axis=1)\ndf2 = df2.fillna(0)\ndf2 = df2.astype(int)\ndf2.head()\ndf2.to_csv('ANetwork_moto_2020.csv')\n",
"step-5": "#これは明日20200106に走らせましょう!\nimport numpy as np\nimport sys,os\nimport config2\nCONSUMER_KEY = config2.CONSUMER_KEY\nCONSUMER_SECRET = config2.CONSUMER_SECRET\nACCESS_TOKEN = config2.ACCESS_TOKEN\nACCESS_TOKEN_SECRET = config2.ACCESS_TOKEN_SECRET\nimport tweepy\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth,wait_on_rate_limit = True)#,wait_on_rate_limit_notify= True #PLOT TIME\n#user = sys.argv[1]\nimport pandas as pd\ndf=pd.read_csv(\"df_mix2.csv\")\ndf=df.drop(\"Unnamed: 0\",axis=1)[1:]\ndf=df.reset_index()\ndf=df.drop(\"index\",axis=1)\ndf.columns = list(range(199))\ndf.head()\nlist_real=[]\nlist_real=df[0]\nlist_real=list_real[:1440]\nlist_real\nfollower_list=[]\ndef skip_func(list):\n cnt=0\n for i in list:\n padd= [0] * 200\n try :\n #got=api.followers_ids(i,count=200)\n got=api.friends_ids(i,count=200)\n except :\n print('========NG=============',cnt)# pass (Real code == pass ,Practice code == print(NG))\n follower_list.append(padd)\n else :\n print('==========OK==========',cnt)\n #followerData[\"Follower_id\"] = str(a)\n #followerDatas.append(followerData)\n #followerDatas.append(a)\n follower_list.append(got)\n #print(str(i))\n #print(followerData)\n cnt+=1\nskip_func(list_real)\npd.set_option(\"display.max_rows\", 250)\ndf=pd.DataFrame(follower_list)\ndf1=df.fillna(0)\ndf1=df1.astype(int)\ndf0=pd.DataFrame(list_real)\ndf0.columns=[\"ID\"]\n#print(df0.head())# core user's follow user\n#pd.options.display.precision = 21\ndf2=pd.concat([df0, df1],axis=1)\ndf2=df2.fillna(0)\ndf2=df2.astype(int)\ndf2.head()\ndf2.to_csv(\"ANetwork_moto_2020.csv\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import imageio
import h5py
import numpy as np
def create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks):
with h5py.File(data_path, 'a') as f:
f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)
f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape), chunks=chunks)
def create_image_collection_test_data(folder, n_images, min_shape, max_shape):
im_folder = os.path.join(folder, 'images')
label_folder = os.path.join(folder, 'labels')
os.makedirs(im_folder, exist_ok=True)
os.makedirs(label_folder, exist_ok=True)
for i in range(n_images):
shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(min_shape, max_shape))
raw = np.random.rand(*shape).astype('int16')
label = np.random.randint(0, 4, size=shape)
imageio.imwrite(os.path.join(im_folder, f"im_{i}.tif"), raw)
imageio.imwrite(os.path.join(label_folder, f"im_{i}.tif"), label)
|
normal
|
{
"blob_id": "e3417980599448f1293b56cb95312088e7a8abe3",
"index": 9713,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks\n ):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape\n ), chunks=chunks)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks\n ):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape\n ), chunks=chunks)\n\n\ndef create_image_collection_test_data(folder, n_images, min_shape, max_shape):\n im_folder = os.path.join(folder, 'images')\n label_folder = os.path.join(folder, 'labels')\n os.makedirs(im_folder, exist_ok=True)\n os.makedirs(label_folder, exist_ok=True)\n for i in range(n_images):\n shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(\n min_shape, max_shape))\n raw = np.random.rand(*shape).astype('int16')\n label = np.random.randint(0, 4, size=shape)\n imageio.imwrite(os.path.join(im_folder, f'im_{i}.tif'), raw)\n imageio.imwrite(os.path.join(label_folder, f'im_{i}.tif'), label)\n",
"step-4": "import os\nimport imageio\nimport h5py\nimport numpy as np\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks\n ):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape\n ), chunks=chunks)\n\n\ndef create_image_collection_test_data(folder, n_images, min_shape, max_shape):\n im_folder = os.path.join(folder, 'images')\n label_folder = os.path.join(folder, 'labels')\n os.makedirs(im_folder, exist_ok=True)\n os.makedirs(label_folder, exist_ok=True)\n for i in range(n_images):\n shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(\n min_shape, max_shape))\n raw = np.random.rand(*shape).astype('int16')\n label = np.random.randint(0, 4, size=shape)\n imageio.imwrite(os.path.join(im_folder, f'im_{i}.tif'), raw)\n imageio.imwrite(os.path.join(label_folder, f'im_{i}.tif'), label)\n",
"step-5": "import os\nimport imageio\nimport h5py\nimport numpy as np\n\n\ndef create_segmentation_test_data(data_path, raw_key, label_key, shape, chunks):\n with h5py.File(data_path, 'a') as f:\n f.create_dataset(raw_key, data=np.random.rand(*shape), chunks=chunks)\n f.create_dataset(label_key, data=np.random.randint(0, 4, size=shape), chunks=chunks)\n\n\ndef create_image_collection_test_data(folder, n_images, min_shape, max_shape):\n im_folder = os.path.join(folder, 'images')\n label_folder = os.path.join(folder, 'labels')\n os.makedirs(im_folder, exist_ok=True)\n os.makedirs(label_folder, exist_ok=True)\n\n for i in range(n_images):\n shape = tuple(np.random.randint(mins, maxs) for mins, maxs in zip(min_shape, max_shape))\n raw = np.random.rand(*shape).astype('int16')\n label = np.random.randint(0, 4, size=shape)\n imageio.imwrite(os.path.join(im_folder, f\"im_{i}.tif\"), raw)\n imageio.imwrite(os.path.join(label_folder, f\"im_{i}.tif\"), label)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
conf = {'PROJECT': 'WCCIA', 'NAS_FOLDER':
'Q:\\GROUPS\\CORP_JGS_DSE\\ATI\\quotations', 'DB_SERVER': '10.0.36.129',
'DB_PORT': '34000/'}
|
normal
|
{
"blob_id": "fbce185671267bd70cf7b91696867b72dfcc8d5b",
"index": 1585,
"step-1": "<mask token>\n",
"step-2": "conf = {'PROJECT': 'WCCIA', 'NAS_FOLDER':\n 'Q:\\\\GROUPS\\\\CORP_JGS_DSE\\\\ATI\\\\quotations', 'DB_SERVER': '10.0.36.129',\n 'DB_PORT': '34000/'}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.set_printoptions(formatter={'float_kind': float_formatter})
<|reserved_special_token_0|>
if __name__ == '__main__':
import numpy.random as random
import sys
if len(sys.argv) == 1:
sys.exit('{} [directory]'.format(sys.argv[0]))
directory = sys.argv[1]
directory_aae = '{}/_aae/'.format(directory)
mode = sys.argv[2]
from latplan.util import get_ae_type
ae = default_networks[get_ae_type(directory)](directory).load()
if 'hanoi' in ae.path:
data = np.loadtxt(ae.local('all_actions.csv'), dtype=np.int8)
else:
data = np.loadtxt(ae.local('actions.csv'), dtype=np.int8)
parameters = {'N': [1], 'M': [128], 'layer': [400], 'encoder_layers': [
2], 'decoder_layers': [2], 'dropout': [0.4], 'batch_size': [2000],
'full_epoch': [1000], 'epoch': [1000], 'encoder_activation': [
'relu'], 'decoder_activation': ['relu'], 'lr': [0.001]}
print(data.shape)
try:
if 'learn' in mode:
raise Exception('learn')
aae = ActionAE(directory_aae).load()
except:
aae, _, _ = grid_search(curry(nn_task, ActionAE, directory_aae,
data[:int(len(data) * 0.9)], data[:int(len(data) * 0.9)], data[
int(len(data) * 0.9):], data[int(len(data) * 0.9):]),
default_parameters, parameters)
aae.save()
N = data.shape[1] // 2
actions = aae.encode_action(data, batch_size=1000).round()
histogram = np.squeeze(actions.sum(axis=0, dtype=int))
all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1],
actions.shape[2]), dtype=int)
for i, pos in enumerate(np.where(histogram > 0)[0]):
all_labels[i][0][pos] = 1
if 'plot' in mode:
aae.plot(data[:8], 'aae_train.png')
aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],
'aae_test.png')
aae.plot(data[:8], 'aae_train_decoded.png', ae=ae)
aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],
'aae_test_decoded.png', ae=ae)
transitions = aae.decode([np.repeat(data[:1, :N], len(all_labels),
axis=0), all_labels])
aae.plot(transitions, 'aae_all_actions_for_a_state.png', ae=ae)
from latplan.util.timer import Timer
suc = transitions[:, N:]
from latplan.util.plot import plot_grid, squarify
plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local(
'aae_all_actions_for_a_state_8x16.png'), verbose=True)
plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local(
'aae_all_actions_for_a_state_16x8.png'), verbose=True)
plot_grid(ae.decode_binary(data[:1, :N]), w=1, path=aae.local(
'aae_all_actions_for_a_state_state.png'), verbose=True)
if 'check' in mode:
from latplan.util.timer import Timer
with Timer('loading csv...'):
all_actions = np.loadtxt('{}/all_actions.csv'.format(directory),
dtype=np.int8)
with Timer('shuffling'):
random.shuffle(all_actions)
all_actions = all_actions[:10000]
count = 0
try:
pre_states = all_actions[:, :N]
suc_states = all_actions[:, N:]
pre_images = ae.decode_binary(pre_states, batch_size=1000)
suc_images = ae.decode_binary(suc_states, batch_size=1000)
import progressbar as pb
bar = pb.ProgressBar(max_value=len(all_actions), widgets=[pb.
Timer('Elap: %(elapsed) '), pb.AbsoluteETA(
'Est: %(elapsed) '), pb.Bar()])
for pre_state, suc_state, pre_image, suc_image in bar(zip(
pre_states, suc_states, pre_images, suc_images)):
generated_transitions = aae.decode([np.repeat([pre_state],
128, axis=0), all_labels], batch_size=1000)
generated_suc_states = generated_transitions[:, N:]
generated_suc_images = ae.decode_binary(generated_suc_states,
batch_size=1000)
from latplan.util import bce
errors = bce(generated_suc_images, np.repeat([suc_image],
128, axis=0), axis=(1, 2))
min_error = np.amin(errors)
if min_error < 0.01:
count += 1
finally:
print({'count': count, 'total': len(all_actions)})
actions = aae.encode_action(data, batch_size=1000)
actions_r = actions.round()
histogram = actions.sum(axis=0)
print(histogram)
histogram_r = actions_r.sum(axis=0, dtype=int)
print(histogram_r)
print(np.count_nonzero(histogram_r > 0))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
float_formatter = lambda x: '%.3f' % x
np.set_printoptions(formatter={'float_kind': float_formatter})
default_parameters = {'lr': 0.0001, 'batch_size': 2000, 'full_epoch': 1000,
'epoch': 1000, 'max_temperature': 5.0, 'min_temperature': 0.1, 'M': 2}
if __name__ == '__main__':
import numpy.random as random
import sys
if len(sys.argv) == 1:
sys.exit('{} [directory]'.format(sys.argv[0]))
directory = sys.argv[1]
directory_aae = '{}/_aae/'.format(directory)
mode = sys.argv[2]
from latplan.util import get_ae_type
ae = default_networks[get_ae_type(directory)](directory).load()
if 'hanoi' in ae.path:
data = np.loadtxt(ae.local('all_actions.csv'), dtype=np.int8)
else:
data = np.loadtxt(ae.local('actions.csv'), dtype=np.int8)
parameters = {'N': [1], 'M': [128], 'layer': [400], 'encoder_layers': [
2], 'decoder_layers': [2], 'dropout': [0.4], 'batch_size': [2000],
'full_epoch': [1000], 'epoch': [1000], 'encoder_activation': [
'relu'], 'decoder_activation': ['relu'], 'lr': [0.001]}
print(data.shape)
try:
if 'learn' in mode:
raise Exception('learn')
aae = ActionAE(directory_aae).load()
except:
aae, _, _ = grid_search(curry(nn_task, ActionAE, directory_aae,
data[:int(len(data) * 0.9)], data[:int(len(data) * 0.9)], data[
int(len(data) * 0.9):], data[int(len(data) * 0.9):]),
default_parameters, parameters)
aae.save()
N = data.shape[1] // 2
actions = aae.encode_action(data, batch_size=1000).round()
histogram = np.squeeze(actions.sum(axis=0, dtype=int))
all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1],
actions.shape[2]), dtype=int)
for i, pos in enumerate(np.where(histogram > 0)[0]):
all_labels[i][0][pos] = 1
if 'plot' in mode:
aae.plot(data[:8], 'aae_train.png')
aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],
'aae_test.png')
aae.plot(data[:8], 'aae_train_decoded.png', ae=ae)
aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],
'aae_test_decoded.png', ae=ae)
transitions = aae.decode([np.repeat(data[:1, :N], len(all_labels),
axis=0), all_labels])
aae.plot(transitions, 'aae_all_actions_for_a_state.png', ae=ae)
from latplan.util.timer import Timer
suc = transitions[:, N:]
from latplan.util.plot import plot_grid, squarify
plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local(
'aae_all_actions_for_a_state_8x16.png'), verbose=True)
plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local(
'aae_all_actions_for_a_state_16x8.png'), verbose=True)
plot_grid(ae.decode_binary(data[:1, :N]), w=1, path=aae.local(
'aae_all_actions_for_a_state_state.png'), verbose=True)
if 'check' in mode:
from latplan.util.timer import Timer
with Timer('loading csv...'):
all_actions = np.loadtxt('{}/all_actions.csv'.format(directory),
dtype=np.int8)
with Timer('shuffling'):
random.shuffle(all_actions)
all_actions = all_actions[:10000]
count = 0
try:
pre_states = all_actions[:, :N]
suc_states = all_actions[:, N:]
pre_images = ae.decode_binary(pre_states, batch_size=1000)
suc_images = ae.decode_binary(suc_states, batch_size=1000)
import progressbar as pb
bar = pb.ProgressBar(max_value=len(all_actions), widgets=[pb.
Timer('Elap: %(elapsed) '), pb.AbsoluteETA(
'Est: %(elapsed) '), pb.Bar()])
for pre_state, suc_state, pre_image, suc_image in bar(zip(
pre_states, suc_states, pre_images, suc_images)):
generated_transitions = aae.decode([np.repeat([pre_state],
128, axis=0), all_labels], batch_size=1000)
generated_suc_states = generated_transitions[:, N:]
generated_suc_images = ae.decode_binary(generated_suc_states,
batch_size=1000)
from latplan.util import bce
errors = bce(generated_suc_images, np.repeat([suc_image],
128, axis=0), axis=(1, 2))
min_error = np.amin(errors)
if min_error < 0.01:
count += 1
finally:
print({'count': count, 'total': len(all_actions)})
actions = aae.encode_action(data, batch_size=1000)
actions_r = actions.round()
histogram = actions.sum(axis=0)
print(histogram)
histogram_r = actions_r.sum(axis=0, dtype=int)
print(histogram_r)
print(np.count_nonzero(histogram_r > 0))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import warnings
import config
import numpy as np
from latplan.model import ActionAE, default_networks
from latplan.util import curry
from latplan.util.tuning import grid_search, nn_task
import keras.backend as K
import tensorflow as tf
float_formatter = lambda x: '%.3f' % x
np.set_printoptions(formatter={'float_kind': float_formatter})
default_parameters = {'lr': 0.0001, 'batch_size': 2000, 'full_epoch': 1000,
'epoch': 1000, 'max_temperature': 5.0, 'min_temperature': 0.1, 'M': 2}
if __name__ == '__main__':
import numpy.random as random
import sys
if len(sys.argv) == 1:
sys.exit('{} [directory]'.format(sys.argv[0]))
directory = sys.argv[1]
directory_aae = '{}/_aae/'.format(directory)
mode = sys.argv[2]
from latplan.util import get_ae_type
ae = default_networks[get_ae_type(directory)](directory).load()
if 'hanoi' in ae.path:
data = np.loadtxt(ae.local('all_actions.csv'), dtype=np.int8)
else:
data = np.loadtxt(ae.local('actions.csv'), dtype=np.int8)
parameters = {'N': [1], 'M': [128], 'layer': [400], 'encoder_layers': [
2], 'decoder_layers': [2], 'dropout': [0.4], 'batch_size': [2000],
'full_epoch': [1000], 'epoch': [1000], 'encoder_activation': [
'relu'], 'decoder_activation': ['relu'], 'lr': [0.001]}
print(data.shape)
try:
if 'learn' in mode:
raise Exception('learn')
aae = ActionAE(directory_aae).load()
except:
aae, _, _ = grid_search(curry(nn_task, ActionAE, directory_aae,
data[:int(len(data) * 0.9)], data[:int(len(data) * 0.9)], data[
int(len(data) * 0.9):], data[int(len(data) * 0.9):]),
default_parameters, parameters)
aae.save()
N = data.shape[1] // 2
actions = aae.encode_action(data, batch_size=1000).round()
histogram = np.squeeze(actions.sum(axis=0, dtype=int))
all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1],
actions.shape[2]), dtype=int)
for i, pos in enumerate(np.where(histogram > 0)[0]):
all_labels[i][0][pos] = 1
if 'plot' in mode:
aae.plot(data[:8], 'aae_train.png')
aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],
'aae_test.png')
aae.plot(data[:8], 'aae_train_decoded.png', ae=ae)
aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],
'aae_test_decoded.png', ae=ae)
transitions = aae.decode([np.repeat(data[:1, :N], len(all_labels),
axis=0), all_labels])
aae.plot(transitions, 'aae_all_actions_for_a_state.png', ae=ae)
from latplan.util.timer import Timer
suc = transitions[:, N:]
from latplan.util.plot import plot_grid, squarify
plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local(
'aae_all_actions_for_a_state_8x16.png'), verbose=True)
plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local(
'aae_all_actions_for_a_state_16x8.png'), verbose=True)
plot_grid(ae.decode_binary(data[:1, :N]), w=1, path=aae.local(
'aae_all_actions_for_a_state_state.png'), verbose=True)
if 'check' in mode:
from latplan.util.timer import Timer
with Timer('loading csv...'):
all_actions = np.loadtxt('{}/all_actions.csv'.format(directory),
dtype=np.int8)
with Timer('shuffling'):
random.shuffle(all_actions)
all_actions = all_actions[:10000]
count = 0
try:
pre_states = all_actions[:, :N]
suc_states = all_actions[:, N:]
pre_images = ae.decode_binary(pre_states, batch_size=1000)
suc_images = ae.decode_binary(suc_states, batch_size=1000)
import progressbar as pb
bar = pb.ProgressBar(max_value=len(all_actions), widgets=[pb.
Timer('Elap: %(elapsed) '), pb.AbsoluteETA(
'Est: %(elapsed) '), pb.Bar()])
for pre_state, suc_state, pre_image, suc_image in bar(zip(
pre_states, suc_states, pre_images, suc_images)):
generated_transitions = aae.decode([np.repeat([pre_state],
128, axis=0), all_labels], batch_size=1000)
generated_suc_states = generated_transitions[:, N:]
generated_suc_images = ae.decode_binary(generated_suc_states,
batch_size=1000)
from latplan.util import bce
errors = bce(generated_suc_images, np.repeat([suc_image],
128, axis=0), axis=(1, 2))
min_error = np.amin(errors)
if min_error < 0.01:
count += 1
finally:
print({'count': count, 'total': len(all_actions)})
actions = aae.encode_action(data, batch_size=1000)
actions_r = actions.round()
histogram = actions.sum(axis=0)
print(histogram)
histogram_r = actions_r.sum(axis=0, dtype=int)
print(histogram_r)
print(np.count_nonzero(histogram_r > 0))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
import warnings
import config
import numpy as np
from latplan.model import ActionAE, default_networks
from latplan.util import curry
from latplan.util.tuning import grid_search, nn_task
import keras.backend as K
import tensorflow as tf
float_formatter = lambda x: "%.3f" % x
np.set_printoptions(formatter={'float_kind':float_formatter})
################################################################
# default values
default_parameters = {
'lr' : 0.0001,
'batch_size' : 2000,
'full_epoch' : 1000,
'epoch' : 1000,
'max_temperature' : 5.0,
'min_temperature' : 0.1,
'M' : 2,
}
if __name__ == '__main__':
import numpy.random as random
import sys
if len(sys.argv) == 1:
sys.exit("{} [directory]".format(sys.argv[0]))
directory = sys.argv[1]
directory_aae = "{}/_aae/".format(directory)
mode = sys.argv[2]
from latplan.util import get_ae_type
ae = default_networks[get_ae_type(directory)](directory).load()
if "hanoi" in ae.path:
data = np.loadtxt(ae.local("all_actions.csv"),dtype=np.int8)
else:
data = np.loadtxt(ae.local("actions.csv"),dtype=np.int8)
parameters = {
'N' :[1],
'M' :[128],
'layer' :[400],# 200,300,400,700,1000
'encoder_layers' : [2], # 0,2,3
'decoder_layers' : [2], # 0,1,3
'dropout' :[0.4], #[0.1,0.4],
# 'dropout_z' :[False],
'batch_size' :[2000],
'full_epoch' :[1000],
'epoch' :[1000],
'encoder_activation' :['relu'], # 'tanh'
'decoder_activation' :['relu'], # 'tanh',
# quick eval
'lr' :[0.001],
}
print(data.shape)
try:
if 'learn' in mode:
raise Exception('learn')
aae = ActionAE(directory_aae).load()
except:
aae,_,_ = grid_search(curry(nn_task, ActionAE, directory_aae,
data[:int(len(data)*0.9)], data[:int(len(data)*0.9)],
data[int(len(data)*0.9):], data[int(len(data)*0.9):],),
default_parameters,
parameters)
aae.save()
N = data.shape[1]//2
actions = aae.encode_action(data, batch_size=1000).round()
histogram = np.squeeze(actions.sum(axis=0,dtype=int))
all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1], actions.shape[2]), dtype=int)
for i, pos in enumerate(np.where(histogram > 0)[0]):
all_labels[i][0][pos] = 1
if 'plot' in mode:
aae.plot(data[:8], "aae_train.png")
aae.plot(data[int(len(data)*0.9):int(len(data)*0.9)+8], "aae_test.png")
aae.plot(data[:8], "aae_train_decoded.png", ae=ae)
aae.plot(data[int(len(data)*0.9):int(len(data)*0.9)+8], "aae_test_decoded.png", ae=ae)
transitions = aae.decode([np.repeat(data[:1,:N], len(all_labels), axis=0), all_labels])
aae.plot(transitions, "aae_all_actions_for_a_state.png", ae=ae)
from latplan.util.timer import Timer
# with Timer("loading csv..."):
# all_actions = np.loadtxt("{}/all_actions.csv".format(directory),dtype=np.int8)
# transitions = aae.decode([np.repeat(all_actions[:1,:N], len(all_labels), axis=0), all_labels])
suc = transitions[:,N:]
from latplan.util.plot import plot_grid, squarify
plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local("aae_all_actions_for_a_state_8x16.png"), verbose=True)
plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local("aae_all_actions_for_a_state_16x8.png"), verbose=True)
plot_grid(ae.decode_binary(data[:1,:N]), w=1, path=aae.local("aae_all_actions_for_a_state_state.png"), verbose=True)
if 'check' in mode:
from latplan.util.timer import Timer
with Timer("loading csv..."):
all_actions = np.loadtxt("{}/all_actions.csv".format(directory),dtype=np.int8)
with Timer("shuffling"):
random.shuffle(all_actions)
all_actions = all_actions[:10000]
count = 0
try:
pre_states = all_actions[:,:N]
suc_states = all_actions[:,N:]
pre_images = ae.decode_binary(pre_states,batch_size=1000)
suc_images = ae.decode_binary(suc_states,batch_size=1000)
import progressbar as pb
bar = pb.ProgressBar(
max_value=len(all_actions),
widgets=[
pb.Timer("Elap: %(elapsed) "),
pb.AbsoluteETA("Est: %(elapsed) "),
pb.Bar(),
])
for pre_state,suc_state,pre_image,suc_image in bar(zip(pre_states,suc_states,pre_images,suc_images)):
generated_transitions = aae.decode([
np.repeat([pre_state],128,axis=0),
all_labels,
],batch_size=1000)
generated_suc_states = generated_transitions[:,N:]
generated_suc_images = ae.decode_binary(generated_suc_states,batch_size=1000)
from latplan.util import bce
errors = bce(generated_suc_images, np.repeat([suc_image],128,axis=0), axis=(1,2))
min_error = np.amin(errors)
if min_error < 0.01:
count += 1
finally:
print({"count": count, "total":len(all_actions)})
actions = aae.encode_action(data, batch_size=1000)
actions_r = actions.round()
histogram = actions.sum(axis=0)
print(histogram)
histogram_r = actions_r.sum(axis=0,dtype=int)
print(histogram_r)
print (np.count_nonzero(histogram_r > 0))
"""* Summary:
Input: a subset of valid action pairs.
* Training:
* Evaluation:
If the number of actions are too large, they simply does not appear in the
training examples. This means those actions can be pruned, and you can lower the number of actions.
TODO:
verify all valid successors are generated, negative prior exploiting that fact
consider changing the input data: all successors are provided, closed world assumption
mearging action discriminator and state discriminator into one network
AD: use the minimum activation among the correct actions as a threshold
or use 1.0
AD: use action label as an additional input to discriminaotr (??)
AD: ensemble
"""
|
flexible
|
{
"blob_id": "f1c6340880b52ba86856913f74c7d589d9b49f49",
"index": 5179,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.set_printoptions(formatter={'float_kind': float_formatter})\n<mask token>\nif __name__ == '__main__':\n import numpy.random as random\n import sys\n if len(sys.argv) == 1:\n sys.exit('{} [directory]'.format(sys.argv[0]))\n directory = sys.argv[1]\n directory_aae = '{}/_aae/'.format(directory)\n mode = sys.argv[2]\n from latplan.util import get_ae_type\n ae = default_networks[get_ae_type(directory)](directory).load()\n if 'hanoi' in ae.path:\n data = np.loadtxt(ae.local('all_actions.csv'), dtype=np.int8)\n else:\n data = np.loadtxt(ae.local('actions.csv'), dtype=np.int8)\n parameters = {'N': [1], 'M': [128], 'layer': [400], 'encoder_layers': [\n 2], 'decoder_layers': [2], 'dropout': [0.4], 'batch_size': [2000],\n 'full_epoch': [1000], 'epoch': [1000], 'encoder_activation': [\n 'relu'], 'decoder_activation': ['relu'], 'lr': [0.001]}\n print(data.shape)\n try:\n if 'learn' in mode:\n raise Exception('learn')\n aae = ActionAE(directory_aae).load()\n except:\n aae, _, _ = grid_search(curry(nn_task, ActionAE, directory_aae,\n data[:int(len(data) * 0.9)], data[:int(len(data) * 0.9)], data[\n int(len(data) * 0.9):], data[int(len(data) * 0.9):]),\n default_parameters, parameters)\n aae.save()\n N = data.shape[1] // 2\n actions = aae.encode_action(data, batch_size=1000).round()\n histogram = np.squeeze(actions.sum(axis=0, dtype=int))\n all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1],\n actions.shape[2]), dtype=int)\n for i, pos in enumerate(np.where(histogram > 0)[0]):\n all_labels[i][0][pos] = 1\n if 'plot' in mode:\n aae.plot(data[:8], 'aae_train.png')\n aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],\n 'aae_test.png')\n aae.plot(data[:8], 'aae_train_decoded.png', ae=ae)\n aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],\n 'aae_test_decoded.png', ae=ae)\n transitions = aae.decode([np.repeat(data[:1, :N], len(all_labels),\n axis=0), all_labels])\n aae.plot(transitions, 
'aae_all_actions_for_a_state.png', ae=ae)\n from latplan.util.timer import Timer\n suc = transitions[:, N:]\n from latplan.util.plot import plot_grid, squarify\n plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local(\n 'aae_all_actions_for_a_state_8x16.png'), verbose=True)\n plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local(\n 'aae_all_actions_for_a_state_16x8.png'), verbose=True)\n plot_grid(ae.decode_binary(data[:1, :N]), w=1, path=aae.local(\n 'aae_all_actions_for_a_state_state.png'), verbose=True)\n if 'check' in mode:\n from latplan.util.timer import Timer\n with Timer('loading csv...'):\n all_actions = np.loadtxt('{}/all_actions.csv'.format(directory),\n dtype=np.int8)\n with Timer('shuffling'):\n random.shuffle(all_actions)\n all_actions = all_actions[:10000]\n count = 0\n try:\n pre_states = all_actions[:, :N]\n suc_states = all_actions[:, N:]\n pre_images = ae.decode_binary(pre_states, batch_size=1000)\n suc_images = ae.decode_binary(suc_states, batch_size=1000)\n import progressbar as pb\n bar = pb.ProgressBar(max_value=len(all_actions), widgets=[pb.\n Timer('Elap: %(elapsed) '), pb.AbsoluteETA(\n 'Est: %(elapsed) '), pb.Bar()])\n for pre_state, suc_state, pre_image, suc_image in bar(zip(\n pre_states, suc_states, pre_images, suc_images)):\n generated_transitions = aae.decode([np.repeat([pre_state], \n 128, axis=0), all_labels], batch_size=1000)\n generated_suc_states = generated_transitions[:, N:]\n generated_suc_images = ae.decode_binary(generated_suc_states,\n batch_size=1000)\n from latplan.util import bce\n errors = bce(generated_suc_images, np.repeat([suc_image], \n 128, axis=0), axis=(1, 2))\n min_error = np.amin(errors)\n if min_error < 0.01:\n count += 1\n finally:\n print({'count': count, 'total': len(all_actions)})\n actions = aae.encode_action(data, batch_size=1000)\n actions_r = actions.round()\n histogram = actions.sum(axis=0)\n print(histogram)\n histogram_r = actions_r.sum(axis=0, dtype=int)\n 
print(histogram_r)\n print(np.count_nonzero(histogram_r > 0))\n<mask token>\n",
"step-3": "<mask token>\nfloat_formatter = lambda x: '%.3f' % x\nnp.set_printoptions(formatter={'float_kind': float_formatter})\ndefault_parameters = {'lr': 0.0001, 'batch_size': 2000, 'full_epoch': 1000,\n 'epoch': 1000, 'max_temperature': 5.0, 'min_temperature': 0.1, 'M': 2}\nif __name__ == '__main__':\n import numpy.random as random\n import sys\n if len(sys.argv) == 1:\n sys.exit('{} [directory]'.format(sys.argv[0]))\n directory = sys.argv[1]\n directory_aae = '{}/_aae/'.format(directory)\n mode = sys.argv[2]\n from latplan.util import get_ae_type\n ae = default_networks[get_ae_type(directory)](directory).load()\n if 'hanoi' in ae.path:\n data = np.loadtxt(ae.local('all_actions.csv'), dtype=np.int8)\n else:\n data = np.loadtxt(ae.local('actions.csv'), dtype=np.int8)\n parameters = {'N': [1], 'M': [128], 'layer': [400], 'encoder_layers': [\n 2], 'decoder_layers': [2], 'dropout': [0.4], 'batch_size': [2000],\n 'full_epoch': [1000], 'epoch': [1000], 'encoder_activation': [\n 'relu'], 'decoder_activation': ['relu'], 'lr': [0.001]}\n print(data.shape)\n try:\n if 'learn' in mode:\n raise Exception('learn')\n aae = ActionAE(directory_aae).load()\n except:\n aae, _, _ = grid_search(curry(nn_task, ActionAE, directory_aae,\n data[:int(len(data) * 0.9)], data[:int(len(data) * 0.9)], data[\n int(len(data) * 0.9):], data[int(len(data) * 0.9):]),\n default_parameters, parameters)\n aae.save()\n N = data.shape[1] // 2\n actions = aae.encode_action(data, batch_size=1000).round()\n histogram = np.squeeze(actions.sum(axis=0, dtype=int))\n all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1],\n actions.shape[2]), dtype=int)\n for i, pos in enumerate(np.where(histogram > 0)[0]):\n all_labels[i][0][pos] = 1\n if 'plot' in mode:\n aae.plot(data[:8], 'aae_train.png')\n aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],\n 'aae_test.png')\n aae.plot(data[:8], 'aae_train_decoded.png', ae=ae)\n aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],\n 
'aae_test_decoded.png', ae=ae)\n transitions = aae.decode([np.repeat(data[:1, :N], len(all_labels),\n axis=0), all_labels])\n aae.plot(transitions, 'aae_all_actions_for_a_state.png', ae=ae)\n from latplan.util.timer import Timer\n suc = transitions[:, N:]\n from latplan.util.plot import plot_grid, squarify\n plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local(\n 'aae_all_actions_for_a_state_8x16.png'), verbose=True)\n plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local(\n 'aae_all_actions_for_a_state_16x8.png'), verbose=True)\n plot_grid(ae.decode_binary(data[:1, :N]), w=1, path=aae.local(\n 'aae_all_actions_for_a_state_state.png'), verbose=True)\n if 'check' in mode:\n from latplan.util.timer import Timer\n with Timer('loading csv...'):\n all_actions = np.loadtxt('{}/all_actions.csv'.format(directory),\n dtype=np.int8)\n with Timer('shuffling'):\n random.shuffle(all_actions)\n all_actions = all_actions[:10000]\n count = 0\n try:\n pre_states = all_actions[:, :N]\n suc_states = all_actions[:, N:]\n pre_images = ae.decode_binary(pre_states, batch_size=1000)\n suc_images = ae.decode_binary(suc_states, batch_size=1000)\n import progressbar as pb\n bar = pb.ProgressBar(max_value=len(all_actions), widgets=[pb.\n Timer('Elap: %(elapsed) '), pb.AbsoluteETA(\n 'Est: %(elapsed) '), pb.Bar()])\n for pre_state, suc_state, pre_image, suc_image in bar(zip(\n pre_states, suc_states, pre_images, suc_images)):\n generated_transitions = aae.decode([np.repeat([pre_state], \n 128, axis=0), all_labels], batch_size=1000)\n generated_suc_states = generated_transitions[:, N:]\n generated_suc_images = ae.decode_binary(generated_suc_states,\n batch_size=1000)\n from latplan.util import bce\n errors = bce(generated_suc_images, np.repeat([suc_image], \n 128, axis=0), axis=(1, 2))\n min_error = np.amin(errors)\n if min_error < 0.01:\n count += 1\n finally:\n print({'count': count, 'total': len(all_actions)})\n actions = aae.encode_action(data, batch_size=1000)\n 
actions_r = actions.round()\n histogram = actions.sum(axis=0)\n print(histogram)\n histogram_r = actions_r.sum(axis=0, dtype=int)\n print(histogram_r)\n print(np.count_nonzero(histogram_r > 0))\n<mask token>\n",
"step-4": "import warnings\nimport config\nimport numpy as np\nfrom latplan.model import ActionAE, default_networks\nfrom latplan.util import curry\nfrom latplan.util.tuning import grid_search, nn_task\nimport keras.backend as K\nimport tensorflow as tf\nfloat_formatter = lambda x: '%.3f' % x\nnp.set_printoptions(formatter={'float_kind': float_formatter})\ndefault_parameters = {'lr': 0.0001, 'batch_size': 2000, 'full_epoch': 1000,\n 'epoch': 1000, 'max_temperature': 5.0, 'min_temperature': 0.1, 'M': 2}\nif __name__ == '__main__':\n import numpy.random as random\n import sys\n if len(sys.argv) == 1:\n sys.exit('{} [directory]'.format(sys.argv[0]))\n directory = sys.argv[1]\n directory_aae = '{}/_aae/'.format(directory)\n mode = sys.argv[2]\n from latplan.util import get_ae_type\n ae = default_networks[get_ae_type(directory)](directory).load()\n if 'hanoi' in ae.path:\n data = np.loadtxt(ae.local('all_actions.csv'), dtype=np.int8)\n else:\n data = np.loadtxt(ae.local('actions.csv'), dtype=np.int8)\n parameters = {'N': [1], 'M': [128], 'layer': [400], 'encoder_layers': [\n 2], 'decoder_layers': [2], 'dropout': [0.4], 'batch_size': [2000],\n 'full_epoch': [1000], 'epoch': [1000], 'encoder_activation': [\n 'relu'], 'decoder_activation': ['relu'], 'lr': [0.001]}\n print(data.shape)\n try:\n if 'learn' in mode:\n raise Exception('learn')\n aae = ActionAE(directory_aae).load()\n except:\n aae, _, _ = grid_search(curry(nn_task, ActionAE, directory_aae,\n data[:int(len(data) * 0.9)], data[:int(len(data) * 0.9)], data[\n int(len(data) * 0.9):], data[int(len(data) * 0.9):]),\n default_parameters, parameters)\n aae.save()\n N = data.shape[1] // 2\n actions = aae.encode_action(data, batch_size=1000).round()\n histogram = np.squeeze(actions.sum(axis=0, dtype=int))\n all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1],\n actions.shape[2]), dtype=int)\n for i, pos in enumerate(np.where(histogram > 0)[0]):\n all_labels[i][0][pos] = 1\n if 'plot' in mode:\n 
aae.plot(data[:8], 'aae_train.png')\n aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],\n 'aae_test.png')\n aae.plot(data[:8], 'aae_train_decoded.png', ae=ae)\n aae.plot(data[int(len(data) * 0.9):int(len(data) * 0.9) + 8],\n 'aae_test_decoded.png', ae=ae)\n transitions = aae.decode([np.repeat(data[:1, :N], len(all_labels),\n axis=0), all_labels])\n aae.plot(transitions, 'aae_all_actions_for_a_state.png', ae=ae)\n from latplan.util.timer import Timer\n suc = transitions[:, N:]\n from latplan.util.plot import plot_grid, squarify\n plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local(\n 'aae_all_actions_for_a_state_8x16.png'), verbose=True)\n plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local(\n 'aae_all_actions_for_a_state_16x8.png'), verbose=True)\n plot_grid(ae.decode_binary(data[:1, :N]), w=1, path=aae.local(\n 'aae_all_actions_for_a_state_state.png'), verbose=True)\n if 'check' in mode:\n from latplan.util.timer import Timer\n with Timer('loading csv...'):\n all_actions = np.loadtxt('{}/all_actions.csv'.format(directory),\n dtype=np.int8)\n with Timer('shuffling'):\n random.shuffle(all_actions)\n all_actions = all_actions[:10000]\n count = 0\n try:\n pre_states = all_actions[:, :N]\n suc_states = all_actions[:, N:]\n pre_images = ae.decode_binary(pre_states, batch_size=1000)\n suc_images = ae.decode_binary(suc_states, batch_size=1000)\n import progressbar as pb\n bar = pb.ProgressBar(max_value=len(all_actions), widgets=[pb.\n Timer('Elap: %(elapsed) '), pb.AbsoluteETA(\n 'Est: %(elapsed) '), pb.Bar()])\n for pre_state, suc_state, pre_image, suc_image in bar(zip(\n pre_states, suc_states, pre_images, suc_images)):\n generated_transitions = aae.decode([np.repeat([pre_state], \n 128, axis=0), all_labels], batch_size=1000)\n generated_suc_states = generated_transitions[:, N:]\n generated_suc_images = ae.decode_binary(generated_suc_states,\n batch_size=1000)\n from latplan.util import bce\n errors = bce(generated_suc_images, 
np.repeat([suc_image], \n 128, axis=0), axis=(1, 2))\n min_error = np.amin(errors)\n if min_error < 0.01:\n count += 1\n finally:\n print({'count': count, 'total': len(all_actions)})\n actions = aae.encode_action(data, batch_size=1000)\n actions_r = actions.round()\n histogram = actions.sum(axis=0)\n print(histogram)\n histogram_r = actions_r.sum(axis=0, dtype=int)\n print(histogram_r)\n print(np.count_nonzero(histogram_r > 0))\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\nimport warnings\nimport config\nimport numpy as np\nfrom latplan.model import ActionAE, default_networks\nfrom latplan.util import curry\nfrom latplan.util.tuning import grid_search, nn_task\n\nimport keras.backend as K\nimport tensorflow as tf\n\nfloat_formatter = lambda x: \"%.3f\" % x\nnp.set_printoptions(formatter={'float_kind':float_formatter})\n\n################################################################\n\n# default values\ndefault_parameters = {\n 'lr' : 0.0001,\n 'batch_size' : 2000,\n 'full_epoch' : 1000,\n 'epoch' : 1000,\n 'max_temperature' : 5.0,\n 'min_temperature' : 0.1,\n 'M' : 2,\n}\n\nif __name__ == '__main__':\n import numpy.random as random\n\n import sys\n if len(sys.argv) == 1:\n sys.exit(\"{} [directory]\".format(sys.argv[0]))\n\n directory = sys.argv[1]\n directory_aae = \"{}/_aae/\".format(directory)\n mode = sys.argv[2]\n \n from latplan.util import get_ae_type\n ae = default_networks[get_ae_type(directory)](directory).load()\n\n if \"hanoi\" in ae.path:\n data = np.loadtxt(ae.local(\"all_actions.csv\"),dtype=np.int8)\n else:\n data = np.loadtxt(ae.local(\"actions.csv\"),dtype=np.int8)\n \n parameters = {\n 'N' :[1],\n 'M' :[128],\n 'layer' :[400],# 200,300,400,700,1000\n 'encoder_layers' : [2], # 0,2,3\n 'decoder_layers' : [2], # 0,1,3\n 'dropout' :[0.4], #[0.1,0.4],\n # 'dropout_z' :[False],\n 'batch_size' :[2000],\n 'full_epoch' :[1000],\n 'epoch' :[1000],\n 'encoder_activation' :['relu'], # 'tanh'\n 'decoder_activation' :['relu'], # 'tanh',\n # quick eval\n 'lr' :[0.001],\n }\n print(data.shape)\n try:\n if 'learn' in mode:\n raise Exception('learn')\n aae = ActionAE(directory_aae).load()\n except:\n aae,_,_ = grid_search(curry(nn_task, ActionAE, directory_aae,\n data[:int(len(data)*0.9)], data[:int(len(data)*0.9)],\n data[int(len(data)*0.9):], data[int(len(data)*0.9):],),\n default_parameters,\n parameters)\n aae.save()\n\n N = data.shape[1]//2\n \n actions = aae.encode_action(data, 
batch_size=1000).round()\n histogram = np.squeeze(actions.sum(axis=0,dtype=int))\n all_labels = np.zeros((np.count_nonzero(histogram), actions.shape[1], actions.shape[2]), dtype=int)\n for i, pos in enumerate(np.where(histogram > 0)[0]):\n all_labels[i][0][pos] = 1\n \n if 'plot' in mode:\n aae.plot(data[:8], \"aae_train.png\")\n aae.plot(data[int(len(data)*0.9):int(len(data)*0.9)+8], \"aae_test.png\")\n \n \n aae.plot(data[:8], \"aae_train_decoded.png\", ae=ae)\n aae.plot(data[int(len(data)*0.9):int(len(data)*0.9)+8], \"aae_test_decoded.png\", ae=ae)\n \n transitions = aae.decode([np.repeat(data[:1,:N], len(all_labels), axis=0), all_labels])\n aae.plot(transitions, \"aae_all_actions_for_a_state.png\", ae=ae)\n \n from latplan.util.timer import Timer\n # with Timer(\"loading csv...\"):\n # all_actions = np.loadtxt(\"{}/all_actions.csv\".format(directory),dtype=np.int8)\n # transitions = aae.decode([np.repeat(all_actions[:1,:N], len(all_labels), axis=0), all_labels])\n suc = transitions[:,N:]\n from latplan.util.plot import plot_grid, squarify\n plot_grid([x for x in ae.decode_binary(suc)], w=8, path=aae.local(\"aae_all_actions_for_a_state_8x16.png\"), verbose=True)\n plot_grid([x for x in ae.decode_binary(suc)], w=16, path=aae.local(\"aae_all_actions_for_a_state_16x8.png\"), verbose=True)\n plot_grid(ae.decode_binary(data[:1,:N]), w=1, path=aae.local(\"aae_all_actions_for_a_state_state.png\"), verbose=True)\n \n \n if 'check' in mode:\n from latplan.util.timer import Timer\n with Timer(\"loading csv...\"):\n all_actions = np.loadtxt(\"{}/all_actions.csv\".format(directory),dtype=np.int8)\n\n with Timer(\"shuffling\"):\n random.shuffle(all_actions)\n all_actions = all_actions[:10000]\n\n count = 0\n try:\n pre_states = all_actions[:,:N]\n suc_states = all_actions[:,N:]\n pre_images = ae.decode_binary(pre_states,batch_size=1000)\n suc_images = ae.decode_binary(suc_states,batch_size=1000)\n\n import progressbar as pb\n bar = pb.ProgressBar(\n 
max_value=len(all_actions),\n widgets=[\n pb.Timer(\"Elap: %(elapsed) \"),\n pb.AbsoluteETA(\"Est: %(elapsed) \"),\n pb.Bar(),\n ])\n for pre_state,suc_state,pre_image,suc_image in bar(zip(pre_states,suc_states,pre_images,suc_images)):\n \n generated_transitions = aae.decode([\n np.repeat([pre_state],128,axis=0),\n all_labels,\n ],batch_size=1000)\n generated_suc_states = generated_transitions[:,N:]\n generated_suc_images = ae.decode_binary(generated_suc_states,batch_size=1000)\n\n from latplan.util import bce\n errors = bce(generated_suc_images, np.repeat([suc_image],128,axis=0), axis=(1,2))\n min_error = np.amin(errors)\n if min_error < 0.01:\n count += 1\n finally:\n print({\"count\": count, \"total\":len(all_actions)})\n \n actions = aae.encode_action(data, batch_size=1000)\n actions_r = actions.round()\n\n histogram = actions.sum(axis=0)\n print(histogram)\n histogram_r = actions_r.sum(axis=0,dtype=int)\n print(histogram_r)\n print (np.count_nonzero(histogram_r > 0))\n \n\"\"\"* Summary:\nInput: a subset of valid action pairs.\n\n* Training:\n\n* Evaluation:\n\n\n\nIf the number of actions are too large, they simply does not appear in the\ntraining examples. This means those actions can be pruned, and you can lower the number of actions.\n\n\nTODO:\nverify all valid successors are generated, negative prior exploiting that fact\n\nconsider changing the input data: all successors are provided, closed world assumption\n\nmearging action discriminator and state discriminator into one network\n\n\nAD: use the minimum activation among the correct actions as a threshold\nor use 1.0\n\nAD: use action label as an additional input to discriminaotr (??)\n\nAD: ensemble\n\n\n\n\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 7 17:42:18 2018
@author: Tim
"""
import music21 as m21
import music21.features.jSymbolic as jsym
import scipy.stats
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# round all duration values to this many digits!
# some are stored as fractions and that's just inconvenient
ROUND_DURS_DIGITS = 5
# N.B. THE HEADERS ARE:
# 0: tunefamily
# 1: songid
# 2: motifid
# 3: begintime
# 4: endtime
# 5: duration
# 6: startindex
# 7: endindex
# 8: numberofnotes
# 9: motifclass
# 10: description
# 11: annotator
# 12: changes
# try to fetch a single motif
# def extractMotif(annEntry, songs):
# """
# given a row from the annotation file and the database of score files,
# return the notes of the associated motif and some of its metadata as a
# dictionary.
# """
#
# songName = annEntry[1]
# inStart = int(annEntry[6])
# numNotes = int(annEntry[8])
#
# #add number of ties before start index from start index; meertens
# #DOESN'T count tied notes as notes but music21 DOES
# allNotes = songs[songName].score.flat.notes.stream()
# #subtract 1 here to get the first note of the occurence in the slice
# #so that we can get rid of it if it's a rest
# beforeSlice = allNotes[:inStart-1]
# numTies = 0
# for n in beforeSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
# inStart += numTies
#
# #do the same for ties inside of the snippet, but also keep track of where
# #they are and save that information with the motif so we don't have to go
# #through this procedure again
# numTies = 0
# inSlice = allNotes[inStart:(inStart+numNotes)]
# for n in inSlice:
# if(n.tie != None):
# if(n.tie.type == 'start'):
# numTies += 1
#
#
# #this new numNotes will work with music21
# numNotes += numTies
#
# #NOW we know that we have the actual motif!
# motif = allNotes[inStart:(inStart+numNotes)]
#
# return {'notes':motif,
# 'startInd':inStart,
# 'endInd':(inStart+numNotes),
# 'songID':annEntry[1],
# 'motifClass':annEntry[9],
# 'duration':annEntry[5]}
# annotated first starting at 0, but tied notes are only counted for the onset
# must disregard tied notes when doing start/end indices
# so: consider the list of notes up to the first index. if there's n ties
# that live behind the start index, increment the start index by n. when done,
# look 8 notes ahead and do the same thing
def extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):
    """
    Return the music21 notes of one pattern occurrence.

    Given a song name, the occurrence's start and end note indices
    (inclusive), and the database of score files, return the slice of the
    song's flattened note stream covering that occurrence.

    useTies determines whether tied notes count as one note or two for the
    purpose of indexing (True for one, False for two). This is needed
    because MTC-ANN annotations count a tied note once, while music21's
    flat stream contains a separate entry for each tied component; when
    useTies is True the start index is shifted to compensate.
    """
    numNotes = inEnd - inStart + 1  # inclusive endpoints
    allNotes = songs[songName].score.flat.notes.stream()
    # Shift the start index right by the number of tie-continuations that
    # precede it: MTC-ANN does not count tied notes but music21 does.
    if(useTies):
        # NOTE(review): with inStart == 0 this slice becomes
        # allNotes[:-1] (nearly the whole song) — confirm the annotation
        # data always has inStart >= 1 when useTies is requested.
        beforeSlice = allNotes[:inStart-1]
        numTies = 0
        for n in beforeSlice:
            if(n.tie is not None):
                if(n.tie.type == 'start'):
                    numTies += 1
        inStart += numTies
    # Do the same for ties inside the snippet: every tie start adds one
    # extra stream entry, so the music21 note count must grow by as much.
    # TODO: record where these ties are with the occurrence so this pass
    # does not have to be repeated downstream.
    numTies = 0
    inSlice = allNotes[inStart:(inStart+numNotes)]
    for n in inSlice:
        if(n.tie is not None):
            if(n.tie.type == 'start'):
                numTies += 1
    # this adjusted numNotes now matches music21's note counting
    numNotes += numTies
    pattOcc = allNotes[inStart:(inStart+numNotes)]
    return pattOcc
def getFeaturesForSongs(score):
    """
    Build a dictionary of song-level melodic and rhythmic features.

    The score's flattened note stream is summarized into empirical
    interval/duration probability tables, pitch and interval summary
    statistics, and normalized histograms over intervals, pitch classes,
    and duration buckets (powers of two, in quarter lengths).
    """
    melody = score.flat.notes.stream()
    pitches = [note.pitch.midi for note in melody]
    steps = [curr - prev for prev, curr in zip(pitches, pitches[1:])]
    durations = [round(float(note.quarterLength), ROUND_DURS_DIGITS)
                 for note in melody]

    features = {}

    # Empirical probability of each melodic interval in the song.
    features['interval_probs'] = {
        step: count / len(steps) for step, count in Counter(steps).items()}

    features['pitch_mean'] = np.mean(pitches)
    features['interval_mean'] = np.mean(np.abs(steps))
    # Net melodic direction in [-1, 1]: +1 all ascending, -1 all descending.
    features['interval_signs'] = sum(np.sign(steps)) / len(steps)
    features['interval_prop_small'] = (
        sum(1 for s in steps if abs(s) <= 2) / len(steps))
    features['interval_prop_large'] = (
        sum(1 for s in steps if abs(s) >= 7) / len(steps))

    # Empirical probability of each (rounded) note duration.
    features['duration_probs'] = {
        dur: count / len(durations)
        for dur, count in Counter(durations).items()}
    features['rhythm_density'] = np.mean(durations)
    # Standard deviation of log-durations (after Collins 2014).
    features['rhythm_variability'] = np.std(
        [np.log(float(d)) for d in durations])

    # Histograms, all normalized to proportions.
    for size in range(13):
        features['interval_count_' + str(size)] = (
            sum(1 for s in steps if abs(s) == size) / len(steps))
    for pc in range(12):
        features['pitch_class_count_' + str(pc)] = (
            sum(1 for p in pitches if abs(p) % 12 == pc) / len(pitches))
    for power in range(-3, 3):
        lo, hi = 2 ** power, 2 ** (power + 1)
        features['rhythm_duration_count_' + str(power)] = (
            sum(1 for d in durations if lo <= d < hi) / len(durations))
    return features
# single method that is passed an entry from the motifs dict
# and the database of songs and returns a dict that is a feature
# vector for that motif.
def getFeaturesForOccurrences(cur_class, songs):
    """
    Build a feature vector (dict) for a single pattern occurrence.

    cur_class is an occurrence record with a ``.score`` (iterable of
    music21 notes) and a ``.songName`` key into ``songs``; each songs
    entry carries precomputed ``.songFeatures`` (see getFeaturesForSongs).
    The returned dict mixes pitch statistics, interval statistics,
    rhythmic statistics, polynomial contour fits, fixed-length sequence
    features, and differences against the parent song's features.

    NOTE(review): assumes the occurrence has at least two notes —
    ``intervals`` must be non-empty for the max()/min() calls below.
    """
    # Sequence features are zero-padded/truncated to this many notes.
    max_length_occ = 10
    vec = {}
    mel = cur_class.score
    # NOTE(review): an old comment here said "remove rests", but no
    # filtering happens — presumably mel already contains only notes.
    noteNums = [x.pitch.midi for x in mel]
    intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]
    # --- pitch statistics ---
    highest = max(noteNums)
    lowest = min(noteNums)
    vec['numNotes'] = len(noteNums)
    vec['pitch_highest'] = highest
    vec['pitch_lowest'] = lowest
    vec['pitch_range'] = highest-lowest
    vec['pitch_num_classes'] = len(set(noteNums))
    vec['pitch_mean'] = np.mean(noteNums)
    vec['pitch_std'] = np.std(noteNums)
    # relative position (0..1) of the first occurrence of each extreme
    vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)
    vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)
    # normalized pitch-class histogram
    for n in range(12):
        num = len([x for x in noteNums if abs(x) % 12 == n])
        vec['pitch_class_count_' + str(n)] = num / len(noteNums)
    # --- interval statistics ---
    vec['interval_max'] = max(np.abs(intervals))
    vec['interval_min'] = min(np.abs(intervals))
    vec['interval_largest_asc'] = max([max(intervals), 0])
    vec['interval_largest_desc'] = min([min(intervals), 0])
    vec['interval_mean'] = np.mean(np.abs(intervals))
    vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)
    vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)
    # sign of (first - last) pitch: +1 when the first note is higher
    # than the last, i.e. a net descent over the occurrence
    vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])
    vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)
    # normalized histogram of absolute interval sizes
    for n in range(13):
        num = len([x for x in intervals if abs(x) == n])
        vec['interval_count_' + str(n)] = num / len(intervals)
    # -1 if monotonically down, 1 if monotonically up, else 0
    if all([np.sign(x) == 1 for x in intervals]):
        vec['interval_strict_asc_or_desc'] = 1
    elif all([np.sign(x) == -1 for x in intervals]):
        vec['interval_strict_asc_or_desc'] = -1
    else:
        vec['interval_strict_asc_or_desc'] = 0
    # --- rhythmic statistics (quarter lengths, rounded) ---
    noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]
    vec['rhythm_duration'] = sum(noteDurs)
    vec['rhythm_longest_note'] = max(noteDurs)
    vec['rhythm_shortest_note'] = min(noteDurs)
    vec['rhythm_density'] = np.mean(noteDurs)
    vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])  # from Collins 2014
    vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]
    # normalized histogram of duration buckets [2**n, 2**(n+1))
    for n in range(-3, 3):
        num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])
        vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)
    # --- polynomial contour fits ---
    # y: pitch relative to the first note; x: note onsets normalized by
    # the occurrence's total duration and shifted to start at 0.
    yCoords = [y - noteNums[0] for y in noteNums]
    xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]
    xCoords = [x - xtemp[0] for x in xtemp]
    polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)
    vec['polyfit_1'] = polyFit1[0][0]
    # np.polyfit(full=True) returns an empty residual array when the fit
    # is underdetermined, hence the size checks before indexing.
    vec['polyfit_residual_1'] = 0
    if polyFit1[1].size > 0:
        vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])
    vec['polyfit_2'] = 0
    vec['polyfit_residual_2'] = 0
    vec['polyfit_3'] = 0
    vec['polyfit_residual_3'] = 0
    # quadratic and cubic fits require at least 3 and 4 points respectively
    if len(noteNums) >= 3:
        polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)
        vec['polyfit_2'] = polyFit2[0][0]
        if polyFit2[1].size > 0:
            vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])
    if len(noteNums) >= 4:
        polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)
        vec['polyfit_3'] = polyFit3[0][0]
        if polyFit3[1].size > 0:
            vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])
    # fixed-length sequence representation: zero-padded to
    # max_length_occ entries; longer occurrences are truncated
    zeros = [0 for i in range(max_length_occ)]
    for i in range(max_length_occ):
        vec['seq_note_' + str(i)] = (noteNums + zeros)[i]
        vec['seq_interval_' + str(i)] = (intervals + zeros)[i]
        vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]
    # --- differences between the parent song and this occurrence ---
    songVec = songs[cur_class.songName].songFeatures
    song_diff_keys = [
        'interval_mean',
        'rhythm_variability',
        'rhythm_density',
        'interval_signs',
        'pitch_mean',
        'interval_prop_small',
        'interval_prop_large'
    ]
    # also diff every histogram bucket computed above
    song_diff_keys += [x for x in vec.keys() if '_count' in x]
    for key in song_diff_keys:
        vec['diff_' + key] = songVec[key] - vec[key]
    # log of the occurrence's expected probability under the song's
    # empirical interval/duration distributions.
    # NOTE(review): indexing songVec['interval_probs'][i] assumes every
    # interval/duration in the occurrence appears in the song's tables —
    # true when the occurrence was extracted from that song; confirm for
    # generated occurrences.
    sumIntProbs = 1
    for i in intervals:
        sumIntProbs *= songVec['interval_probs'][i]
    vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)
    sumDurProbs = 1
    for d in noteDurs:
        sumDurProbs *= songVec['duration_probs'][d]
    vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)
    # metric features default to 0 when beat info is unavailable
    vec['rhythm_starts_on_downbeat'] = 0
    vec['rhythm_crosses_measure'] = 0
    vec['rhythm_start_beat_str'] = 0
    vec['rhythm_last_beat_str'] = 0
    try:
        noteBeats = [x.beat for x in mel]
        vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)
        # beat number decreasing between consecutive notes implies a barline
        vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0
        noteStr = [x.beatStrength for x in mel]
        vec['rhythm_start_beat_str'] = np.log(noteStr[0])
        vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])
    except m21.Music21ObjectException:
        # NOTE(review): presumably raised by .beat/.beatStrength when a
        # note lacks time-signature context; swallowing it leaves the
        # zero defaults above. Confirm the exception is exposed at the
        # music21 package top level.
        pass
    return vec
def getFeaturesForClasses(patternClass, occs, songs):
    """
    Summarize a pattern class by aggregating its occurrences' features.

    For every per-occurrence feature key, the mean and standard deviation
    across the class's occurrences are recorded under 'avg_<key>' and
    'std_<key>'. Additional entries describe how much distinct pitch and
    rhythm content the occurrences contain. ``songs`` is accepted for
    interface parity but not used here.
    """
    occ_names = patternClass.occNames
    n_occs = len(occ_names)
    vec = {'numOccs': n_occs}

    # Mean/std of every occurrence-level feature over the class.
    for key in occs[occ_names[0]].occFeatures.keys():
        values = [occs[name].occFeatures[key] for name in occ_names]
        vec['avg_' + key] = np.mean(values)
        vec['std_' + key] = np.std(values)

    melodies = [occs[name].score.flat for name in occ_names]
    pitch_seqs = [[note.pitch.midi for note in mel] for mel in melodies]
    dur_seqs = [[round(float(note.quarterLength), ROUND_DURS_DIGITS)
                 for note in mel] for mel in melodies]

    vec['num_notes_total'] = sum(len(seq) for seq in pitch_seqs)
    # Proportion of occurrences carrying a distinct pitch / rhythm /
    # combined sequence.
    vec['unique_pitch_prop_content'] = (
        len({tuple(seq) for seq in pitch_seqs}) / n_occs)
    vec['unique_rhythm_prop_content'] = (
        len({tuple(seq) for seq in dur_seqs}) / n_occs)
    combined = [tuple(p + d) for p, d in zip(pitch_seqs, dur_seqs)]
    vec['prop_unique_content'] = len(set(combined)) / n_occs
    return vec
def filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):
    """Sample generated pattern classes to resemble the annotated ones.

    We want a subsample of the (much larger) generated class set such that
    occurrence count and average cardinality alone cannot easily separate
    the sample from the annotated group. For each annotated class we draw a
    random target (numOccs from one annotated class, mean note count from
    another) and pick, from the ``kNearest`` generated classes closest to
    that target, one at random without replacement.

    Args:
        annPClassNames: names of annotated classes in ``pClasses``.
        genPClassNames: names of generated classes in ``pClasses``.
        kNearest: pool size to sample from at each step.
        pClasses: mapping name -> class object with an ``occNames`` list.
        pOccs: mapping occurrence name -> object whose ``score`` has a length
            equal to its note count.

    Returns:
        list of len(annPClassNames) distinct generated class names.
    """
    # Pair up annotated-class indices at random; each pair supplies the
    # numOccs target (first) and the note-count target (second).
    indexPairs = np.arange(len(annPClassNames))
    indexPairs = np.concatenate([indexPairs, indexPairs])
    np.random.shuffle(indexPairs)
    # np.split expects an integer number of sections; use floor division
    # rather than relying on numpy coercing a float.
    indexPairs = np.split(indexPairs, len(indexPairs) // 2)
    # Copy: chosen candidates are removed so they can't be picked twice.
    genPClassNamesCopy = list(genPClassNames)
    filtGenPClassNames = []
    for i in range(len(annPClassNames)):
        tar1 = pClasses[annPClassNames[indexPairs[i][0]]]
        tar2 = pClasses[annPClassNames[indexPairs[i][1]]]
        tarNumOccs = len(tar1.occNames)
        tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]
        tarNumNotes = np.mean(tar2Notes)

        # Squared distance of every remaining generated class to the target.
        candidateNameList = []
        for gcn in genPClassNamesCopy:
            cand = pClasses[gcn]
            candNumOccs = len(cand.occNames)
            candNotes = [len(pOccs[on].score) for on in cand.occNames]
            candNumNotes = np.mean(candNotes)
            candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2
            candidateNameList.append([candScore, gcn])

        # From the k closest candidates, pick one and retire it. Clamp k to
        # the number of candidates left so a late iteration with fewer than
        # kNearest remaining names cannot raise an IndexError.
        candidateNameList = sorted(candidateNameList, key=lambda x: x[0])
        k = min(kNearest, len(candidateNameList))
        chop = candidateNameList[0:k]
        choice = chop[np.random.choice(k)][1]
        filtGenPClassNames.append(choice)
        genPClassNamesCopy.remove(choice)
    return filtGenPClassNames
def split_into_chunks(inp, num_chunks):
    """Partition the sequence *inp* into exactly ``num_chunks`` lists.

    Each chunk gets ``len(inp) // num_chunks`` items; the remainder
    (``len(inp) % num_chunks`` items) is distributed one apiece onto the
    leading chunks, matching the original behaviour for the common case.

    Fixes two defects of the previous version: it could return more than
    ``num_chunks`` chunks whenever the remainder filled a whole extra
    chunk (e.g. len 11 split 4 ways), and it raised ``ValueError``
    (range step of zero) when ``len(inp) < num_chunks``.

    Args:
        inp: a list (or other sliceable sequence of list slices).
        num_chunks: number of chunks to produce; must be >= 1.

    Returns:
        list of ``num_chunks`` lists covering every element of *inp*.
    """
    chunk_len = len(inp) // num_chunks
    # chunk_len may be 0 (short input): this then yields num_chunks
    # empty lists and the loop below places every element.
    chunks = [inp[i * chunk_len:(i + 1) * chunk_len] for i in range(num_chunks)]
    # Leftover has fewer than num_chunks elements, so indexing is safe.
    for i, x in enumerate(inp[num_chunks * chunk_len:]):
        chunks[i].append(x)
    return chunks
# just for testing: get all features
# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))
def inspectFeature(featureName, table, tableNames, featsType="classFeatures"):
    """Collect one feature's value for each named entry of *table*.

    Args:
        featureName: key inside each entry's feature dict.
        table: mapping name -> entry; each entry is indexable by
            ``featsType`` to reach its feature dict.
        tableNames: entry names to read, in order.
        featsType: which feature dict to read from (default
            ``"classFeatures"``).

    Returns:
        list of feature values, one per name in ``tableNames``.
    """
    return [table[name][featsType][featureName] for name in tableNames]
def scatterFeatures(fn1, fn2, table, tableNames):
    """Scatter-plot two class features against each other.

    Entries whose ``'type'`` is ``'ann'`` (annotated) are drawn red;
    all others black. Blocks on ``plt.show()``.

    Args:
        fn1: feature name for the x axis.
        fn2: feature name for the y axis.
        table: mapping name -> entry exposing ``classFeatures`` (attribute)
            and ``'type'`` (item access).
        tableNames: names of the entries to plot.
    """
    xs, ys, colors = [], [], []
    for name in tableNames:
        entry = table[name]
        xs.append(entry.classFeatures[fn1])
        ys.append(entry.classFeatures[fn2])
        colors.append('r' if entry['type'] == 'ann' else 'k')
    print(colors)
    plt.scatter(xs, ys, c=colors)
    plt.xlabel(fn1)
    plt.ylabel(fn2)
    plt.show()
    return
|
normal
|
{
"blob_id": "eb9135c6bcf89a62534cfc8480e5d44a089fe5a8",
"index": 1216,
"step-1": "<mask token>\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\n<mask token>\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = 
min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n 
vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = songs[cur_class.songName].songFeatures\n song_diff_keys = ['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\n<mask token>\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n 
for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n return vec\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + 
str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = songs[cur_class.songName].songFeatures\n song_diff_keys = 
['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n vec = {}\n vec['numOccs'] = len(patternClass.occNames)\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in\n patternClass.occNames]\n vec['avg_' + fk] = np.mean(allOccVals)\n vec['std_' + fk] = np.std(allOccVals)\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in\n mel] for mel in scores]\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)\n ) / vec['numOccs']\n vec['unique_rhythm_prop_content'] = 
len(set(tuple(x) for x in noteDurs)\n ) / vec['numOccs']\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[\n 'numOccs'])]\n vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)\n ) / vec['numOccs']\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,\n pClasses, pOccs):\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs) / 2)\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n for i in range(len(annPClassNames)):\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n candidateNameList = []\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -\n tarNumNotes) ** 2\n candidateNameList.append([candScore, gcn])\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del chunks[num_chunks]\n return chunks\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\ndef 
scatterFeatures(fn1, fn2, table, tableNames):\n xs = []\n ys = []\n types = []\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n print(types)\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-3": "<mask token>\nROUND_DURS_DIGITS = 5\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) 
for n in noteDurs])\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n return vec\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n 
vec['interval_count_' + str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = 
songs[cur_class.songName].songFeatures\n song_diff_keys = ['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n vec = {}\n vec['numOccs'] = len(patternClass.occNames)\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in\n patternClass.occNames]\n vec['avg_' + fk] = np.mean(allOccVals)\n vec['std_' + fk] = np.std(allOccVals)\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in\n mel] for mel in scores]\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)\n ) / 
vec['numOccs']\n vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)\n ) / vec['numOccs']\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[\n 'numOccs'])]\n vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)\n ) / vec['numOccs']\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,\n pClasses, pOccs):\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs) / 2)\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n for i in range(len(annPClassNames)):\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n candidateNameList = []\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -\n tarNumNotes) ** 2\n candidateNameList.append([candScore, gcn])\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del chunks[num_chunks]\n return chunks\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n 
ret.append(item[featsType][featureName])\n return ret\n\n\ndef scatterFeatures(fn1, fn2, table, tableNames):\n xs = []\n ys = []\n types = []\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n print(types)\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-4": "<mask token>\nimport music21 as m21\nimport music21.features.jSymbolic as jsym\nimport scipy.stats\nfrom collections import Counter\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\nROUND_DURS_DIGITS = 5\n\n\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n numNotes = inEnd - inStart + 1\n allNotes = songs[songName].score.flat.notes.stream()\n if useTies:\n beforeSlice = allNotes[:inStart - 1]\n numTies = 0\n for n in beforeSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n inStart += numTies\n numTies = 0\n inSlice = allNotes[inStart:inStart + numNotes]\n for n in inSlice:\n if n.tie is not None:\n if n.tie.type == 'start':\n numTies += 1\n numNotes += numTies\n pattOcc = allNotes[inStart:inStart + numNotes]\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n 
couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n return vec\n\n\ndef getFeaturesForOccurrences(cur_class, songs):\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n noteNums = [x.pitch.midi for x in mel]\n intervals = [(noteNums[n] - noteNums[n - 1]) for n in range(1, len(\n noteNums))]\n highest = max(noteNums)\n lowest = min(noteNums)\n vec['numNotes'] = len(noteNums)\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest - lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([(abs(intervals[n]) <= 2) for n in\n range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([(abs(intervals[n]) >= 7) for n in\n range(0, len(intervals))]) / len(intervals)\n 
vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(\n noteNums) - 1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n if all([(np.sign(x) == 1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([(np.sign(x) == -1) for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs])\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs) - 1]\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2 ** n <= x < 2 ** (n + 1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n yCoords = [(y - noteNums[0]) for y in noteNums]\n xtemp = [(float(x.offset) / vec['rhythm_duration']) for x in mel]\n xCoords = [(x - xtemp[0]) for x in xtemp]\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n zeros = [(0) for i in range(max_length_occ)]\n for i in 
range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n songVec = songs[cur_class.songName].songFeatures\n song_diff_keys = ['interval_mean', 'rhythm_variability',\n 'rhythm_density', 'interval_signs', 'pitch_mean',\n 'interval_prop_small', 'interval_prop_large']\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = noteBeats[0] == 1.0\n vec['rhythm_crosses_measure'] = sum([(noteBeats[n] < noteBeats[n - \n 1]) for n in range(1, len(noteBeats))]) > 0\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr) - 1])\n except m21.Music21ObjectException:\n pass\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n vec = {}\n vec['numOccs'] = len(patternClass.occNames)\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in\n patternClass.occNames]\n vec['avg_' + fk] = np.mean(allOccVals)\n vec['std_' + fk] = np.std(allOccVals)\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = [[round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in\n mel] for 
mel in scores]\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n vec['unique_pitch_prop_content'] = len(set(tuple(x) for x in noteNums)\n ) / vec['numOccs']\n vec['unique_rhythm_prop_content'] = len(set(tuple(x) for x in noteDurs)\n ) / vec['numOccs']\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec[\n 'numOccs'])]\n vec['prop_unique_content'] = len(set(tuple(x) for x in pitchAndDurs)\n ) / vec['numOccs']\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest,\n pClasses, pOccs):\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs) / 2)\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n for i in range(len(annPClassNames)):\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n candidateNameList = []\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n candScore = (candNumOccs - tarNumOccs) ** 2 + (candNumNotes -\n tarNumNotes) ** 2\n candidateNameList.append([candScore, gcn])\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del 
chunks[num_chunks]\n return chunks\n\n\ndef inspectFeature(featureName, table, tableNames, featsType='classFeatures'):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\ndef scatterFeatures(fn1, fn2, table, tableNames):\n xs = []\n ys = []\n types = []\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n print(types)\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 7 17:42:18 2018\n\n@author: Tim\n\"\"\"\nimport music21 as m21\nimport music21.features.jSymbolic as jsym\nimport scipy.stats\nfrom collections import Counter\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom timeit import default_timer as timer\n\n# round all duration values to this many digits!\n# some are stored as fractions and that's just inconvenient\nROUND_DURS_DIGITS = 5\n\n# N.B. THE HEADERS ARE:\n# 0: tunefamily\n# 1: songid\n# 2: motifid\n# 3: begintime\n# 4: endtime\n# 5: duration\n# 6: startindex\n# 7: endindex\n# 8: numberofnotes\n# 9: motifclass\n# 10: description\n# 11: annotator\n# 12: changes\n\n# try to fetch a single motif\n\n\n# def extractMotif(annEntry, songs):\n# \"\"\"\n# given a row from the annotation file and the database of score files,\n# return the notes of theassociated motif and some of its metadata as a\n# dictionary.\n# \"\"\"\n#\n# songName = annEntry[1]\n# inStart = int(annEntry[6])\n# numNotes = int(annEntry[8])\n#\n# #add number of ties before start index from start index; meertens\n# #DOESN'T count tied notes as notes but music21 DOES\n# allNotes = songs[songName].score.flat.notes.stream()\n# #subtract 1 here to get the first note of the occurence in the slice\n# #so that we can get rid of it if it's a rest\n# beforeSlice = allNotes[:inStart-1]\n# numTies = 0\n# for n in beforeSlice:\n# if(n.tie != None):\n# if(n.tie.type == 'start'):\n# numTies += 1\n#\n# inStart += numTies\n#\n# #do the same for ties inside of the snippet, but also keep track of where\n# #they are and save that information with the motif so we don't have to go\n# #through this procedure again\n# numTies = 0\n# inSlice = allNotes[inStart:(inStart+numNotes)]\n# for n in inSlice:\n# if(n.tie != None):\n# if(n.tie.type == 'start'):\n# numTies += 1\n#\n#\n# #this new numNotes will work with music21\n# numNotes += numTies\n#\n# #NOW we know that we have the actual motif!\n# motif = 
allNotes[inStart:(inStart+numNotes)]\n#\n# return {'notes':motif,\n# 'startInd':inStart,\n# 'endInd':(inStart+numNotes),\n# 'songID':annEntry[1],\n# 'motifClass':annEntry[9],\n# 'duration':annEntry[5]}\n\n# annotated first starting at 0, but tied notes are only counted for the onset\n# must disregard tied notes when doing start/end indices tabarnak\n\n# so: consider the list of notes up to the first index. if there's n ties\n# that live behind the start index, increment the start index by n. when done,\n# look 8 notes ahead and do the same thing\ndef extractPatternOccurrence(songName, inStart, inEnd, useTies, songs):\n \"\"\"\n given song name, occurrence start, occurrence end, and the database of score files,\n return the notes of the associated pattern occurrence\n useTies is a boolean determining whether or not tied notes count as\n two notes or one for the purpose of indexing (true for 1, false for 2)\n necessary bc MTC-ANN indexing doesn't count\n \"\"\"\n\n # inStart = int(annEntry[6])\n # numNotes = int(annEntry[8])\n numNotes = inEnd - inStart + 1 # including endpoints\n\n # add number of ties before start index from start index; meertens\n # DOESN'T count tied notes as notes but music21 DOES\n allNotes = songs[songName].score.flat.notes.stream()\n # subtract 1 here to get the first note of the occurence in the slice\n # so that we can get rid of it if it's a rest\n if(useTies):\n beforeSlice = allNotes[:inStart-1]\n numTies = 0\n for n in beforeSlice:\n if(n.tie is not None):\n if(n.tie.type == 'start'):\n numTies += 1\n\n inStart += numTies\n\n # do the same for ties inside of the snippet, but also keep track of where\n # they are and save that information with the pattOcc so we don't have to go\n # through this procedure again (TODO)\n numTies = 0\n inSlice = allNotes[inStart:(inStart+numNotes)]\n for n in inSlice:\n if(n.tie is not None):\n if(n.tie.type == 'start'):\n numTies += 1\n\n # this new numNotes will work with music21\n numNotes += numTies\n\n 
pattOcc = allNotes[inStart:(inStart+numNotes)]\n\n return pattOcc\n\n\ndef getFeaturesForSongs(score):\n vec = {}\n\n mel = score.flat.notes.stream()\n noteNums = [x.pitch.midi for x in mel]\n intervals = [noteNums[n] - noteNums[n-1] for n in range(1, len(noteNums))]\n couInt = dict(Counter(intervals))\n for k in couInt.keys():\n couInt[k] /= len(intervals)\n\n vec['interval_probs'] = couInt\n vec['pitch_mean'] = np.mean(noteNums)\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)\n\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n\n couRtm = dict(Counter(noteDurs))\n for k in couRtm.keys():\n couRtm[k] /= len(noteDurs)\n\n vec['duration_probs'] = couRtm\n vec['rhythm_density'] = np.mean(noteDurs)\n vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014\n\n # HISTOGRAMS:\n # interval counting\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n\n return vec\n\n\n# single method that is passed an entry from the motifs dict\n# and the database of songs and returns a dict that is a feature\n# vector for that motif.\ndef getFeaturesForOccurrences(cur_class, songs):\n\n max_length_occ = 10\n vec = {}\n mel = cur_class.score\n\n # for now just remove rests\n\n noteNums = [x.pitch.midi for x in mel]\n intervals = [noteNums[n] - noteNums[n-1] for n in range(1, 
len(noteNums))]\n\n highest = max(noteNums)\n lowest = min(noteNums)\n\n vec['numNotes'] = len(noteNums)\n\n vec['pitch_highest'] = highest\n vec['pitch_lowest'] = lowest\n vec['pitch_range'] = highest-lowest\n vec['pitch_num_classes'] = len(set(noteNums))\n vec['pitch_mean'] = np.mean(noteNums)\n vec['pitch_std'] = np.std(noteNums)\n vec['pitch_pos_highest'] = noteNums.index(highest) / len(noteNums)\n vec['pitch_pos_lowest'] = noteNums.index(lowest) / len(noteNums)\n\n # pitch counting\n for n in range(12):\n num = len([x for x in noteNums if abs(x) % 12 == n])\n vec['pitch_class_count_' + str(n)] = num / len(noteNums)\n\n vec['interval_max'] = max(np.abs(intervals))\n vec['interval_min'] = min(np.abs(intervals))\n vec['interval_largest_asc'] = max([max(intervals), 0])\n vec['interval_largest_desc'] = min([min(intervals), 0])\n vec['interval_mean'] = np.mean(np.abs(intervals))\n vec['interval_prop_small'] = sum([abs(intervals[n]) <= 2 for n in range(0, len(intervals))]) / len(intervals)\n vec['interval_prop_large'] = sum([abs(intervals[n]) >= 7 for n in range(0, len(intervals))]) / len(intervals)\n vec['interval_asc_or_desc'] = np.sign(noteNums[0] - noteNums[len(noteNums)-1])\n vec['interval_signs'] = sum(np.sign(intervals)) / len(intervals)\n\n # interval counting\n for n in range(13):\n num = len([x for x in intervals if abs(x) == n])\n vec['interval_count_' + str(n)] = num / len(intervals)\n\n # -1 if monotonically down, 1 if up, else 0\n if all([np.sign(x) == 1 for x in intervals]):\n vec['interval_strict_asc_or_desc'] = 1\n elif all([np.sign(x) == -1 for x in intervals]):\n vec['interval_strict_asc_or_desc'] = -1\n else:\n vec['interval_strict_asc_or_desc'] = 0\n\n # rhythmic properties\n noteDurs = [round(float(x.quarterLength), ROUND_DURS_DIGITS) for x in mel]\n vec['rhythm_duration'] = sum(noteDurs)\n vec['rhythm_longest_note'] = max(noteDurs)\n vec['rhythm_shortest_note'] = min(noteDurs)\n vec['rhythm_density'] = np.mean(noteDurs)\n 
vec['rhythm_variability'] = np.std([np.log(float(n)) for n in noteDurs]) # from Collins 2014\n vec['rhythm_last_note_duration'] = noteDurs[len(noteDurs)-1]\n\n # rhythm counting\n for n in range(-3, 3):\n num = len([x for x in noteDurs if 2**(n) <= x < 2**(n+1)])\n vec['rhythm_duration_count_' + str(n)] = num / len(noteDurs)\n\n # POLYFIT IDEA\n yCoords = [y - noteNums[0] for y in noteNums]\n xtemp = [float(x.offset) / vec['rhythm_duration'] for x in mel]\n xCoords = [x - xtemp[0] for x in xtemp]\n\n # print(str(xCoords) + \" vs \" + str(yCoords))\n polyFit1 = np.polyfit(xCoords, yCoords, 1, full=True)\n vec['polyfit_1'] = polyFit1[0][0]\n vec['polyfit_residual_1'] = 0\n if polyFit1[1].size > 0:\n vec['polyfit_residual_1'] = np.sqrt(polyFit1[1][0])\n\n vec['polyfit_2'] = 0\n vec['polyfit_residual_2'] = 0\n vec['polyfit_3'] = 0\n vec['polyfit_residual_3'] = 0\n\n if len(noteNums) >= 3:\n polyFit2 = np.polyfit(xCoords, yCoords, 2, full=True)\n vec['polyfit_2'] = polyFit2[0][0]\n if polyFit2[1].size > 0:\n vec['polyfit_residual_2'] = np.sqrt(polyFit2[1][0])\n\n if len(noteNums) >= 4:\n polyFit3 = np.polyfit(xCoords, yCoords, 3, full=True)\n vec['polyfit_3'] = polyFit3[0][0]\n if polyFit3[1].size > 0:\n vec['polyfit_residual_3'] = np.sqrt(polyFit3[1][0])\n\n # add sequence representation of occurrence\n zeros = [0 for i in range(max_length_occ)]\n for i in range(max_length_occ):\n vec['seq_note_' + str(i)] = (noteNums + zeros)[i]\n vec['seq_interval_' + str(i)] = (intervals + zeros)[i]\n vec['seq_rhythm_' + str(i)] = (noteDurs + zeros)[i]\n\n # differences between song and this motif\n songVec = songs[cur_class.songName].songFeatures\n\n song_diff_keys = [\n 'interval_mean',\n 'rhythm_variability',\n 'rhythm_density',\n 'interval_signs',\n 'pitch_mean',\n 'interval_prop_small',\n 'interval_prop_large'\n ]\n song_diff_keys += [x for x in vec.keys() if '_count' in x]\n\n for key in song_diff_keys:\n vec['diff_' + key] = songVec[key] - vec[key]\n\n # songScore = 
songs[motif['songName']]['score'].flat.notes.stream()\n# songScoreNums = [x.pitch.midi for x in songScore]\n\n# vec['intervalFollowing'] = 0\n# if motif['endInd'] + 1 < len(songScoreNums):\n# vec['intervalFollowing'] = songScoreNums[motif['endInd'] + 1] - noteNums[-1]\n# vec['intervalPreceding'] = 0\n# if motif['endInd'] - 1 > 0:\n# vec['intervalPreceding'] = songScoreNums[motif['endInd'] - 1] - noteNums[0]\n\n sumIntProbs = 1\n for i in intervals:\n sumIntProbs *= songVec['interval_probs'][i]\n vec['interval_log_expected_occurrences'] = np.log(sumIntProbs)\n\n sumDurProbs = 1\n for d in noteDurs:\n sumDurProbs *= songVec['duration_probs'][d]\n vec['rhythm_log_expected_occurrences'] = np.log(sumDurProbs)\n\n vec['rhythm_starts_on_downbeat'] = 0\n vec['rhythm_crosses_measure'] = 0\n vec['rhythm_start_beat_str'] = 0\n vec['rhythm_last_beat_str'] = 0\n try:\n noteBeats = [x.beat for x in mel]\n vec['rhythm_starts_on_downbeat'] = (noteBeats[0] == 1.0)\n vec['rhythm_crosses_measure'] = sum([noteBeats[n] < noteBeats[n-1] for n in range(1, len(noteBeats))]) > 0\n\n # figure out how to tell if note has associated time signature\n noteStr = [x.beatStrength for x in mel]\n vec['rhythm_start_beat_str'] = np.log(noteStr[0])\n vec['rhythm_last_beat_str'] = np.log(noteStr[len(noteStr)-1])\n except m21.Music21ObjectException:\n # this is not a good solution.\n pass\n\n # send it back\n return vec\n\n\ndef getFeaturesForClasses(patternClass, occs, songs):\n # take the average/std over all occurrences\n vec = {}\n\n vec['numOccs'] = len(patternClass.occNames)\n\n occFeatureKeys = occs[patternClass.occNames[0]].occFeatures.keys()\n\n for fk in occFeatureKeys:\n allOccVals = [occs[occName].occFeatures[fk] for occName in patternClass.occNames]\n vec[\"avg_\" + fk] = np.mean(allOccVals)\n vec[\"std_\" + fk] = np.std(allOccVals)\n\n scores = [occs[oc].score.flat for oc in patternClass.occNames]\n\n noteNums = [[x.pitch.midi for x in mel] for mel in scores]\n noteDurs = 
[[round(float(x.quarterLength), ROUND_DURS_DIGITS)\n for x in mel] for mel in scores]\n\n flatNums = [x for subList in noteNums for x in subList]\n vec['num_notes_total'] = len(flatNums)\n\n vec['unique_pitch_prop_content'] = \\\n len(set(tuple(x) for x in noteNums)) / vec['numOccs']\n\n vec['unique_rhythm_prop_content'] = \\\n len(set(tuple(x) for x in noteDurs)) / vec['numOccs']\n\n pitchAndDurs = [(noteNums[x] + noteDurs[x]) for x in range(0, vec['numOccs'])]\n\n vec['prop_unique_content'] = \\\n len(set(tuple(x) for x in pitchAndDurs)) / vec['numOccs']\n\n return vec\n\n\ndef filterPClassesWithKNN(annPClassNames, genPClassNames, kNearest, pClasses, pOccs):\n # so: we want to take a sample of our huge number of generated pattern classes\n # such that the number of occurrences and average cardinality doesn't easily\n # distinguish our sample from the annotated group.\n # perform a quick and dirty knn to get a bunch of generated class names\n # whose cardinalities and numOccs somewhat match the annotated data.\n indexPairs = np.arange(len(annPClassNames))\n indexPairs = np.concatenate([indexPairs, indexPairs])\n np.random.shuffle(indexPairs)\n indexPairs = np.split(indexPairs, len(indexPairs)/2)\n\n # deep copy!\n genPClassNamesCopy = list(genPClassNames)\n filtGenPClassNames = []\n\n for i in range(len(annPClassNames)):\n\n tar1 = pClasses[annPClassNames[indexPairs[i][0]]]\n tar2 = pClasses[annPClassNames[indexPairs[i][1]]]\n\n tarNumOccs = len(tar1.occNames)\n tar2Notes = [len(pOccs[on].score) for on in tar2.occNames]\n tarNumNotes = np.mean(tar2Notes)\n\n candidateNameList = []\n\n # calculate how close each generated class is to these parameters\n for gcn in genPClassNamesCopy:\n cand = pClasses[gcn]\n candNumOccs = len(cand.occNames)\n candNotes = [len(pOccs[on].score) for on in cand.occNames]\n candNumNotes = np.mean(candNotes)\n\n candScore = (candNumOccs - tarNumOccs)**2 + (candNumNotes - tarNumNotes)**2\n\n candidateNameList.append([candScore, gcn])\n\n # 
from the kNearest closest generated classes, choose one and remove\n # that one from the copy array\n candidateNameList = sorted(candidateNameList, key=lambda x: x[0])\n chop = candidateNameList[0:kNearest]\n choice = chop[np.random.choice(kNearest)][1]\n filtGenPClassNames.append(choice)\n genPClassNamesCopy.remove(choice)\n\n return filtGenPClassNames\n\n\ndef split_into_chunks(inp, num_chunks):\n\n chunk_len = int(np.floor(len(inp) / num_chunks))\n chunks = [inp[i:i + chunk_len] for i in range(0, len(inp), chunk_len)]\n if len(chunks) > num_chunks:\n for i, x in enumerate(chunks[num_chunks]):\n chunks[i].append(x)\n del chunks[num_chunks]\n\n return chunks\n\n\n# just for testing: get all features\n# plt.plot(sorted(inspectFeature('classAvg_pitch_mean',pClasses,genPClassNames + annPClassNames)))\ndef inspectFeature(featureName, table, tableNames, featsType=\"classFeatures\"):\n ret = []\n for tn in tableNames:\n item = table[tn]\n ret.append(item[featsType][featureName])\n return ret\n\n\ndef scatterFeatures(fn1, fn2, table, tableNames):\n\n xs = []\n ys = []\n types = []\n\n for tn in tableNames:\n item = table[tn]\n xs.append(item.classFeatures[fn1])\n ys.append(item.classFeatures[fn2])\n if item['type'] == 'ann':\n types.append('r')\n else:\n types.append('k')\n\n print(types)\n\n plt.scatter(xs, ys, c=types)\n plt.xlabel(fn1)\n plt.ylabel(fn2)\n plt.show()\n return\n",
"step-ids": [
3,
8,
9,
10,
11
]
}
|
[
3,
8,
9,
10,
11
] |
# Mezzanine Django Framework createdb error on Max OSX 10.9.2
import django
django.version
|
normal
|
{
"blob_id": "56afde2a31ad9dddee35e84609dff2eb0fc6fe1a",
"index": 9438,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndjango.version\n",
"step-3": "import django\ndjango.version\n",
"step-4": "# Mezzanine Django Framework createdb error on Max OSX 10.9.2\nimport django\ndjango.version\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class RLTrainer(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RLTrainer(object):
<|reserved_special_token_0|>
def __init__(self, config_, grid_search=False):
"""
Constructor
:param config_:
:param grid_search:
:return:
"""
self.config = config_
self.grid_search = grid_search
self.logger = logging.getLogger('cuda_logger')
self.expt_name = self.config['RL_parameters']['experiment']
self.objective = self.config['RL_parameters']['objective']
self.city_states_filename = self.config['RL_parameters'][
'city_states_filename']
self.training_tracker = TrainingTracker(self.config)
def run(self):
"""
Creates and runs training episode
:param:
:return:
"""
data_provider = DataProvider(self.config)
hex_attr_df = data_provider.read_hex_bin_attributes()
hex_distance_df = data_provider.read_hex_bin_distances()
city_states = data_provider.read_city_states(self.city_states_filename)
neighborhood = data_provider.read_neighborhood_data()
popular_bins = data_provider.read_popular_hex_bins()
num_episodes = self.config['RL_parameters']['num_episodes']
ind_episodes = self.config['RL_parameters']['ind_episodes']
exp_decay_multiplier = self.config['RL_parameters'][
'exp_decay_multiplier']
q_ind = None
r_table = None
xi_matrix = None
best_episode = None
best_model = {}
progress_bar = tqdm(xrange(num_episodes))
for episode_id in progress_bar:
progress_bar.set_description('Episode: {}'.format(episode_id))
current_best = -1000000
ind_exploration_factor = np.e ** (-1 * episode_id *
exp_decay_multiplier / ind_episodes)
episode = Episode(self.config, episode_id,
ind_exploration_factor, hex_attr_df, hex_distance_df,
city_states, neighborhood, popular_bins, q_ind, r_table,
xi_matrix)
tables = episode.run()
q_ind = tables['q_ind']
r_table = tables['r_table']
xi_matrix = tables['xi_matrix']
episode_tracker = tables['episode_tracker']
self.training_tracker.update_RL_tracker(episode_id,
episode_tracker.gross_earnings, episode_tracker.
successful_waits, episode_tracker.unsuccessful_waits,
episode_tracker.unmet_demand, episode_tracker.
relocation_rides, episode_tracker.DET, episode_tracker.DPRT,
episode_tracker.DWT, episode_tracker.DRT, episode_tracker.DCT)
if self.objective == 'revenue':
if episode_tracker.gross_earnings >= current_best:
best_episode = episode_tracker
current_best = best_episode.gross_earnings
elif episode_tracker.successful_waits >= current_best:
best_episode = episode_tracker
current_best = episode_tracker.successful_waits
best_model['ind_exploration_factor'] = ind_exploration_factor
best_model['config'] = self.config
best_model['q_ind'] = q_ind
best_model['r_table'] = r_table
best_model['xi_matrix'] = xi_matrix
best_model['training_tracker'] = self.training_tracker
self.logger.info(
'Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}'.format(
self.expt_name, best_episode.gross_earnings, best_episode.
successful_waits, best_episode.unmet_demand))
return best_episode, best_model, self.training_tracker
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RLTrainer(object):
"""
Creates RL training object
"""
def __init__(self, config_, grid_search=False):
"""
Constructor
:param config_:
:param grid_search:
:return:
"""
self.config = config_
self.grid_search = grid_search
self.logger = logging.getLogger('cuda_logger')
self.expt_name = self.config['RL_parameters']['experiment']
self.objective = self.config['RL_parameters']['objective']
self.city_states_filename = self.config['RL_parameters'][
'city_states_filename']
self.training_tracker = TrainingTracker(self.config)
def run(self):
"""
Creates and runs training episode
:param:
:return:
"""
data_provider = DataProvider(self.config)
hex_attr_df = data_provider.read_hex_bin_attributes()
hex_distance_df = data_provider.read_hex_bin_distances()
city_states = data_provider.read_city_states(self.city_states_filename)
neighborhood = data_provider.read_neighborhood_data()
popular_bins = data_provider.read_popular_hex_bins()
num_episodes = self.config['RL_parameters']['num_episodes']
ind_episodes = self.config['RL_parameters']['ind_episodes']
exp_decay_multiplier = self.config['RL_parameters'][
'exp_decay_multiplier']
q_ind = None
r_table = None
xi_matrix = None
best_episode = None
best_model = {}
progress_bar = tqdm(xrange(num_episodes))
for episode_id in progress_bar:
progress_bar.set_description('Episode: {}'.format(episode_id))
current_best = -1000000
ind_exploration_factor = np.e ** (-1 * episode_id *
exp_decay_multiplier / ind_episodes)
episode = Episode(self.config, episode_id,
ind_exploration_factor, hex_attr_df, hex_distance_df,
city_states, neighborhood, popular_bins, q_ind, r_table,
xi_matrix)
tables = episode.run()
q_ind = tables['q_ind']
r_table = tables['r_table']
xi_matrix = tables['xi_matrix']
episode_tracker = tables['episode_tracker']
self.training_tracker.update_RL_tracker(episode_id,
episode_tracker.gross_earnings, episode_tracker.
successful_waits, episode_tracker.unsuccessful_waits,
episode_tracker.unmet_demand, episode_tracker.
relocation_rides, episode_tracker.DET, episode_tracker.DPRT,
episode_tracker.DWT, episode_tracker.DRT, episode_tracker.DCT)
if self.objective == 'revenue':
if episode_tracker.gross_earnings >= current_best:
best_episode = episode_tracker
current_best = best_episode.gross_earnings
elif episode_tracker.successful_waits >= current_best:
best_episode = episode_tracker
current_best = episode_tracker.successful_waits
best_model['ind_exploration_factor'] = ind_exploration_factor
best_model['config'] = self.config
best_model['q_ind'] = q_ind
best_model['r_table'] = r_table
best_model['xi_matrix'] = xi_matrix
best_model['training_tracker'] = self.training_tracker
self.logger.info(
'Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}'.format(
self.expt_name, best_episode.gross_earnings, best_episode.
successful_waits, best_episode.unmet_demand))
return best_episode, best_model, self.training_tracker
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import division
import logging
import numpy as np
from data.data_provider import DataProvider
from episode.episode import Episode
from tracker import TrainingTracker
from tqdm import tqdm
class RLTrainer(object):
"""
Creates RL training object
"""
def __init__(self, config_, grid_search=False):
"""
Constructor
:param config_:
:param grid_search:
:return:
"""
self.config = config_
self.grid_search = grid_search
self.logger = logging.getLogger('cuda_logger')
self.expt_name = self.config['RL_parameters']['experiment']
self.objective = self.config['RL_parameters']['objective']
self.city_states_filename = self.config['RL_parameters'][
'city_states_filename']
self.training_tracker = TrainingTracker(self.config)
def run(self):
"""
Creates and runs training episode
:param:
:return:
"""
data_provider = DataProvider(self.config)
hex_attr_df = data_provider.read_hex_bin_attributes()
hex_distance_df = data_provider.read_hex_bin_distances()
city_states = data_provider.read_city_states(self.city_states_filename)
neighborhood = data_provider.read_neighborhood_data()
popular_bins = data_provider.read_popular_hex_bins()
num_episodes = self.config['RL_parameters']['num_episodes']
ind_episodes = self.config['RL_parameters']['ind_episodes']
exp_decay_multiplier = self.config['RL_parameters'][
'exp_decay_multiplier']
q_ind = None
r_table = None
xi_matrix = None
best_episode = None
best_model = {}
progress_bar = tqdm(xrange(num_episodes))
for episode_id in progress_bar:
progress_bar.set_description('Episode: {}'.format(episode_id))
current_best = -1000000
ind_exploration_factor = np.e ** (-1 * episode_id *
exp_decay_multiplier / ind_episodes)
episode = Episode(self.config, episode_id,
ind_exploration_factor, hex_attr_df, hex_distance_df,
city_states, neighborhood, popular_bins, q_ind, r_table,
xi_matrix)
tables = episode.run()
q_ind = tables['q_ind']
r_table = tables['r_table']
xi_matrix = tables['xi_matrix']
episode_tracker = tables['episode_tracker']
self.training_tracker.update_RL_tracker(episode_id,
episode_tracker.gross_earnings, episode_tracker.
successful_waits, episode_tracker.unsuccessful_waits,
episode_tracker.unmet_demand, episode_tracker.
relocation_rides, episode_tracker.DET, episode_tracker.DPRT,
episode_tracker.DWT, episode_tracker.DRT, episode_tracker.DCT)
if self.objective == 'revenue':
if episode_tracker.gross_earnings >= current_best:
best_episode = episode_tracker
current_best = best_episode.gross_earnings
elif episode_tracker.successful_waits >= current_best:
best_episode = episode_tracker
current_best = episode_tracker.successful_waits
best_model['ind_exploration_factor'] = ind_exploration_factor
best_model['config'] = self.config
best_model['q_ind'] = q_ind
best_model['r_table'] = r_table
best_model['xi_matrix'] = xi_matrix
best_model['training_tracker'] = self.training_tracker
self.logger.info(
'Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}'.format(
self.expt_name, best_episode.gross_earnings, best_episode.
successful_waits, best_episode.unmet_demand))
return best_episode, best_model, self.training_tracker
<|reserved_special_token_1|>
"""
This class runs the RL Training
"""
from __future__ import division
import logging
import numpy as np
from data.data_provider import DataProvider
from episode.episode import Episode
from tracker import TrainingTracker
from tqdm import tqdm
class RLTrainer(object):
    """
    Creates RL training object.

    Runs a sequence of training episodes, carrying the learned tables
    (q_ind, r_table, xi_matrix) from one episode to the next, and keeps
    the best episode seen according to the configured objective.
    """

    def __init__(self, config_, grid_search=False):
        """
        Constructor
        :param config_: dict-like experiment configuration; expects an
            'RL_parameters' section with 'experiment', 'objective' and
            'city_states_filename' keys (plus episode counts, read in run()).
        :param grid_search: True when this trainer is driven by a grid search
        :return:
        """
        self.config = config_
        self.grid_search = grid_search
        self.logger = logging.getLogger("cuda_logger")
        self.expt_name = self.config['RL_parameters']['experiment']
        self.objective = self.config['RL_parameters']['objective']
        self.city_states_filename = self.config['RL_parameters']['city_states_filename']

        # Create training tracker
        self.training_tracker = TrainingTracker(self.config)

    def run(self):
        """
        Creates and runs training episodes.

        :return: tuple (best_episode, best_model, training_tracker) where
            best_episode is the episode tracker of the best episode seen,
            best_model is a dict with the final learned tables and config,
            and training_tracker aggregates per-episode statistics.
        """
        data_provider = DataProvider(self.config)
        hex_attr_df = data_provider.read_hex_bin_attributes()
        hex_distance_df = data_provider.read_hex_bin_distances()
        city_states = data_provider.read_city_states(self.city_states_filename)
        neighborhood = data_provider.read_neighborhood_data()
        popular_bins = data_provider.read_popular_hex_bins()
        num_episodes = self.config['RL_parameters']['num_episodes']
        ind_episodes = self.config['RL_parameters']['ind_episodes']
        exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']

        # Learned tables carried over from one episode to the next.
        q_ind = None
        r_table = None
        xi_matrix = None

        best_episode = None
        best_model = {}

        # BUG FIX: current_best must persist across episodes. It was being
        # reset to -1000000 at the top of every loop iteration, which made
        # every episode overwrite best_episode regardless of its score.
        current_best = -1000000

        # range() instead of xrange() keeps this Python-3 compatible.
        progress_bar = tqdm(range(num_episodes))
        for episode_id in progress_bar:
            progress_bar.set_description("Episode: {}".format(episode_id))

            # Exploration decays exponentially over the first ind_episodes.
            ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)

            episode = Episode(self.config,
                              episode_id,
                              ind_exploration_factor,
                              hex_attr_df,
                              hex_distance_df,
                              city_states,
                              neighborhood,
                              popular_bins,
                              q_ind,
                              r_table,
                              xi_matrix)

            # Run the episode and carry its updated tables forward.
            tables = episode.run()
            q_ind = tables['q_ind']
            r_table = tables['r_table']
            xi_matrix = tables['xi_matrix']
            episode_tracker = tables['episode_tracker']

            # Per-episode logging intentionally omitted: it produced an
            # unusably large log during experiments.
            self.training_tracker.update_RL_tracker(
                episode_id, episode_tracker.gross_earnings,
                episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,
                episode_tracker.unmet_demand, episode_tracker.relocation_rides,
                episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,
                episode_tracker.DRT, episode_tracker.DCT)

            # Keep track of the best episode according to the objective.
            if self.objective == 'revenue':
                if episode_tracker.gross_earnings >= current_best:
                    best_episode = episode_tracker
                    current_best = episode_tracker.gross_earnings
            else:  # self.objective == 'pickups'
                if episode_tracker.successful_waits >= current_best:
                    best_episode = episode_tracker
                    current_best = episode_tracker.successful_waits

            # NOTE(review): best_model is refreshed every episode (latest
            # state), not only when a new best is found — matches the
            # original behavior; confirm intent.
            best_model['ind_exploration_factor'] = ind_exploration_factor
            best_model['config'] = self.config
            best_model['q_ind'] = q_ind
            best_model['r_table'] = r_table
            best_model['xi_matrix'] = xi_matrix
            best_model['training_tracker'] = self.training_tracker

        # After finishing training
        self.logger.info("Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}".format(
            self.expt_name,
            best_episode.gross_earnings,
            best_episode.successful_waits,
            best_episode.unmet_demand))
        return best_episode, best_model, self.training_tracker
|
flexible
|
{
"blob_id": "7c004cb0c9eefa5e88f5085fb3b2878db98d2b20",
"index": 3200,
"step-1": "<mask token>\n\n\nclass RLTrainer(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RLTrainer(object):\n <mask token>\n\n def __init__(self, config_, grid_search=False):\n \"\"\"\n Constructor\n :param config_:\n :param grid_search:\n :return:\n \"\"\"\n self.config = config_\n self.grid_search = grid_search\n self.logger = logging.getLogger('cuda_logger')\n self.expt_name = self.config['RL_parameters']['experiment']\n self.objective = self.config['RL_parameters']['objective']\n self.city_states_filename = self.config['RL_parameters'][\n 'city_states_filename']\n self.training_tracker = TrainingTracker(self.config)\n\n def run(self):\n \"\"\"\n Creates and runs training episode\n :param:\n :return:\n \"\"\"\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters'][\n 'exp_decay_multiplier']\n q_ind = None\n r_table = None\n xi_matrix = None\n best_episode = None\n best_model = {}\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description('Episode: {}'.format(episode_id))\n current_best = -1000000\n ind_exploration_factor = np.e ** (-1 * episode_id *\n exp_decay_multiplier / ind_episodes)\n episode = Episode(self.config, episode_id,\n ind_exploration_factor, hex_attr_df, hex_distance_df,\n city_states, neighborhood, popular_bins, q_ind, r_table,\n xi_matrix)\n tables = episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n self.training_tracker.update_RL_tracker(episode_id,\n episode_tracker.gross_earnings, 
episode_tracker.\n successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.\n relocation_rides, episode_tracker.DET, episode_tracker.DPRT,\n episode_tracker.DWT, episode_tracker.DRT, episode_tracker.DCT)\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n elif episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n current_best = episode_tracker.successful_waits\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n self.logger.info(\n 'Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}'.format(\n self.expt_name, best_episode.gross_earnings, best_episode.\n successful_waits, best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker\n",
"step-3": "<mask token>\n\n\nclass RLTrainer(object):\n \"\"\"\n Creates RL training object\n \"\"\"\n\n def __init__(self, config_, grid_search=False):\n \"\"\"\n Constructor\n :param config_:\n :param grid_search:\n :return:\n \"\"\"\n self.config = config_\n self.grid_search = grid_search\n self.logger = logging.getLogger('cuda_logger')\n self.expt_name = self.config['RL_parameters']['experiment']\n self.objective = self.config['RL_parameters']['objective']\n self.city_states_filename = self.config['RL_parameters'][\n 'city_states_filename']\n self.training_tracker = TrainingTracker(self.config)\n\n def run(self):\n \"\"\"\n Creates and runs training episode\n :param:\n :return:\n \"\"\"\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters'][\n 'exp_decay_multiplier']\n q_ind = None\n r_table = None\n xi_matrix = None\n best_episode = None\n best_model = {}\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description('Episode: {}'.format(episode_id))\n current_best = -1000000\n ind_exploration_factor = np.e ** (-1 * episode_id *\n exp_decay_multiplier / ind_episodes)\n episode = Episode(self.config, episode_id,\n ind_exploration_factor, hex_attr_df, hex_distance_df,\n city_states, neighborhood, popular_bins, q_ind, r_table,\n xi_matrix)\n tables = episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n self.training_tracker.update_RL_tracker(episode_id,\n 
episode_tracker.gross_earnings, episode_tracker.\n successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.\n relocation_rides, episode_tracker.DET, episode_tracker.DPRT,\n episode_tracker.DWT, episode_tracker.DRT, episode_tracker.DCT)\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n elif episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n current_best = episode_tracker.successful_waits\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n self.logger.info(\n 'Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}'.format(\n self.expt_name, best_episode.gross_earnings, best_episode.\n successful_waits, best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker\n",
"step-4": "<mask token>\nfrom __future__ import division\nimport logging\nimport numpy as np\nfrom data.data_provider import DataProvider\nfrom episode.episode import Episode\nfrom tracker import TrainingTracker\nfrom tqdm import tqdm\n\n\nclass RLTrainer(object):\n \"\"\"\n Creates RL training object\n \"\"\"\n\n def __init__(self, config_, grid_search=False):\n \"\"\"\n Constructor\n :param config_:\n :param grid_search:\n :return:\n \"\"\"\n self.config = config_\n self.grid_search = grid_search\n self.logger = logging.getLogger('cuda_logger')\n self.expt_name = self.config['RL_parameters']['experiment']\n self.objective = self.config['RL_parameters']['objective']\n self.city_states_filename = self.config['RL_parameters'][\n 'city_states_filename']\n self.training_tracker = TrainingTracker(self.config)\n\n def run(self):\n \"\"\"\n Creates and runs training episode\n :param:\n :return:\n \"\"\"\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters'][\n 'exp_decay_multiplier']\n q_ind = None\n r_table = None\n xi_matrix = None\n best_episode = None\n best_model = {}\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description('Episode: {}'.format(episode_id))\n current_best = -1000000\n ind_exploration_factor = np.e ** (-1 * episode_id *\n exp_decay_multiplier / ind_episodes)\n episode = Episode(self.config, episode_id,\n ind_exploration_factor, hex_attr_df, hex_distance_df,\n city_states, neighborhood, popular_bins, q_ind, r_table,\n xi_matrix)\n tables = 
episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n self.training_tracker.update_RL_tracker(episode_id,\n episode_tracker.gross_earnings, episode_tracker.\n successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.\n relocation_rides, episode_tracker.DET, episode_tracker.DPRT,\n episode_tracker.DWT, episode_tracker.DRT, episode_tracker.DCT)\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n elif episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n current_best = episode_tracker.successful_waits\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n self.logger.info(\n 'Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}'.format(\n self.expt_name, best_episode.gross_earnings, best_episode.\n successful_waits, best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker\n",
"step-5": "\"\"\"\nThis class runs the RL Training\n\"\"\"\n\nfrom __future__ import division\nimport logging\nimport numpy as np\nfrom data.data_provider import DataProvider\nfrom episode.episode import Episode\nfrom tracker import TrainingTracker\nfrom tqdm import tqdm\n\n\nclass RLTrainer(object):\n \"\"\"\n Creates RL training object\n \"\"\"\n\n def __init__(self, config_, grid_search=False):\n \"\"\"\n Constructor\n :param config_:\n :param grid_search:\n :return:\n \"\"\"\n self.config = config_\n self.grid_search = grid_search\n self.logger = logging.getLogger(\"cuda_logger\")\n self.expt_name = self.config['RL_parameters']['experiment']\n self.objective = self.config['RL_parameters']['objective']\n self.city_states_filename = self.config['RL_parameters']['city_states_filename']\n\n # Create training tracker\n self.training_tracker = TrainingTracker(self.config)\n\n def run(self):\n \"\"\"\n Creates and runs training episode\n :param:\n :return:\n \"\"\"\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']\n\n q_ind = None\n r_table = None\n xi_matrix = None\n\n best_episode = None\n best_model = {}\n\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description(\"Episode: {}\".format(episode_id))\n current_best = -1000000\n\n # Create episode\n ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)\n\n episode = Episode(self.config,\n episode_id,\n ind_exploration_factor,\n hex_attr_df,\n 
hex_distance_df,\n city_states,\n neighborhood,\n popular_bins,\n q_ind,\n r_table,\n xi_matrix)\n\n # Run episode\n tables = episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n\n # Uncomment for logging if running a job, comment during experiments\n # otherwise it leads to insanely huge logging output which is useless\n\n # self.logger.info(\"\"\"\n # Expt: {} Episode: {} Earnings: {}\n # Pax rides: {} Relocation rides: {} Unmet demand: {}\n # \"\"\".format(self.expt_name, episode_id,\n # episode_tracker.gross_earnings,\n # episode_tracker.successful_waits,\n # episode_tracker.relocation_rides,\n # episode_tracker.unmet_demand))\n # self.logger.info(\"----------------------------------\")\n\n self.training_tracker.update_RL_tracker(\n episode_id, episode_tracker.gross_earnings,\n episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.relocation_rides,\n episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,\n episode_tracker.DRT, episode_tracker.DCT)\n\n # Keep track of the best episode\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n else: # self.objective == 'pickups':\n if episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n current_best = episode_tracker.successful_waits\n\n # Keep track of the best model\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n\n # After finishing training\n self.logger.info(\"Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}\".format(self.expt_name,\n best_episode.gross_earnings,\n 
best_episode.successful_waits,\n best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
def quick_sort(arr):
    """Sort arr in place via quicksort (delegates to q_sort)."""
    last_index = len(arr) - 1
    q_sort(arr, 0, last_index)
def q_sort(arr, left, right):
    """Recursively quicksort the inclusive slice arr[left..right] in place."""
    if left >= right:
        return  # zero or one element: nothing to do
    pivot_index = partition(arr, left, right)
    q_sort(arr, left, pivot_index - 1)
    q_sort(arr, pivot_index + 1, right)
def partition(arr, left, right):
    """Hole-style partition of arr[left..right] around pivot arr[left].

    Elements smaller than the pivot end up on its left, larger-or-equal
    ones on its right; returns the pivot's final index.
    """
    pivot = arr[left]
    while left < right:
        # Scan from the right for an element smaller than the pivot.
        while left < right and not arr[right] < pivot:
            right -= 1
        # Drop it into the hole on the left side.
        arr[left] = arr[right]
        # Scan from the left for an element larger than the pivot.
        while left < right and not pivot < arr[left]:
            left += 1
        # Drop it into the hole on the right side.
        arr[right] = arr[left]
    # left == right: this slot is the pivot's final position.
    arr[left] = pivot
    return left
def partition_1(arr, low, high):
    """Lomuto partition of arr[low..high]: pivot is arr[high].

    Moves elements strictly smaller than the pivot to the front, then
    places the pivot after them; returns the pivot's final index.
    """
    pivot = arr[high]
    boundary = low  # everything left of boundary is < pivot
    for scan in range(low, high):
        if arr[scan] < pivot:
            arr[boundary], arr[scan] = arr[scan], arr[boundary]
            boundary += 1
    # Put the pivot just after the "smaller" region.
    arr[boundary], arr[high] = arr[high], arr[boundary]
    return boundary
if __name__ == '__main__':
    # Demo: sort a small sample list in place and show the result.
    data = [5, 9, 1, 11, 6, 7, 2, 4]
    quick_sort(data)
    print(data)
|
normal
|
{
"blob_id": "09a5c96b7f496aca6b34d7f0a83d5b1e182ca409",
"index": 1627,
"step-1": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\n<mask token>\n",
"step-2": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n while left < right:\n while left < right and arr[right] >= pivot:\n right -= 1\n arr[left] = arr[right]\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n arr[left] = pivot\n return left\n\n\n<mask token>\n",
"step-3": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n while left < right:\n while left < right and arr[right] >= pivot:\n right -= 1\n arr[left] = arr[right]\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n arr[left] = pivot\n return left\n\n\ndef partition_1(arr, low, high):\n pivot = arr[high]\n store_index = low\n for i in range(low, high):\n if arr[i] < pivot:\n arr[store_index], arr[i] = arr[i], arr[store_index]\n store_index += 1\n arr[store_index], arr[high] = arr[high], arr[store_index]\n return store_index\n\n\n<mask token>\n",
"step-4": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n while left < right:\n while left < right and arr[right] >= pivot:\n right -= 1\n arr[left] = arr[right]\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n arr[left] = pivot\n return left\n\n\ndef partition_1(arr, low, high):\n pivot = arr[high]\n store_index = low\n for i in range(low, high):\n if arr[i] < pivot:\n arr[store_index], arr[i] = arr[i], arr[store_index]\n store_index += 1\n arr[store_index], arr[high] = arr[high], arr[store_index]\n return store_index\n\n\nif __name__ == '__main__':\n arr = [5, 9, 1, 11, 6, 7, 2, 4]\n quick_sort(arr)\n print(arr)\n",
"step-5": "def quick_sort(arr):\n q_sort(arr, 0, len(arr) - 1)\n\n\ndef q_sort(arr, left, right):\n if left < right:\n pivot_index = partition(arr, left, right)\n\n q_sort(arr, left, pivot_index - 1)\n q_sort(arr, pivot_index + 1, right)\n\n\ndef partition(arr, left, right):\n pivot = arr[left]\n\n while left < right:\n # 如果列表后边的数比基准数大或相等, 则前移一位直到有比基准数小的数出现\n while left < right and arr[right] >= pivot:\n right -= 1\n # 如找到, 则把第 right 个元素赋值给 left 位置,此时表中 left 和 right 的元素相等\n arr[left] = arr[right]\n # # 减少下一个循环的一次比较\n # if left < right:\n # left += 1\n\n # 同样的方式比较前半区\n while left < right and arr[left] <= pivot:\n left += 1\n arr[right] = arr[left]\n # if left < right:\n # right -= 1\n\n # 做完一轮比较之后, 列表被分成了两个半区, 并且 left=right , 需要将这个数设置回 pivot\n arr[left] = pivot\n return left\n\n\ndef partition_1(arr, low, high):\n pivot = arr[high]\n store_index = low # 位置 store_index 存储较小元素\n\n for i in range(low, high):\n # 当前元素小于或等于 pivot\n if arr[i] < pivot:\n arr[store_index], arr[i] = arr[i], arr[store_index]\n store_index += 1\n arr[store_index], arr[high] = arr[high], arr[store_index]\n\n return store_index\n\n\nif __name__ == '__main__':\n # arr = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]\n arr = [5, 9, 1, 11, 6, 7, 2, 4]\n quick_sort(arr)\n print(arr)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#grabbed the following from moses marsh -- https://github.com/sidetrackedmind/gimme-bus/blob/master/gimmebus/utilities.py
from datetime import datetime as dt
from math import radians, cos, sin, acos, asin, sqrt
import networkx as nx
## These functions will go in model.py for matching historical GPS
## positions to the defined route shapes
def haversine(pt1, pt2):
    """
    INPUT: tuples (lon1, lat1), (lon2, lat2)

    OUTPUT: The great circle distance between two points
    on the earth (specified in decimal degrees)
    """
    # Work in radians from here on.
    lon1, lat1 = radians(pt1[0]), radians(pt1[1])
    lon2, lat2 = radians(pt2[0]), radians(pt2[1])

    # Haversine formula, written with half-angle deltas.
    half_dlon = (lon2 - lon1) / 2.0
    half_dlat = (lat2 - lat1) / 2.0
    a = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2

    earth_radius_km = 6371  # use 3956 for miles
    return 2 * earth_radius_km * asin(sqrt(a))
def get_closest_shape_pt(lat, lon, shape):
    """Return the index of the shape point nearest to (lat, lon).

    shape is a DataFrame with 'shape_pt_lon' / 'shape_pt_lat' columns;
    distances are great-circle (haversine) per row.
    """
    point = (lon, lat)
    distances = shape.apply(
        lambda row: haversine((row['shape_pt_lon'], row['shape_pt_lat']), point),
        axis=1)
    return distances.argmin()
def distance_along_route(pt_1_ind, pt_2_ind, shape):
    """Distance between two shape points measured along the route.

    Uses the cumulative 'shape_dist_traveled' column; positive when
    pt_2 is further along the route than pt_1.
    """
    traveled = shape['shape_dist_traveled']
    return traveled.loc[pt_2_ind] - traveled.loc[pt_1_ind]
def distance_from_segment(pt, seg_pt_1, seg_pt_2):
    """Approximate distance from pt to the segment seg_pt_1 -> seg_pt_2.

    Treats the three haversine distances as a planar triangle. If an
    endpoint angle is obtuse (a law-of-cosines numerator goes negative),
    the closest point is an endpoint, so that endpoint distance is
    returned; otherwise the perpendicular height is returned.
    """
    seg_len = haversine(seg_pt_1, seg_pt_2)
    d_from_1 = haversine(seg_pt_1, pt)
    d_from_2 = haversine(seg_pt_2, pt)

    cos_num_1 = d_from_1 ** 2 + seg_len ** 2 - d_from_2 ** 2
    cos_num_2 = d_from_2 ** 2 + seg_len ** 2 - d_from_1 ** 2
    if cos_num_1 < 0 or cos_num_2 < 0:
        # Obtuse at an endpoint: fall back to the nearest endpoint.
        return min(d_from_2, d_from_1)

    angle = acos(cos_num_1 / (2.0 * d_from_1 * seg_len))
    return d_from_1 * sin(angle)
|
normal
|
{
"blob_id": "89ce3d3ec9691ab8f54cc0d9d008e06c65b5f2cc",
"index": 7847,
"step-1": "<mask token>\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\n<mask token>\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[\n 'shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[\n 'shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\ndef distance_from_segment(pt, seg_pt_1, seg_pt_2):\n c = haversine(seg_pt_1, seg_pt_2)\n b = haversine(seg_pt_1, pt)\n a = haversine(seg_pt_2, pt)\n num1 = b ** 2 + c ** 2 - a ** 2\n num2 = a ** 2 + c ** 2 - b ** 2\n if num1 < 0 or num2 < 0:\n return min(a, b)\n theta = acos(num1 / (2.0 * b * c))\n h = b * sin(theta)\n return h\n",
"step-4": "from datetime import datetime as dt\nfrom math import radians, cos, sin, acos, asin, sqrt\nimport networkx as nx\n\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r\n\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], x[\n 'shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\n\ndef distance_from_segment(pt, seg_pt_1, seg_pt_2):\n c = haversine(seg_pt_1, seg_pt_2)\n b = haversine(seg_pt_1, pt)\n a = haversine(seg_pt_2, pt)\n num1 = b ** 2 + c ** 2 - a ** 2\n num2 = a ** 2 + c ** 2 - b ** 2\n if num1 < 0 or num2 < 0:\n return min(a, b)\n theta = acos(num1 / (2.0 * b * c))\n h = b * sin(theta)\n return h\n",
"step-5": "#grabbed the following from moses marsh -- https://github.com/sidetrackedmind/gimme-bus/blob/master/gimmebus/utilities.py\n\nfrom datetime import datetime as dt\nfrom math import radians, cos, sin, acos, asin, sqrt\nimport networkx as nx\n\n## These functions will go in model.py for matching historical GPS\n## positions to the defined route shapes\n\ndef haversine(pt1, pt2):\n \"\"\"\n INPUT: tuples (lon1, lat1), (lon2, lat2)\n\n OUTPUT: The great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [pt1[0], pt1[1], pt2[0], pt2[1]])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2.)**2 + cos(lat1) * cos(lat2) * sin(dlon/2.)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * r\n\ndef get_closest_shape_pt(lat, lon, shape):\n dist = shape.apply(lambda x: haversine((x['shape_pt_lon'], \\\n x['shape_pt_lat']), (lon, lat)), axis=1)\n return dist.argmin()\n\ndef distance_along_route(pt_1_ind, pt_2_ind, shape):\n d1 = shape.loc[pt_1_ind]['shape_dist_traveled']\n d2 = shape.loc[pt_2_ind]['shape_dist_traveled']\n return d2 - d1\n\ndef distance_from_segment(pt, seg_pt_1, seg_pt_2):\n c = haversine(seg_pt_1, seg_pt_2)\n b = haversine(seg_pt_1, pt)\n a = haversine(seg_pt_2, pt)\n\n num1 = (b**2 + c**2 - a**2)\n num2 = (a**2 + c**2 - b**2)\n\n if (num1 < 0) or (num2 < 0):\n return min(a, b)\n\n theta = acos( num1 / (2.*b*c))\n h = b * sin(theta)\n\n return h\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from scipy.optimize import newton
from math import sqrt
import time
def GetRadius(Ri, DV, mu):
    """Solve for the final circular-orbit radius Rf.

    Finds Rf such that the total two-burn (Hohmann-style) transfer
    delta-v from circular radius Ri equals DV, for gravitational
    parameter mu. Uses scipy's newton solver started at Ri.
    """
    def residual(Rf):
        first_burn = sqrt(mu / Ri) * (sqrt(2 * Rf / (Rf + Ri)) - 1)
        second_burn = sqrt(mu / Rf) * (1 - sqrt(2 * Ri / (Rf + Ri)))
        return first_burn + second_burn - DV

    return newton(residual, Ri)
if __name__ == '__main__':
    # Solve one example case; timing printout is left disabled.
    start = time.time()
    print(GetRadius(10000.0, 23546.214671053374, (398600. * 10 ** 9)))
|
normal
|
{
"blob_id": "20722cf82371d176942e068e91b8fb38b4db61fd",
"index": 6951,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef GetRadius(Ri, DV, mu):\n\n def f(Rf):\n return sqrt(mu / Ri) * (sqrt(2 * Rf / (Rf + Ri)) - 1) + sqrt(mu / Rf\n ) * (1 - sqrt(2 * Ri / (Rf + Ri))) - DV\n return newton(f, Ri)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef GetRadius(Ri, DV, mu):\n\n def f(Rf):\n return sqrt(mu / Ri) * (sqrt(2 * Rf / (Rf + Ri)) - 1) + sqrt(mu / Rf\n ) * (1 - sqrt(2 * Ri / (Rf + Ri))) - DV\n return newton(f, Ri)\n\n\nif __name__ == '__main__':\n starttime = time.time()\n print(GetRadius(10000.0, 23546.214671053374, 398600.0 * 10 ** 9))\n",
"step-4": "from scipy.optimize import newton\nfrom math import sqrt\nimport time\n\n\ndef GetRadius(Ri, DV, mu):\n\n def f(Rf):\n return sqrt(mu / Ri) * (sqrt(2 * Rf / (Rf + Ri)) - 1) + sqrt(mu / Rf\n ) * (1 - sqrt(2 * Ri / (Rf + Ri))) - DV\n return newton(f, Ri)\n\n\nif __name__ == '__main__':\n starttime = time.time()\n print(GetRadius(10000.0, 23546.214671053374, 398600.0 * 10 ** 9))\n",
"step-5": "from scipy.optimize import newton\nfrom math import sqrt\nimport time\n\ndef GetRadius(Ri,DV,mu):\n\n def f(Rf):\n return sqrt(mu/Ri)*(sqrt(2*Rf/(Rf+Ri))-1)+sqrt(mu/Rf)*(1-sqrt(2*Ri/(Rf+Ri)))-DV\n\n return newton(f,Ri)\n\nif __name__ == '__main__':\n starttime = time.time()\n print(GetRadius(10000.0,23546.214671053374,(398600.*10**9)))\n # time = time.time()-starttime\n # print(time)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def path_check(hosts):
    """
    Parses username, port, host and local and remote path,
    finds all local and remote files, using find_local_files and find_remote_files functions,
    and then opens ssh session using paramiko for each given host.

    :param hosts: list of host specs shaped like user[<sep>port]@host[:remote_path],
        where <sep> may be ',', '.' or ':' (see the separator loop below).
    """
    local_files = []
    local_path = ''
    # Extract the password from a "–pass='...'" argument (note: the flag
    # uses an en dash, not a hyphen).
    for item in sys.argv:
        if '–pass' in item:
            secret = item.split('=')[1].strip("'")
            break
    else:
        secret = ''
    # Positional arguments: a path contains '/'; a bare file name contains
    # '.' but no '/'.
    for item in sys.argv:
        if '/' in item and '@' not in item:
            local_path = item
        if '.' in item and '/' not in item:
            local_files.append(item)
    if local_path:
        # NOTE(review): append() nests the returned list inside local_files;
        # extend() was probably intended — confirm against callers.
        local_files.append(find_local_files(local_path, 'f'))
    for i in hosts:
        # Split "user[<sep>port]@host[:remote_path]" into its pieces.
        user_port, host_remote_path = i.split('@')
        if ':' in i:
            host, remote_path = host_remote_path.split(':')
        else:
            host = host_remote_path
            remote_path = ''
        # ',', '.' or ':' may separate user from port; port stays 0 when
        # no separator is present (meaning: use the default ssh port).
        # NOTE(review): ':' doubles as the host/path separator above —
        # a ':' port separator combined with a remote path is ambiguous;
        # confirm the expected spec format.
        for separator in ',.:':
            if separator in user_port:
                user, port = user_port.split(separator)
                break
        else:
            user = user_port
            port = 0
        ssh = open_sshclient(host, user, port, secret)
        if not remote_path:
            # Mirror the local layout when no remote path was given.
            remote_path = local_path
        ssh.exec_command('mkdir -p ' + remote_path)
        # NOTE(review): remote_files is computed but never used here —
        # presumably intended for a diff/sync decision; confirm.
        remote_files = find_remote_files(remote_path, 'f', ssh)
        ssh.close()
    copy_file(hosts)
def open_sshclient(host, user, port, secret):
"""
Opens ssh session using paramiko.
"""
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.load_system_host_keys()
if secret and port:
ssh_client.connect(hostname=host, username=user, password=secret,
port=port)
elif secret and port == 0:
ssh_client.connect(hostname=host, username=user, password=secret)
elif not secret and port:
ssh_client.connect(hostname=host, username=user, port=port)
else:
ssh_client.connect(hostname=host, username=user)
return ssh_client
def copy_file(hosts):
"""
Makes all needed operations according to given attributes with rsync.
"""
arguments = []
for item in sys.argv[1:]:
if '@' not in item and '–pass' not in item:
arguments.append(item)
for item in hosts:
os.system('rsync ' + ' '.join(arguments) + ' ' + item)
<|reserved_special_token_0|>
def find_local_files(local_path, type):
"""
Finds all files or directories on local machine, according to given attributes.
"""
local_out = commands.getoutput('find %s -name "*" -type %s' % (
local_path, type))
files = []
for file in local_out.split('\n'):
files.append(file)
return files
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ip_check():
"""
Parses attributes for given hosts,
then checks if hosts are up
and then calls path_check function with working hosts.
"""
hosts = []
valid_hosts = []
for item in sys.argv:
if '@' in item:
hosts.append(item)
for i in hosts:
host = i.split('@')[1].split(':')[0]
command = os.system('ping -c 1 ' + host + ' > /dev/null')
if command == 0:
valid_hosts.append(i)
if valid_hosts:
path_check(valid_hosts)
def path_check(hosts):
"""
Parses username, port, host and local and remote path,
finds all local and remote files, using find_local_files and find_remote_files functions,
and then opens ssh session using paramiko for each given host.
"""
local_files = []
local_path = ''
for item in sys.argv:
if '–pass' in item:
secret = item.split('=')[1].strip("'")
break
else:
secret = ''
for item in sys.argv:
if '/' in item and '@' not in item:
local_path = item
if '.' in item and '/' not in item:
local_files.append(item)
if local_path:
local_files.append(find_local_files(local_path, 'f'))
for i in hosts:
user_port, host_remote_path = i.split('@')
if ':' in i:
host, remote_path = host_remote_path.split(':')
else:
host = host_remote_path
remote_path = ''
for separator in ',.:':
if separator in user_port:
user, port = user_port.split(separator)
break
else:
user = user_port
port = 0
ssh = open_sshclient(host, user, port, secret)
if not remote_path:
remote_path = local_path
ssh.exec_command('mkdir -p ' + remote_path)
remote_files = find_remote_files(remote_path, 'f', ssh)
ssh.close()
copy_file(hosts)
def open_sshclient(host, user, port, secret):
"""
Opens ssh session using paramiko.
"""
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.load_system_host_keys()
if secret and port:
ssh_client.connect(hostname=host, username=user, password=secret,
port=port)
elif secret and port == 0:
ssh_client.connect(hostname=host, username=user, password=secret)
elif not secret and port:
ssh_client.connect(hostname=host, username=user, port=port)
else:
ssh_client.connect(hostname=host, username=user)
return ssh_client
def copy_file(hosts):
"""
Makes all needed operations according to given attributes with rsync.
"""
arguments = []
for item in sys.argv[1:]:
if '@' not in item and '–pass' not in item:
arguments.append(item)
for item in hosts:
os.system('rsync ' + ' '.join(arguments) + ' ' + item)
def find_remote_files(remote_path, type, ssh):
"""
Finds all files or directories on remote machine, according to given attributes.
"""
ssh_in, ssh_out, ssh_err = ssh.exec_command(
'find %s -name "*" -type %s' % (remote_path, type))
files = []
for file in ssh_out.readlines():
files.append(file.rstrip())
return files
def find_local_files(local_path, type):
"""
Finds all files or directories on local machine, according to given attributes.
"""
local_out = commands.getoutput('find %s -name "*" -type %s' % (
local_path, type))
files = []
for file in local_out.split('\n'):
files.append(file)
return files
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ip_check():
"""
Parses attributes for given hosts,
then checks if hosts are up
and then calls path_check function with working hosts.
"""
hosts = []
valid_hosts = []
for item in sys.argv:
if '@' in item:
hosts.append(item)
for i in hosts:
host = i.split('@')[1].split(':')[0]
command = os.system('ping -c 1 ' + host + ' > /dev/null')
if command == 0:
valid_hosts.append(i)
if valid_hosts:
path_check(valid_hosts)
def path_check(hosts):
"""
Parses username, port, host and local and remote path,
finds all local and remote files, using find_local_files and find_remote_files functions,
and then opens ssh session using paramiko for each given host.
"""
local_files = []
local_path = ''
for item in sys.argv:
if '–pass' in item:
secret = item.split('=')[1].strip("'")
break
else:
secret = ''
for item in sys.argv:
if '/' in item and '@' not in item:
local_path = item
if '.' in item and '/' not in item:
local_files.append(item)
if local_path:
local_files.append(find_local_files(local_path, 'f'))
for i in hosts:
user_port, host_remote_path = i.split('@')
if ':' in i:
host, remote_path = host_remote_path.split(':')
else:
host = host_remote_path
remote_path = ''
for separator in ',.:':
if separator in user_port:
user, port = user_port.split(separator)
break
else:
user = user_port
port = 0
ssh = open_sshclient(host, user, port, secret)
if not remote_path:
remote_path = local_path
ssh.exec_command('mkdir -p ' + remote_path)
remote_files = find_remote_files(remote_path, 'f', ssh)
ssh.close()
copy_file(hosts)
def open_sshclient(host, user, port, secret):
"""
Opens ssh session using paramiko.
"""
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.load_system_host_keys()
if secret and port:
ssh_client.connect(hostname=host, username=user, password=secret,
port=port)
elif secret and port == 0:
ssh_client.connect(hostname=host, username=user, password=secret)
elif not secret and port:
ssh_client.connect(hostname=host, username=user, port=port)
else:
ssh_client.connect(hostname=host, username=user)
return ssh_client
def copy_file(hosts):
"""
Makes all needed operations according to given attributes with rsync.
"""
arguments = []
for item in sys.argv[1:]:
if '@' not in item and '–pass' not in item:
arguments.append(item)
for item in hosts:
os.system('rsync ' + ' '.join(arguments) + ' ' + item)
def find_remote_files(remote_path, type, ssh):
"""
Finds all files or directories on remote machine, according to given attributes.
"""
ssh_in, ssh_out, ssh_err = ssh.exec_command(
'find %s -name "*" -type %s' % (remote_path, type))
files = []
for file in ssh_out.readlines():
files.append(file.rstrip())
return files
def find_local_files(local_path, type):
"""
Finds all files or directories on local machine, according to given attributes.
"""
local_out = commands.getoutput('find %s -name "*" -type %s' % (
local_path, type))
files = []
for file in local_out.split('\n'):
files.append(file)
return files
ip_check()
<|reserved_special_token_1|>
import os
import sys
import paramiko
import commands
def ip_check():
"""
Parses attributes for given hosts,
then checks if hosts are up
and then calls path_check function with working hosts.
"""
hosts = []
valid_hosts = []
for item in sys.argv:
if '@' in item:
hosts.append(item)
for i in hosts:
host = i.split('@')[1].split(':')[0]
command = os.system('ping -c 1 ' + host + ' > /dev/null')
if command == 0:
valid_hosts.append(i)
if valid_hosts:
path_check(valid_hosts)
def path_check(hosts):
"""
Parses username, port, host and local and remote path,
finds all local and remote files, using find_local_files and find_remote_files functions,
and then opens ssh session using paramiko for each given host.
"""
local_files = []
local_path = ''
for item in sys.argv:
if '–pass' in item:
secret = item.split('=')[1].strip("'")
break
else:
secret = ''
for item in sys.argv:
if '/' in item and '@' not in item:
local_path = item
if '.' in item and '/' not in item:
local_files.append(item)
if local_path:
local_files.append(find_local_files(local_path, 'f'))
for i in hosts:
user_port, host_remote_path = i.split('@')
if ':' in i:
host, remote_path = host_remote_path.split(':')
else:
host = host_remote_path
remote_path = ''
for separator in ',.:':
if separator in user_port:
user, port = user_port.split(separator)
break
else:
user = user_port
port = 0
ssh = open_sshclient(host, user, port, secret)
if not remote_path:
remote_path = local_path
ssh.exec_command('mkdir -p ' + remote_path)
remote_files = find_remote_files(remote_path, 'f', ssh)
ssh.close()
copy_file(hosts)
def open_sshclient(host, user, port, secret):
"""
Opens ssh session using paramiko.
"""
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.load_system_host_keys()
if secret and port:
ssh_client.connect(hostname=host, username=user, password=secret,
port=port)
elif secret and port == 0:
ssh_client.connect(hostname=host, username=user, password=secret)
elif not secret and port:
ssh_client.connect(hostname=host, username=user, port=port)
else:
ssh_client.connect(hostname=host, username=user)
return ssh_client
def copy_file(hosts):
"""
Makes all needed operations according to given attributes with rsync.
"""
arguments = []
for item in sys.argv[1:]:
if '@' not in item and '–pass' not in item:
arguments.append(item)
for item in hosts:
os.system('rsync ' + ' '.join(arguments) + ' ' + item)
def find_remote_files(remote_path, type, ssh):
"""
Finds all files or directories on remote machine, according to given attributes.
"""
ssh_in, ssh_out, ssh_err = ssh.exec_command(
'find %s -name "*" -type %s' % (remote_path, type))
files = []
for file in ssh_out.readlines():
files.append(file.rstrip())
return files
def find_local_files(local_path, type):
"""
Finds all files or directories on local machine, according to given attributes.
"""
local_out = commands.getoutput('find %s -name "*" -type %s' % (
local_path, type))
files = []
for file in local_out.split('\n'):
files.append(file)
return files
ip_check()
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import paramiko
import commands
def ip_check():
    """Collect host specs from the command line, ping each host once,
    and hand the reachable specs to path_check().

    An argument counts as a host spec when it contains '@'
    (i.e. user@host[:path]).  Hosts that do not answer a single ping
    are silently dropped.
    """
    host_specs = [arg for arg in sys.argv if '@' in arg]
    reachable = []
    for spec in host_specs:
        hostname = spec.split('@')[1].split(':')[0]
        # os.system returns the shell exit status; 0 means ping succeeded.
        if os.system('ping -c 1 ' + hostname + ' > /dev/null') == 0:
            reachable.append(spec)
    if reachable:
        path_check(reachable)
def path_check(hosts):
    """
    Parses username, port, host and local and remote path,
    finds all local and remote files, using find_local_files and find_remote_files functions,
    and then opens ssh session using paramiko for each given host.

    Host specs look like user[<sep>port]@host[:remote_path], where <sep>
    is one of ',', '.' or ':'.
    """
    local_files = []
    local_path = ''
    # Pick up an optional '–pass=...' argument (NOTE(review): that is an
    # en-dash, not '--pass' — confirm against what callers actually type).
    # The for/else runs the else branch only when no break happened.
    for item in sys.argv:
        if '–pass' in item:
            secret = item.split('=')[1].strip("'")
            break
    else:
        secret = ''
    # Classify the remaining argv entries: a '/' (and no '@') marks the
    # local path; a '.' without '/' is taken as a bare local file name.
    for item in sys.argv:
        if '/' in item and '@' not in item:
            local_path = item
        if '.' in item and '/' not in item:
            local_files.append(item)
    if local_path:
        # NOTE(review): this appends the whole list as one element, and
        # local_files is never read afterwards — kept for parity.
        local_files.append(find_local_files(local_path, 'f'))
    for i in hosts:
        user_port, host_remote_path = i.split('@')
        # Bug fix: test the part AFTER '@' for a remote path.  Testing the
        # whole spec crashed on e.g. 'user:22@host' (no remote path),
        # because host_remote_path then has no ':' to split on.
        if ':' in host_remote_path:
            host, remote_path = host_remote_path.split(':')
        else:
            host = host_remote_path
            remote_path = ''
        # Accept ',', '.' or ':' between user and port; the for/else falls
        # through to "no port given" (0) when none of them is present.
        for separator in ',.:':
            if separator in user_port:
                user, port = user_port.split(separator)
                break
        else:
            user = user_port
            port = 0
        ssh = open_sshclient(host, user, port, secret)
        if not remote_path:
            # Mirror the local path on the remote side when none was given.
            remote_path = local_path
        ssh.exec_command('mkdir -p '+remote_path)
        # Return value was never used; the call is kept for its remote
        # `find` round-trip (NOTE(review): candidate for removal).
        find_remote_files(remote_path, 'f', ssh)
        ssh.close()
    copy_file(hosts)
def open_sshclient(host, user, port, secret):
    """Open and return a paramiko SSH session to *host* as *user*.

    A falsy *port* means "use paramiko's default port"; a falsy *secret*
    means "no password" (rely on system host keys / agent auth).
    """
    ssh_client = paramiko.SSHClient()
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.load_system_host_keys()
    # Build the optional keyword arguments instead of enumerating every
    # combination.  This also fixes a defect in the old if/elif chain:
    # when port parsed to '' (e.g. spec 'user:@host'), no branch matched
    # a truthy secret, so the password was silently dropped.
    optional = {}
    if secret:
        optional['password'] = secret
    if port:
        optional['port'] = port
    ssh_client.connect(hostname=host, username=user, **optional)
    return ssh_client
def copy_file(hosts):
    """Invoke rsync once per host spec, forwarding every command-line
    argument that is neither a host spec ('@') nor the '–pass' option.

    NOTE(review): arguments are interpolated into a shell command
    unescaped; argv is operator-controlled here, but quoting each piece
    (pipes.quote) would be safer.
    """
    arguments = []
    for item in sys.argv[1:]:
        if '@' not in item and '–pass' not in item:
            arguments.append(item)
    for item in hosts:
        # .format as the original review comment requested; the resulting
        # command string is identical to the old concatenation.
        os.system('rsync {0} {1}'.format(' '.join(arguments), item))
def find_remote_files(remote_path, type, ssh):
    """Return the paths of the given `find -type` (e.g. 'f' for regular
    files) under *remote_path*, as reported by `find` run over *ssh*.
    """
    find_cmd = 'find %s -name "*" -type %s' % (remote_path, type)
    _stdin, stdout, _stderr = ssh.exec_command(find_cmd)
    # One path per output line; strip the trailing newline from each.
    return [line.rstrip() for line in stdout.readlines()]
def find_local_files(local_path, type):
    """Return every path of the given `find -type` (e.g. 'f' for regular
    files) under *local_path* on the local machine.
    """
    # commands.getoutput captures the stdout of the shell command
    # (Python 2 stdlib); one path per line.
    output = commands.getoutput('find %s -name "*" -type %s' % (local_path, type))
    return output.split("\n")
ip_check()
|
flexible
|
{
"blob_id": "6e3aa677985d7bd91bfbbd2078665206839bac63",
"index": 3578,
"step-1": "<mask token>\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if 
'@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\n<mask token>\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 ' + host + ' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, 
password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n ssh_in, ssh_out, ssh_err = ssh.exec_command(\n 'find %s -name \"*\" -type %s' % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 ' + host + ' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, 
password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n ssh_in, ssh_out, ssh_err = ssh.exec_command(\n 'find %s -name \"*\" -type %s' % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\nip_check()\n",
"step-4": "import os\nimport sys\nimport paramiko\nimport commands\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 ' + host + ' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' 
in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p ' + remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret,\n port=port)\n elif secret and port == 0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n os.system('rsync ' + ' '.join(arguments) + ' ' + item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n ssh_in, ssh_out, ssh_err = ssh.exec_command(\n 'find %s -name \"*\" -type %s' % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return files\n\n\ndef find_local_files(local_path, 
type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput('find %s -name \"*\" -type %s' % (\n local_path, type))\n files = []\n for file in local_out.split('\\n'):\n files.append(file)\n return files\n\n\nip_check()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport paramiko\nimport commands\n\n\ndef ip_check():\n \"\"\"\n Parses attributes for given hosts,\n then checks if hosts are up\n and then calls path_check function with working hosts.\n \"\"\"\n hosts = []\n valid_hosts = []\n for item in sys.argv:\n if '@' in item:\n hosts.append(item)\n for i in hosts:\n host = i.split('@')[1].split(':')[0]\n command = os.system('ping -c 1 '+host+' > /dev/null')\n if command == 0:\n valid_hosts.append(i)\n if valid_hosts:\n path_check(valid_hosts)\n\n\ndef path_check(hosts):\n \"\"\"\n Parses username, port, host and local and remote path,\n finds all local and remote files, using find_local_files and find_remote_files functions,\n and then opens ssh session using paramiko for each given host.\n \"\"\"\n local_files = []\n local_path = ''\n for item in sys.argv:\n if '–pass' in item:\n secret = item.split('=')[1].strip(\"'\")\n break\n else:\n secret = ''\n for item in sys.argv:\n if '/' in item and '@' not in item:\n local_path = item\n if '.' 
in item and '/' not in item:\n local_files.append(item)\n if local_path:\n local_files.append(find_local_files(local_path, 'f'))\n for i in hosts:\n user_port, host_remote_path = i.split('@')\n if ':' in i:\n host, remote_path = host_remote_path.split(':')\n else:\n host = host_remote_path\n remote_path = ''\n for separator in ',.:':\n if separator in user_port:\n user, port = user_port.split(separator)\n break\n else:\n user = user_port\n port = 0\n ssh = open_sshclient(host, user, port, secret)\n if not remote_path:\n remote_path = local_path\n ssh.exec_command('mkdir -p '+remote_path)\n remote_files = find_remote_files(remote_path, 'f', ssh)\n ssh.close()\n copy_file(hosts)\n\n\ndef open_sshclient(host, user, port, secret):\n \"\"\"\n Opens ssh session using paramiko.\n \"\"\"\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret, port=port)\n elif secret and port==0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client\n\n\ndef copy_file(hosts):\n \"\"\"\n Makes all needed operations according to given attributes with rsync.\n \"\"\"\n arguments = []\n for item in sys.argv[1:]:\n if '@' not in item and '–pass' not in item:\n arguments.append(item)\n for item in hosts:\n # plz use .format for test strings concatenation\n os.system('rsync '+' '.join(arguments)+' '+item)\n\n\ndef find_remote_files(remote_path, type, ssh):\n \"\"\"\n Finds all files or directories on remote machine, according to given attributes.\n \"\"\"\n (ssh_in, ssh_out, ssh_err) = ssh.exec_command(\"find %s -name \\\"*\\\" -type %s\" % (remote_path, type))\n files = []\n for file in ssh_out.readlines():\n files.append(file.rstrip())\n return 
files\n\n\ndef find_local_files(local_path, type):\n \"\"\"\n Finds all files or directories on local machine, according to given attributes.\n \"\"\"\n local_out = commands.getoutput(\"find %s -name \\\"*\\\" -type %s\" % (local_path, type))\n files = []\n for file in local_out.split(\"\\n\"):\n files.append(file)\n return files\n\nip_check()\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import argparse
import debug.debug as dbg
import helper.helper as hlp
import prep.preprocessor as pre
import sample.sample as s
def main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):
    """Cross-validate an HSV classifier on the features/labels in
    *dir_train* and log the resulting scores and evaluation report.

    When *number_partitions* is None or 0, every file acts as one fold
    (data kept per file, subsampling applied late, per fold); otherwise
    the data is concatenated and split into *number_partitions* folds
    (subsampling applied early, on the whole set).  Predicted — and,
    when concatenated, true — labels are optionally written to TXT files
    next to the training data.
    """
    hlp.setup_logging()
    # Fold strategy: file-per-fold vs. fixed partition count.
    files_are_folds = not number_partitions
    do_concat = not files_are_folds
    partitions_from_files = files_are_folds
    # Subsampling happens either early (concatenated) or late (per fold),
    # and only when requested at all.
    early_subsampling = do_subsampling and not files_are_folds
    late_subsampling = do_subsampling and files_are_folds
    X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,
                                             do_subsampling=early_subsampling,
                                             do_concat=do_concat)
    clf = s.get_svclassifier(C=C, gamma=gamma)
    scores, y_pred = s.get_crossval_scores_prediction(
        X, y, n_folds=number_partitions, clf=clf,
        files_as_folds=partitions_from_files,
        do_subsampling=late_subsampling)
    evaluation = s.get_eval_report(scores)
    hlp.log(scores)
    hlp.log(evaluation)
    if write_labels:
        dbg.write_list_to_dir(dir_train, y_pred, "y_pred.txt")
        if do_concat:
            dbg.write_list_to_dir(dir_train, y, "y_true.txt")
if __name__ == "__main__":
    # Command-line entry point: declare the CLI, parse argv, and run the
    # cross-validation driver.  Argument order here also fixes the order
    # shown in --help output.
    parser = argparse.ArgumentParser(description="Print evaluation metrics for cross validating an HSV classifier.")
    parser.add_argument("dir_train",
                        help="Directory containing all feature XMLs and label CSVs for cross validating the "
                             "classifier. CSVs need to have the same file name as their corresponding XML.")
    parser.add_argument("-c", "--C_value", help="Omit the grid search and directly specify a C value.", type=float)
    parser.add_argument("-g", "--gamma_value", help="Omit the grid search and directly specify a gamma value.",
                        type=float)
    # Optional flags default to None/False, which main() interprets as
    # "files as folds" / "no subsampling" / "no label dump".
    parser.add_argument("-p", "--number_partitions",
                        help="Set the number of partitions for cross validation. If omitted, take each file "
                             "as a partition.", type=int)
    parser.add_argument("-s", "--subsampling", help="Subsample majority class", action="store_true")
    parser.add_argument("-wl", "--write_labels",
                        help="Write both true and predicted labels of the eval file(s) to TXT files.",
                        action="store_true")
    args = parser.parse_args()
    main(args.dir_train, args.C_value, args.gamma_value, args.number_partitions, args.subsampling, args.write_labels)
|
normal
|
{
"blob_id": "4a63431aa71ca3f4b75fcd89a50bf599e7717645",
"index": 2442,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n if number_partitions is None or number_partitions == 0:\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else:\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,\n do_subsampling=early_subsampling, do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=\n number_partitions, clf=clf, files_as_folds=partitions_from_files,\n do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, 'y_true.txt')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n if number_partitions is None or number_partitions == 0:\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else:\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,\n do_subsampling=early_subsampling, do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=\n number_partitions, clf=clf, files_as_folds=partitions_from_files,\n do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, 'y_true.txt')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Print evaluation metrics for cross validating an HSV classifier.')\n parser.add_argument('dir_train', help=\n 'Directory containing all feature XMLs and label CSVs for cross validating the classifier. CSVs need to have the same file name as their corresponding XML.'\n )\n parser.add_argument('-c', '--C_value', help=\n 'Omit the grid search and directly specify a C value.', type=float)\n parser.add_argument('-g', '--gamma_value', help=\n 'Omit the grid search and directly specify a gamma value.', type=float)\n parser.add_argument('-p', '--number_partitions', help=\n 'Set the number of partitions for cross validation. 
If omitted, take each file as a partition.'\n , type=int)\n parser.add_argument('-s', '--subsampling', help=\n 'Subsample majority class', action='store_true')\n parser.add_argument('-wl', '--write_labels', help=\n 'Write both true and predicted labels of the eval file(s) to TXT files.'\n , action='store_true')\n args = parser.parse_args()\n main(args.dir_train, args.C_value, args.gamma_value, args.\n number_partitions, args.subsampling, args.write_labels)\n",
"step-4": "import argparse\nimport debug.debug as dbg\nimport helper.helper as hlp\nimport prep.preprocessor as pre\nimport sample.sample as s\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n if number_partitions is None or number_partitions == 0:\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else:\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train,\n do_subsampling=early_subsampling, do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=\n number_partitions, clf=clf, files_as_folds=partitions_from_files,\n do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, 'y_pred.txt')\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, 'y_true.txt')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Print evaluation metrics for cross validating an HSV classifier.')\n parser.add_argument('dir_train', help=\n 'Directory containing all feature XMLs and label CSVs for cross validating the classifier. CSVs need to have the same file name as their corresponding XML.'\n )\n parser.add_argument('-c', '--C_value', help=\n 'Omit the grid search and directly specify a C value.', type=float)\n parser.add_argument('-g', '--gamma_value', help=\n 'Omit the grid search and directly specify a gamma value.', type=float)\n parser.add_argument('-p', '--number_partitions', help=\n 'Set the number of partitions for cross validation. 
If omitted, take each file as a partition.'\n , type=int)\n parser.add_argument('-s', '--subsampling', help=\n 'Subsample majority class', action='store_true')\n parser.add_argument('-wl', '--write_labels', help=\n 'Write both true and predicted labels of the eval file(s) to TXT files.'\n , action='store_true')\n args = parser.parse_args()\n main(args.dir_train, args.C_value, args.gamma_value, args.\n number_partitions, args.subsampling, args.write_labels)\n",
"step-5": "import argparse\n\nimport debug.debug as dbg\nimport helper.helper as hlp\nimport prep.preprocessor as pre\nimport sample.sample as s\n\n\ndef main(dir_train, C, gamma, number_partitions, do_subsampling, write_labels):\n hlp.setup_logging()\n\n # Files as folds?\n if number_partitions is None or number_partitions == 0: # Yes\n do_concat = False\n partitions_from_files = True\n early_subsampling = False\n late_subsampling = True\n else: # No\n do_concat = True\n partitions_from_files = False\n early_subsampling = True\n late_subsampling = False\n\n if not do_subsampling:\n early_subsampling = late_subsampling = False\n\n X, y = pre.get_multiple_data_and_targets(dir_filepath=dir_train, do_subsampling=early_subsampling,\n do_concat=do_concat)\n clf = s.get_svclassifier(C=C, gamma=gamma)\n scores, y_pred = s.get_crossval_scores_prediction(X, y, n_folds=number_partitions, clf=clf,\n files_as_folds=partitions_from_files, do_subsampling=late_subsampling)\n evaluation = s.get_eval_report(scores)\n hlp.log(scores)\n hlp.log(evaluation)\n\n if write_labels:\n dbg.write_list_to_dir(dir_train, y_pred, \"y_pred.txt\")\n if do_concat:\n dbg.write_list_to_dir(dir_train, y, \"y_true.txt\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Print evaluation metrics for cross validating an HSV classifier.\")\n parser.add_argument(\"dir_train\",\n help=\"Directory containing all feature XMLs and label CSVs for cross validating the \"\n \"classifier. CSVs need to have the same file name as their corresponding XML.\")\n parser.add_argument(\"-c\", \"--C_value\", help=\"Omit the grid search and directly specify a C value.\", type=float)\n parser.add_argument(\"-g\", \"--gamma_value\", help=\"Omit the grid search and directly specify a gamma value.\",\n type=float)\n parser.add_argument(\"-p\", \"--number_partitions\",\n help=\"Set the number of partitions for cross validation. 
If omitted, take each file \"\n \"as a partition.\", type=int)\n parser.add_argument(\"-s\", \"--subsampling\", help=\"Subsample majority class\", action=\"store_true\")\n parser.add_argument(\"-wl\", \"--write_labels\",\n help=\"Write both true and predicted labels of the eval file(s) to TXT files.\",\n action=\"store_true\")\n args = parser.parse_args()\n main(args.dir_train, args.C_value, args.gamma_value, args.number_partitions, args.subsampling, args.write_labels)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
import re
from pathlib import Path
from typing import Dict, List, Optional, Type
from parameterized import parameterized_class # type: ignore
import materialize.optbench
import materialize.optbench.sql
from materialize.feature_benchmark.action import Action
from materialize.feature_benchmark.executor import Executor
from materialize.feature_benchmark.measurement_source import (
MeasurementSource,
Timestamp,
)
from materialize.feature_benchmark.scenario import Scenario
class OptbenchInit(Action):
    """Action that loads an optbench schema into the composition.

    When *no_indexes* is set, statements that create or drop indexes are
    stripped from the schema before it is executed.
    """

    def __init__(self, scenario: str, no_indexes: bool = False) -> None:
        self._executor: Optional[Executor] = None
        self._scenario = scenario
        self._no_indexes = no_indexes

    def run(self, executor: Optional[Executor] = None) -> None:
        e = executor or self._executor
        schema_path = Path(f"misc/python/materialize/optbench/schema/{self._scenario}.sql")
        statements = materialize.optbench.sql.parse_from_file(schema_path)
        if self._no_indexes:
            # Drop any CREATE [DEFAULT] INDEX / DROP INDEX statements.
            idx_re = re.compile(r"(create|create\s+default|drop)\s+index\s+")
            statements = [s for s in statements if idx_re.match(s.lower()) is None]
        e._composition.sql("\n".join(statements))  # type: ignore
class OptbenchRun(MeasurementSource):
    """Measurement source that times the optimizer on one workload query."""

    def __init__(self, optbench_scenario: str, query: int):
        self._executor: Optional[Executor] = None
        self._optbench_scenario = optbench_scenario
        self._query = query

    def run(self, executor: Optional[Executor] = None) -> List[Timestamp]:
        # Exactly one of the two executor sources must be provided.
        assert not (executor is None and self._executor is None)
        assert not (executor is not None and self._executor is not None)
        e = executor or self._executor

        workload_path = Path(
            f"misc/python/materialize/optbench/workload/{self._optbench_scenario}.sql"
        )
        queries = materialize.optbench.sql.parse_from_file(workload_path)
        assert 1 <= self._query <= len(queries)

        # Queries are numbered from 1; EXPLAIN with timing yields the
        # optimizer's own measurement of optimization time.
        explain_query = materialize.optbench.sql.Query(
            queries[self._query - 1]
        ).explain(timing=True)
        explain_output = materialize.optbench.sql.ExplainOutput(
            e._composition.sql_query(explain_query)[0][0]  # type: ignore
        )
        # Optimization time is reported in microseconds; dividing by 3 keeps
        # the number readable (the unit is deliberately left unconverted).
        return [0, float(explain_output.optimization_time()) / 3]  # type: ignore
def name_with_query(cls: Type["OptbenchTPCH"], num: int, params_dict: Dict) -> str:
    """Return the generated class name for one parameterized TPC-H query.

    Used as ``class_name_func`` by ``@parameterized_class``; only the
    ``QUERY`` entry of *params_dict* matters, zero-padded to two digits.
    """
    query_no = params_dict["QUERY"]
    return f"OptbenchTPCHQ{query_no:02d}"
@parameterized_class(
    [{"QUERY": query} for query in range(1, 23)], class_name_func=name_with_query
)
class OptbenchTPCH(Scenario):
    """Benchmark the optimizer against each of the 22 TPC-H queries."""

    # Overridden per generated class by @parameterized_class.
    QUERY = 1

    def init(self) -> List[Action]:
        return [OptbenchInit("tpch")]

    def benchmark(self) -> MeasurementSource:
        return OptbenchRun("tpch", self.QUERY)
|
normal
|
{
"blob_id": "97ca134ffce404f4b2bc7352d4aac73a7bb764bd",
"index": 5708,
"step-1": "<mask token>\n\n\nclass OptbenchRun(MeasurementSource):\n\n def __init__(self, optbench_scenario: str, query: int):\n self._executor: Optional[Executor] = None\n self._optbench_scenario = optbench_scenario\n self._query = query\n <mask token>\n\n\n<mask token>\n\n\n@parameterized_class([{'QUERY': i} for i in range(1, 23)], class_name_func=\n name_with_query)\nclass OptbenchTPCH(Scenario):\n \"\"\"Run optbench TPCH for optimizer benchmarks\"\"\"\n QUERY = 1\n\n def init(self) ->List[Action]:\n return [OptbenchInit('tpch')]\n\n def benchmark(self) ->MeasurementSource:\n return OptbenchRun('tpch', self.QUERY)\n",
"step-2": "<mask token>\n\n\nclass OptbenchInit(Action):\n <mask token>\n <mask token>\n\n\nclass OptbenchRun(MeasurementSource):\n\n def __init__(self, optbench_scenario: str, query: int):\n self._executor: Optional[Executor] = None\n self._optbench_scenario = optbench_scenario\n self._query = query\n\n def run(self, executor: Optional[Executor]=None) ->List[Timestamp]:\n assert not (executor is None and self._executor is None)\n assert not (executor is not None and self._executor is not None)\n e = executor or self._executor\n queries = materialize.optbench.sql.parse_from_file(Path(\n f'misc/python/materialize/optbench/workload/{self._optbench_scenario}.sql'\n ))\n assert 1 <= self._query <= len(queries)\n query = queries[self._query - 1]\n explain_query = materialize.optbench.sql.Query(query).explain(timing\n =True)\n explain_output = materialize.optbench.sql.ExplainOutput(e.\n _composition.sql_query(explain_query)[0][0])\n timestamps = [0, float(explain_output.optimization_time()) / 3]\n return timestamps\n\n\n<mask token>\n\n\n@parameterized_class([{'QUERY': i} for i in range(1, 23)], class_name_func=\n name_with_query)\nclass OptbenchTPCH(Scenario):\n \"\"\"Run optbench TPCH for optimizer benchmarks\"\"\"\n QUERY = 1\n\n def init(self) ->List[Action]:\n return [OptbenchInit('tpch')]\n\n def benchmark(self) ->MeasurementSource:\n return OptbenchRun('tpch', self.QUERY)\n",
"step-3": "<mask token>\n\n\nclass OptbenchInit(Action):\n\n def __init__(self, scenario: str, no_indexes: bool=False) ->None:\n self._executor: Optional[Executor] = None\n self._scenario = scenario\n self._no_indexes = no_indexes\n\n def run(self, executor: Optional[Executor]=None) ->None:\n e = executor or self._executor\n statements = materialize.optbench.sql.parse_from_file(Path(\n f'misc/python/materialize/optbench/schema/{self._scenario}.sql'))\n if self._no_indexes:\n idx_re = re.compile('(create|create\\\\s+default|drop)\\\\s+index\\\\s+')\n statements = [statement for statement in statements if not\n idx_re.match(statement.lower())]\n e._composition.sql('\\n'.join(statements))\n\n\nclass OptbenchRun(MeasurementSource):\n\n def __init__(self, optbench_scenario: str, query: int):\n self._executor: Optional[Executor] = None\n self._optbench_scenario = optbench_scenario\n self._query = query\n\n def run(self, executor: Optional[Executor]=None) ->List[Timestamp]:\n assert not (executor is None and self._executor is None)\n assert not (executor is not None and self._executor is not None)\n e = executor or self._executor\n queries = materialize.optbench.sql.parse_from_file(Path(\n f'misc/python/materialize/optbench/workload/{self._optbench_scenario}.sql'\n ))\n assert 1 <= self._query <= len(queries)\n query = queries[self._query - 1]\n explain_query = materialize.optbench.sql.Query(query).explain(timing\n =True)\n explain_output = materialize.optbench.sql.ExplainOutput(e.\n _composition.sql_query(explain_query)[0][0])\n timestamps = [0, float(explain_output.optimization_time()) / 3]\n return timestamps\n\n\ndef name_with_query(cls: Type['OptbenchTPCH'], num: int, params_dict: Dict\n ) ->str:\n return f\"OptbenchTPCHQ{params_dict['QUERY']:02d}\"\n\n\n@parameterized_class([{'QUERY': i} for i in range(1, 23)], class_name_func=\n name_with_query)\nclass OptbenchTPCH(Scenario):\n \"\"\"Run optbench TPCH for optimizer benchmarks\"\"\"\n QUERY = 1\n\n def init(self) 
->List[Action]:\n return [OptbenchInit('tpch')]\n\n def benchmark(self) ->MeasurementSource:\n return OptbenchRun('tpch', self.QUERY)\n",
"step-4": "import re\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Type\nfrom parameterized import parameterized_class\nimport materialize.optbench\nimport materialize.optbench.sql\nfrom materialize.feature_benchmark.action import Action\nfrom materialize.feature_benchmark.executor import Executor\nfrom materialize.feature_benchmark.measurement_source import MeasurementSource, Timestamp\nfrom materialize.feature_benchmark.scenario import Scenario\n\n\nclass OptbenchInit(Action):\n\n def __init__(self, scenario: str, no_indexes: bool=False) ->None:\n self._executor: Optional[Executor] = None\n self._scenario = scenario\n self._no_indexes = no_indexes\n\n def run(self, executor: Optional[Executor]=None) ->None:\n e = executor or self._executor\n statements = materialize.optbench.sql.parse_from_file(Path(\n f'misc/python/materialize/optbench/schema/{self._scenario}.sql'))\n if self._no_indexes:\n idx_re = re.compile('(create|create\\\\s+default|drop)\\\\s+index\\\\s+')\n statements = [statement for statement in statements if not\n idx_re.match(statement.lower())]\n e._composition.sql('\\n'.join(statements))\n\n\nclass OptbenchRun(MeasurementSource):\n\n def __init__(self, optbench_scenario: str, query: int):\n self._executor: Optional[Executor] = None\n self._optbench_scenario = optbench_scenario\n self._query = query\n\n def run(self, executor: Optional[Executor]=None) ->List[Timestamp]:\n assert not (executor is None and self._executor is None)\n assert not (executor is not None and self._executor is not None)\n e = executor or self._executor\n queries = materialize.optbench.sql.parse_from_file(Path(\n f'misc/python/materialize/optbench/workload/{self._optbench_scenario}.sql'\n ))\n assert 1 <= self._query <= len(queries)\n query = queries[self._query - 1]\n explain_query = materialize.optbench.sql.Query(query).explain(timing\n =True)\n explain_output = materialize.optbench.sql.ExplainOutput(e.\n _composition.sql_query(explain_query)[0][0])\n 
timestamps = [0, float(explain_output.optimization_time()) / 3]\n return timestamps\n\n\ndef name_with_query(cls: Type['OptbenchTPCH'], num: int, params_dict: Dict\n ) ->str:\n return f\"OptbenchTPCHQ{params_dict['QUERY']:02d}\"\n\n\n@parameterized_class([{'QUERY': i} for i in range(1, 23)], class_name_func=\n name_with_query)\nclass OptbenchTPCH(Scenario):\n \"\"\"Run optbench TPCH for optimizer benchmarks\"\"\"\n QUERY = 1\n\n def init(self) ->List[Action]:\n return [OptbenchInit('tpch')]\n\n def benchmark(self) ->MeasurementSource:\n return OptbenchRun('tpch', self.QUERY)\n",
"step-5": "# Copyright Materialize, Inc. and contributors. All rights reserved.\n#\n# Use of this software is governed by the Business Source License\n# included in the LICENSE file at the root of this repository.\n#\n# As of the Change Date specified in that file, in accordance with\n# the Business Source License, use of this software will be governed\n# by the Apache License, Version 2.0.\n\n\nimport re\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Type\n\nfrom parameterized import parameterized_class # type: ignore\n\nimport materialize.optbench\nimport materialize.optbench.sql\nfrom materialize.feature_benchmark.action import Action\nfrom materialize.feature_benchmark.executor import Executor\nfrom materialize.feature_benchmark.measurement_source import (\n MeasurementSource,\n Timestamp,\n)\nfrom materialize.feature_benchmark.scenario import Scenario\n\n\nclass OptbenchInit(Action):\n def __init__(self, scenario: str, no_indexes: bool = False) -> None:\n self._executor: Optional[Executor] = None\n self._scenario = scenario\n self._no_indexes = no_indexes\n\n def run(self, executor: Optional[Executor] = None) -> None:\n e = executor or self._executor\n statements = materialize.optbench.sql.parse_from_file(\n Path(f\"misc/python/materialize/optbench/schema/{self._scenario}.sql\")\n )\n if self._no_indexes:\n idx_re = re.compile(r\"(create|create\\s+default|drop)\\s+index\\s+\")\n statements = [\n statement\n for statement in statements\n if not idx_re.match(statement.lower())\n ]\n e._composition.sql(\"\\n\".join(statements)) # type: ignore\n\n\nclass OptbenchRun(MeasurementSource):\n def __init__(self, optbench_scenario: str, query: int):\n self._executor: Optional[Executor] = None\n self._optbench_scenario = optbench_scenario\n self._query = query\n\n def run(self, executor: Optional[Executor] = None) -> List[Timestamp]:\n assert not (executor is None and self._executor is None)\n assert not (executor is not None and self._executor is not 
None)\n e = executor or self._executor\n\n queries = materialize.optbench.sql.parse_from_file(\n Path(\n f\"misc/python/materialize/optbench/workload/{self._optbench_scenario}.sql\"\n )\n )\n assert 1 <= self._query <= len(queries)\n query = queries[self._query - 1]\n explain_query = materialize.optbench.sql.Query(query).explain(timing=True)\n explain_output = materialize.optbench.sql.ExplainOutput(\n e._composition.sql_query(explain_query)[0][0] # type: ignore\n )\n # Optimization time is in microseconds, divide by 3 to get a more readable number (still in wrong unit)\n timestamps = [0, float(explain_output.optimization_time()) / 3] # type: ignore\n return timestamps\n\n\ndef name_with_query(cls: Type[\"OptbenchTPCH\"], num: int, params_dict: Dict) -> str:\n return f\"OptbenchTPCHQ{params_dict['QUERY']:02d}\"\n\n\n@parameterized_class(\n [{\"QUERY\": i} for i in range(1, 23)], class_name_func=name_with_query\n)\nclass OptbenchTPCH(Scenario):\n \"\"\"Run optbench TPCH for optimizer benchmarks\"\"\"\n\n QUERY = 1\n\n def init(self) -> List[Action]:\n return [OptbenchInit(\"tpch\")]\n\n def benchmark(self) -> MeasurementSource:\n return OptbenchRun(\"tpch\", self.QUERY)\n",
"step-ids": [
7,
9,
12,
13,
14
]
}
|
[
7,
9,
12,
13,
14
] |
import pygame
import sys
# класс для хранения настроек
class Settings():
    """Central container for the game's configurable parameters."""

    def __init__(self):
        # Screen geometry and background colour.
        self.colour = (230, 230, 230)
        self.screen_width = 1200
        self.screen_height = 800

        # Ship movement speed (pixels per tick).
        self.ship_speed = 1.5

        # Bullet speed, geometry and colour.
        self.bullet_speed = 1
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = (60, 60, 60)

        # Alien fleet movement: horizontal speed, direction flag and
        # vertical drop distance when the fleet reaches an edge.
        self.alien_speed = 1
        self.alien_fleet = 1
        self.alien_fleet_drop_speed = 10
|
normal
|
{
"blob_id": "2402188380bc0189b88e3cfcbaabf64a9919b3d5",
"index": 8810,
"step-1": "<mask token>\n\n\nclass Settings:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Settings:\n <mask token>\n\n def __init__(self):\n self.colour = 230, 230, 230\n self.screen_width = 1200\n self.screen_height = 800\n self.ship_speed = 1.5\n self.bullet_speed = 1\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = 60, 60, 60\n self.alien_speed = 1\n self.alien_fleet = 1\n self.alien_fleet_drop_speed = 10\n",
"step-3": "<mask token>\n\n\nclass Settings:\n \"\"\"docstring for Setting\"\"\"\n\n def __init__(self):\n self.colour = 230, 230, 230\n self.screen_width = 1200\n self.screen_height = 800\n self.ship_speed = 1.5\n self.bullet_speed = 1\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = 60, 60, 60\n self.alien_speed = 1\n self.alien_fleet = 1\n self.alien_fleet_drop_speed = 10\n",
"step-4": "import pygame\nimport sys\n\n\nclass Settings:\n \"\"\"docstring for Setting\"\"\"\n\n def __init__(self):\n self.colour = 230, 230, 230\n self.screen_width = 1200\n self.screen_height = 800\n self.ship_speed = 1.5\n self.bullet_speed = 1\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = 60, 60, 60\n self.alien_speed = 1\n self.alien_fleet = 1\n self.alien_fleet_drop_speed = 10\n",
"step-5": "import pygame\nimport sys\n\n# класс для хранения настроек\nclass Settings():\n\t\"\"\"docstring for Setting\"\"\"\n\tdef __init__(self):\n\t\t# параметры экрана\n\t\tself.colour = (230, 230, 230)\n\t\tself.screen_width = 1200\n\t\tself.screen_height = 800\t\n\t\t# параметры коробля\n\t\tself.ship_speed = 1.5\n\n\t\t# параметры пули\n\t\tself.bullet_speed = 1\n\t\tself.bullet_width = 3\n\t\tself.bullet_height = 15\n\t\tself.bullet_color = (60,60,60)\n\n\t\t# скорость и перемещение флота\n\t\tself.alien_speed = 1\n\t\tself.alien_fleet = 1\n\t\tself.alien_fleet_drop_speed = 10",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pygame
import time
import math
from pygame.locals import *
from pygux.widgets.widget import Widget, hlBox
from pygux.colours import Colours
class Sprite(Widget):
    """Image-backed widget: scales its bitmap to fit and reacts to touches."""

    def __init__(self, x, y, w, h, image=None, callback=None, **kw):
        """Create the sprite; *image* is an optional path to a bitmap and
        *callback* is invoked with the sprite when it is touched.
        """
        Widget.__init__(self, x, y, w, h, **kw)
        self.image = pygame.image.load(image).convert() if image else None
        self.callback = callback

    def draw(self):
        # Render onto a transparent scratch surface, then blit into the parent.
        canvas = pygame.Surface((self.w, self.h), pygame.SRCALPHA)
        if self.image is not None:
            scaled = pygame.transform.smoothscale(self.image, (self.w, self.h))
            canvas.blit(scaled, (0, 0))
        self.parent.surface.blit(canvas, (self.x, self.y))

    def touched(self, position):
        """Handle a touch: flip toggle state, run the callback, redraw if asked."""
        if self.toggle:
            self.state = not self.state
        # The callback decides whether a redraw is needed; without one we
        # always refresh.
        self.refresh = self.callback(self) if self.callback else True
        if self.refresh:
            self.parent.update()

    def update(self):
        """Redraw once if a refresh is pending; return True when drawn."""
        if not self.refresh:
            return None
        self.refresh = False
        self.draw()
        return True
|
normal
|
{
"blob_id": "0003d104a4dcd5a5b2357016cbc0317738c2cd3c",
"index": 2007,
"step-1": "<mask token>\n\n\nclass Sprite(Widget):\n\n def __init__(self, x, y, w, h, image=None, callback=None, **kw):\n \"\"\"Sprite widget\n \"\"\"\n Widget.__init__(self, x, y, w, h, **kw)\n if image:\n self.image = pygame.image.load(image).convert()\n else:\n self.image = None\n self.callback = callback\n\n def draw(self):\n surface = pygame.Surface((self.w, self.h), pygame.SRCALPHA)\n if self.image:\n my_img = pygame.transform.smoothscale(self.image, (self.w, self.h))\n surface.blit(my_img, (0, 0))\n self.parent.surface.blit(surface, (self.x, self.y))\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Sprite(Widget):\n\n def __init__(self, x, y, w, h, image=None, callback=None, **kw):\n \"\"\"Sprite widget\n \"\"\"\n Widget.__init__(self, x, y, w, h, **kw)\n if image:\n self.image = pygame.image.load(image).convert()\n else:\n self.image = None\n self.callback = callback\n\n def draw(self):\n surface = pygame.Surface((self.w, self.h), pygame.SRCALPHA)\n if self.image:\n my_img = pygame.transform.smoothscale(self.image, (self.w, self.h))\n surface.blit(my_img, (0, 0))\n self.parent.surface.blit(surface, (self.x, self.y))\n <mask token>\n\n def update(self):\n if self.refresh:\n self.refresh = False\n self.draw()\n return True\n",
"step-3": "<mask token>\n\n\nclass Sprite(Widget):\n\n def __init__(self, x, y, w, h, image=None, callback=None, **kw):\n \"\"\"Sprite widget\n \"\"\"\n Widget.__init__(self, x, y, w, h, **kw)\n if image:\n self.image = pygame.image.load(image).convert()\n else:\n self.image = None\n self.callback = callback\n\n def draw(self):\n surface = pygame.Surface((self.w, self.h), pygame.SRCALPHA)\n if self.image:\n my_img = pygame.transform.smoothscale(self.image, (self.w, self.h))\n surface.blit(my_img, (0, 0))\n self.parent.surface.blit(surface, (self.x, self.y))\n\n def touched(self, position):\n if self.toggle:\n self.state = not self.state\n if self.callback:\n self.refresh = self.callback(self)\n else:\n self.refresh = True\n if self.refresh:\n self.parent.update()\n\n def update(self):\n if self.refresh:\n self.refresh = False\n self.draw()\n return True\n",
"step-4": "import pygame\nimport time\nimport math\nfrom pygame.locals import *\nfrom pygux.widgets.widget import Widget, hlBox\nfrom pygux.colours import Colours\n\n\nclass Sprite(Widget):\n\n def __init__(self, x, y, w, h, image=None, callback=None, **kw):\n \"\"\"Sprite widget\n \"\"\"\n Widget.__init__(self, x, y, w, h, **kw)\n if image:\n self.image = pygame.image.load(image).convert()\n else:\n self.image = None\n self.callback = callback\n\n def draw(self):\n surface = pygame.Surface((self.w, self.h), pygame.SRCALPHA)\n if self.image:\n my_img = pygame.transform.smoothscale(self.image, (self.w, self.h))\n surface.blit(my_img, (0, 0))\n self.parent.surface.blit(surface, (self.x, self.y))\n\n def touched(self, position):\n if self.toggle:\n self.state = not self.state\n if self.callback:\n self.refresh = self.callback(self)\n else:\n self.refresh = True\n if self.refresh:\n self.parent.update()\n\n def update(self):\n if self.refresh:\n self.refresh = False\n self.draw()\n return True\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.create_all()
Patient.add_patient()
Appointment.add_appointment()
PhoneCalls.add_call()
<|reserved_special_token_1|>
from config import SQLALCHEMY_DATABASE_URI
from app.models import Patient, Appointment, PhoneCalls
from app import db
import os.path
db.create_all()
Patient.add_patient()
Appointment.add_appointment()
PhoneCalls.add_call()
<|reserved_special_token_1|>
#!flask/bin/python
from config import SQLALCHEMY_DATABASE_URI
from app.models import Patient, Appointment, PhoneCalls
from app import db
import os.path
db.create_all()
# Patient.generate_fake();
# Appointment.generate_fake();
# PhoneCalls.generate_fake();
Patient.add_patient();
Appointment.add_appointment();
PhoneCalls.add_call();
|
flexible
|
{
"blob_id": "173e6017884a1a4df64018b306ea71bcaa1c5f1d",
"index": 4528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.create_all()\nPatient.add_patient()\nAppointment.add_appointment()\nPhoneCalls.add_call()\n",
"step-3": "from config import SQLALCHEMY_DATABASE_URI\nfrom app.models import Patient, Appointment, PhoneCalls\nfrom app import db\nimport os.path\ndb.create_all()\nPatient.add_patient()\nAppointment.add_appointment()\nPhoneCalls.add_call()\n",
"step-4": "#!flask/bin/python\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom app.models import Patient, Appointment, PhoneCalls\nfrom app import db\nimport os.path\ndb.create_all()\n\n# Patient.generate_fake();\n# Appointment.generate_fake();\n# PhoneCalls.generate_fake();\n\nPatient.add_patient();\nAppointment.add_appointment();\nPhoneCalls.add_call();",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import sys
# Load the mux database (mux name -> {source wire -> bit pattern}) from the
# JSON file given on the command line.
with open(sys.argv[1], 'r') as f:
    x = json.load(f)
# Mapping from internal wire names to Quartus wire names.
# NOTE(review): loaded but not referenced in this portion of the script —
# presumably consumed further down; verify before removing.
with open('my_wire_to_quartus_wire.json', 'r') as f:
    wirenamemap = json.load(f)
# Quick sanity summary of the loaded database.
print("----- There are {} muxes in the database".format(len(x)))
print("----- There are {} routing pairs in the database".format(sum((len(v) for k, v in x.items()))))
def bits2str(bits):
    """Render a 2-D bit matrix as text: one line per row, '1'/'0' per bit.

    Every row (including an empty one) is terminated with a newline, so a
    non-empty matrix always ends in '\\n' and an empty matrix yields ''.
    """
    # str.join builds the result in one pass; the original += loop was the
    # quadratic string-concatenation anti-pattern.
    return "".join(
        "".join("1" if bit else "0" for bit in row) + "\n" for row in bits
    )
def parse_xyi(inp):
    """Parse a wire name of the form ``...X<x>Y<y>I<i>`` into ``(x, y, i)``.

    The X/Y/I markers must each appear once, in that order; any prefix
    before the first 'X' (e.g. "R:") is ignored.
    """
    x_at = inp.find('X')
    y_at = inp.find('Y')
    i_at = inp.find('I')
    assert 0 <= x_at < y_at < i_at
    x_val = int(inp[x_at + 1:y_at])
    y_val = int(inp[y_at + 1:i_at])
    i_val = int(inp[i_at + 1:])
    return (x_val, y_val, i_val)
def parse_xysi(inp):
    """Parse ``...X<x>Y<y>S<s>I<i>`` into ``(x, y, i)``.

    The S (subtile) field is validated to be zero and then discarded; the
    four markers must appear once each, in X/Y/S/I order.
    """
    x_at = inp.find('X')
    y_at = inp.find('Y')
    s_at = inp.find('S')
    i_at = inp.find('I')
    assert 0 <= x_at < y_at < s_at < i_at
    # Only subtile 0 is supported.
    assert int(inp[s_at + 1:i_at]) == 0
    return (int(inp[x_at + 1:y_at]), int(inp[y_at + 1:s_at]), int(inp[i_at + 1:]))
def anybits(bits):
    """Return True if any bit in the matrix is clear (False).

    NOTE(review): despite the name, this tests for *cleared* bits —
    consistent with the active-low encoding decodemux() uses, where a 0
    bit means the feature is programmed. Confirm against callers.
    """
    return any(not bit for row in bits for bit in row)
def decodemux(bits):
    """Decode a 2x4 active-low mux bit pattern into a source index 0..12.

    A..H name the eight bits in row-major order, inverted (a 0 bit in the
    pattern sets the flag). G alone encodes source 0; otherwise exactly one
    of C/D/H selects a bank of four sources and exactly one of A/B/E/F
    selects the entry within that bank — both invariants are asserted.
    """
    A = not bits[0][0]
    B = not bits[0][1]
    C = not bits[0][2]
    D = not bits[0][3]
    E = not bits[1][0]
    F = not bits[1][1]
    G = not bits[1][2]
    H = not bits[1][3]
    assert G + C + D + H == 1
    assert A + B + E + F == 1 or (A + B + E + F == 0 and G)
    if G:
        assert A + B + C + D + E + F + H == 0
        return 0
    # With the asserts above, exactly one flag is set in each tuple, so
    # index(True) is well-defined. Banks: C -> 1..4, D -> 5..8, H -> 9..12.
    bank = (C, D, H).index(True)
    entry = (A, B, E, F).index(True)
    return bank * 4 + entry + 1
def flipv(muxbits):
    """Mirror a bit matrix top-to-bottom (reverse the row order)."""
    return list(reversed(muxbits))
def fliph(muxbits):
    """Return the bit pattern mirrored horizontally (each row reversed)."""
    return [list(reversed(row)) for row in muxbits]
# # print(x)
# uniq_r_muxes = []
# for _ in range(8):
# uniq_r_muxes.append(set())
# for X in range(2, 8):
# for Y in range(1, 5):
# for N in range(8):
# mux = "R:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_r_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(8):
# print("~~~~~ R{} ~~~~~".format(N))
# for xx in sorted(list(uniq_r_muxes[N])):
# print(xx)
# # print(x)
# uniq_l_muxes = []
# for _ in range(8):
# uniq_l_muxes.append(set())
# # print(x)
# uniq_l2_muxes = []
# for _ in range(8):
# uniq_l2_muxes.append(set())
# for X in [8]:
# for Y in range(1, 5):
# for N in range(8):
# mux = "L2:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_l2_muxes[N].add(bits2str(muxbits))
# # print(uniq_l2_muxes)
# for N in range(8):
# print("~~~~~ L2:{} ~~~~~".format(N))
# for xx in sorted(list(uniq_l2_muxes[N])):
# print(xx)
# # print(x)
# uniq_l_muxes = []
# for _ in range(8):
# uniq_l_muxes.append(set())
# for X in range(3, 9):
# for Y in range(1, 5):
# for N in range(8):
# mux = "L:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_l_muxes[N].add(bits2str(muxbits))
# # print(uniq_l_muxes)
# for N in range(8):
# print("~~~~~ L{} ~~~~~".format(N))
# for xx in sorted(list(uniq_l_muxes[N])):
# print(xx)
# uniq_u_muxes = []
# for _ in range(7):
# uniq_u_muxes.append(set())
# for X in [8]:#range(2, 8):
# for Y in range(1, 5):
# for N in range(7):
# mux = "U:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_u_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(7):
# print("~~~~~ U{} ~~~~~".format(N))
# for xx in sorted(list(uniq_u_muxes[N])):
# print(xx)
# uniq_d_muxes = []
# for _ in range(7):
# uniq_d_muxes.append(set())
# for X in [8]:#range(2, 8):
# for Y in range(1, 5):
# for N in range(7):
# mux = "D:X{}Y{}I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_d_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(7):
# print("~~~~~ D{} ~~~~~".format(N))
# for xx in sorted(list(uniq_d_muxes[N])):
# print(xx)
# uniq_l_li_muxes = []
# for _ in range(18):
# uniq_l_li_muxes.append(set())
# for Y in range(1, 5):
# for N in range(18):
# mux = "LOCAL_INTERCONNECT:X1Y{}S0I{}".format(Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_l_li_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(18):
# print("~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~".format(N))
# for xx in sorted(list(uniq_l_li_muxes[N])):
# print(xx)
# uniq_li_muxes = []
# for _ in range(26):
# uniq_li_muxes.append(set())
# for X in range(2, 8):
# for Y in range(1, 5):
# for N in range(26):
# mux = "LOCAL_INTERCONNECT:X{}Y{}S0I{}".format(X, Y, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_li_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(26):
# print("~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~".format(N))
# for xx in sorted(list(uniq_li_muxes[N])):
# print(xx)
# uniq_top_li_muxes = []
# for _ in range(10):
# uniq_top_li_muxes.append(set())
# for X in range(2, 8):
# for N in range(10):
# mux = "LOCAL_INTERCONNECT:X{}Y5S0I{}".format(X, N)
# muxvals = x[mux]
# # print(muxvals)
# for muxsrc, muxbits in muxvals.items():
# uniq_top_li_muxes[N].add(bits2str(muxbits))
# # print(uniq_r_muxes)
# for N in range(10):
# print("~~~~~ LOCAL_INTERCONNECT:Y5 {} ~~~~~".format(N))
# for xx in sorted(list(uniq_top_li_muxes[N])):
# print(xx)
# Row labels for the decoded-mux table: LABELS[0] is the header naming the
# eight fuse positions; LABELS[1..13] show which fuse pair selects mux
# input 0..12 (aligned with the indices returned by decodemux).
LABELS = [
    "|G|C|D|H|A|B|E|F|",
    "|0| | | | | | | | ",
    "| |0| | |0| | | | ",
    "| |0| | | |0| | | ",
    "| |0| | | | |0| | ",
    "| |0| | | | | |0| ",
    "| | |0| |0| | | | ",
    "| | |0| | |0| | | ",
    "| | |0| | | |0| | ",
    "| | |0| | | | |0| ",
    "| | | |0|0| | | | ",
    "| | | |0| |0| | | ",
    "| | | |0| | |0| | ",
    "| | | |0| | | |0| ",
]
# Main pass: for every destination wire, decode which source drives each of
# its 13 possible mux inputs, then print the result as a fixed table.
for dst, srcs in x.items():
    srcs_decoded = [None] * 13
    is_tb_io = False
    for src, muxbits in srcs.items():
        # Canonicalise the stored fuse pattern before decoding: depending on
        # the wire kind (R/L/L2/U/D — presumably routing-wire directions,
        # TODO confirm), its coordinates and index, the pattern is mirrored
        # horizontally and/or vertically in the bitstream.
        if dst.startswith("R:"):
            _, _, I = parse_xyi(dst)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith("L:") or dst.startswith("L2"):
            _, _, I = parse_xyi(dst)
            muxbits = fliph(muxbits)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith("U:"):
            X, _, I = parse_xyi(dst)
            # X == 8 (rightmost column observed here) is mirrored; I == 0 is
            # additionally mirrored everywhere except that column.
            if X == 8:
                muxbits = fliph(muxbits)
            if I == 0 and X != 8:
                muxbits = fliph(muxbits)
            if I >= 4:
                muxbits = flipv(muxbits)
        elif dst.startswith("D:"):
            X, _, I = parse_xyi(dst)
            if X == 8:
                muxbits = fliph(muxbits)
            if I == 6 and X != 8:
                muxbits = fliph(muxbits)
            if I >= 3:
                muxbits = flipv(muxbits)
        elif dst.startswith("LOCAL_INTERCONNECT:"):
            # Strip the "LOCAL_INTERCONNECT:" prefix (19 chars) before parsing.
            X, Y, I = parse_xysi(dst[19:])
            if X == 1:
                muxbits = fliph(muxbits)
                if I > 8:
                    muxbits = flipv(muxbits)
            elif X == 8:
                if I > 8:
                    muxbits = flipv(muxbits)
            else:
                if Y == 0 or Y == 5:
                    # Top/bottom IO rows: these interconnects have no
                    # input 0 (asserted after the loop).
                    is_tb_io = True
                    if Y == 0:
                        muxbits = flipv(muxbits)
                    if I < 5:
                        muxbits = fliph(muxbits)
                else:
                    if I in range(0, 5) or I in range(13, 18):
                        muxbits = fliph(muxbits)
                    if I >= 13:
                        muxbits = flipv(muxbits)
        else:
            # Unknown wire kind: skip it entirely.
            continue
        muxidx = decodemux(muxbits)
        # Each mux input may be claimed by at most one source; print the
        # conflict before the assert fires so it can be diagnosed.
        if srcs_decoded[muxidx] is not None:
            print(dst, src, srcs_decoded[muxidx])
        assert srcs_decoded[muxidx] is None
        srcs_decoded[muxidx] = src
    print("~~~~~ {} ~~~~~".format(dst))
    print(LABELS[0])
    if is_tb_io:
        assert srcs_decoded[0] is None
    for i in range(len(srcs_decoded)):
        # Top/bottom IO interconnects have no input 0, so skip that row.
        if is_tb_io and i == 0:
            continue
        print(LABELS[i + 1], end='')
        src = srcs_decoded[i]
        if src is None:
            print("???")
        else:
            print(src, end='')
            # Annotate with the Quartus wire name when the mapping has one.
            if src in wirenamemap:
                print(" ({})".format(wirenamemap[src]))
            else:
                print()
# if dst.startswith("LOCAL_INTERCONNECT:"):
# continue
# print(dst, src)
# if dst.startswith("L:"):
# _, _, I = parse_xyi(dst)
# muxbits = fliph(muxbits)
# if I >= 4:
# muxbits = flipv(muxbits)
# if dst.startswith("R:"):
# _, _, I = parse_xyi(dst)
# if I >= 4:
# muxbits = flipv(muxbits)
# if dst.startswith("D:"):
# X, _, I = parse_xyi(dst)
# if I >= 3:
# muxbits = flipv(muxbits)
# if I == 6:
# muxbits = fliph(muxbits)
# if X == 8:
# muxbits = fliph(muxbits)
# if dst.startswith("U:"):
# X, _, I = parse_xyi(dst)
# if I >= 4:
# muxbits = flipv(muxbits)
# if I == 0:
# muxbits = fliph(muxbits)
# if X == 8:
# muxbits = fliph(muxbits)
# if dst.startswith("L2:"):
# _, _, I = parse_xyi(dst)
# if I >= 4:
# muxbits = flipv(muxbits)
# decodemux(muxbits)
|
normal
|
{
"blob_id": "95163a28a35cc88240d9d6edc2e9b416e5493909",
"index": 6021,
"step-1": "<mask token>\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\n<mask token>\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\nprint('----- There are {} muxes in the database'.format(len(x)))\nprint('----- There are {} routing pairs in the database'.format(sum(len(v) for\n k, v in x.items())))\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\nLABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',\n '| |0| | |0| | | | ', '| |0| | | |0| | | ',\n '| |0| | | | |0| | ', '| |0| | 
| | | |0| ',\n '| | |0| |0| | | | ', '| | |0| | |0| | | ',\n '| | |0| | | |0| | ', '| | |0| | | | |0| ',\n '| | | |0|0| | | | ', '| | | |0| |0| | | ',\n '| | | |0| | |0| | ', '| | | |0| | | |0| ']\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith('R:'):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('L:') or dst.startswith('L2'):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('U:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('D:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith('LOCAL_INTERCONNECT:'):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n elif Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n muxidx = decodemux(muxbits)\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n print('~~~~~ {} ~~~~~'.format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print('???')\n else:\n print(src, end='')\n if src in wirenamemap:\n print(' ({})'.format(wirenamemap[src]))\n else:\n print()\n",
"step-4": "import json\nimport sys\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\nprint('----- There are {} muxes in the database'.format(len(x)))\nprint('----- There are {} routing pairs in the database'.format(sum(len(v) for\n k, v in x.items())))\n\n\ndef bits2str(bits):\n ret = ''\n for row in bits:\n rowstr = ''\n for bit in row:\n rowstr += '1' if bit else '0'\n ret += rowstr + '\\n'\n return ret\n\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]\n )\n\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n return int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]\n )\n\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or A + B + E + F == 0 and G\n if G:\n assert A + B + C + D + E + F + H == 0\n if G:\n return 0\n if C:\n if A:\n return 1\n if B:\n return 2\n if E:\n return 3\n if F:\n return 4\n if D:\n if A:\n return 5\n if B:\n return 6\n if E:\n return 7\n if F:\n return 8\n if H:\n if A:\n return 9\n if B:\n return 10\n if E:\n return 11\n if F:\n return 12\n\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n\nLABELS = ['|G|C|D|H|A|B|E|F|', '|0| | | | | | | | ',\n '| |0| | |0| | | | ', '| |0| | | |0| | | ',\n '| |0| | | | |0| | 
', '| |0| | | | | |0| ',\n '| | |0| |0| | | | ', '| | |0| | |0| | | ',\n '| | |0| | | |0| | ', '| | |0| | | | |0| ',\n '| | | |0|0| | | | ', '| | | |0| |0| | | ',\n '| | | |0| | |0| | ', '| | | |0| | | |0| ']\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith('R:'):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('L:') or dst.startswith('L2'):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('U:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith('D:'):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith('LOCAL_INTERCONNECT:'):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n elif Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n muxidx = decodemux(muxbits)\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n print('~~~~~ {} ~~~~~'.format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print('???')\n else:\n print(src, end='')\n if src in wirenamemap:\n print(' ({})'.format(wirenamemap[src]))\n else:\n print()\n",
"step-5": "import json\nimport sys\n\nwith open(sys.argv[1], 'r') as f:\n x = json.load(f)\nwith open('my_wire_to_quartus_wire.json', 'r') as f:\n wirenamemap = json.load(f)\n\nprint(\"----- There are {} muxes in the database\".format(len(x)))\nprint(\"----- There are {} routing pairs in the database\".format(sum((len(v) for k, v in x.items()))))\n\ndef bits2str(bits):\n ret = \"\"\n for row in bits:\n rowstr = \"\"\n for bit in row:\n rowstr += \"1\" if bit else \"0\"\n ret += rowstr + '\\n'\n return ret\n\ndef parse_xyi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n ipos = inp.find('I')\n\n assert xpos >= 0\n assert ypos > xpos\n assert ipos > ypos\n\n return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:ipos]), int(inp[ipos + 1:]))\n\ndef parse_xysi(inp):\n xpos = inp.find('X')\n ypos = inp.find('Y')\n spos = inp.find('S')\n ipos = inp.find('I')\n\n assert xpos >= 0\n assert ypos > xpos\n assert spos > ypos\n assert ipos > spos\n\n sval = int(inp[spos + 1:ipos])\n assert sval == 0\n\n return (int(inp[xpos + 1:ypos]), int(inp[ypos + 1:spos]), int(inp[ipos + 1:]))\n\ndef anybits(bits):\n for y in bits:\n for x in y:\n if not x:\n return True\n return False\n\ndef decodemux(bits):\n A = not bits[0][0]\n B = not bits[0][1]\n C = not bits[0][2]\n D = not bits[0][3]\n E = not bits[1][0]\n F = not bits[1][1]\n G = not bits[1][2]\n H = not bits[1][3]\n\n assert G + C + D + H == 1\n assert A + B + E + F == 1 or (A + B + E + F == 0 and G)\n if G:\n assert A + B + C + D + E + F + H == 0\n\n if G:\n return 0\n if C:\n if A: return 1\n if B: return 2\n if E: return 3\n if F: return 4\n if D:\n if A: return 5\n if B: return 6\n if E: return 7\n if F: return 8\n if H:\n if A: return 9\n if B: return 10\n if E: return 11\n if F: return 12\n\ndef flipv(muxbits):\n return muxbits[::-1]\n\ndef fliph(muxbits):\n return [x[::-1] for x in muxbits]\n\n# # print(x)\n# uniq_r_muxes = []\n# for _ in range(8):\n# uniq_r_muxes.append(set())\n\n# for X in range(2, 8):\n# for Y in 
range(1, 5):\n# for N in range(8):\n# mux = \"R:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_r_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(8):\n# print(\"~~~~~ R{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_r_muxes[N])):\n# print(xx)\n\n# # print(x)\n# uniq_l_muxes = []\n# for _ in range(8):\n# uniq_l_muxes.append(set())\n\n# # print(x)\n# uniq_l2_muxes = []\n# for _ in range(8):\n# uniq_l2_muxes.append(set())\n\n# for X in [8]:\n# for Y in range(1, 5):\n# for N in range(8):\n# mux = \"L2:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l2_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_l2_muxes)\n# for N in range(8):\n# print(\"~~~~~ L2:{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l2_muxes[N])):\n# print(xx)\n\n# # print(x)\n# uniq_l_muxes = []\n# for _ in range(8):\n# uniq_l_muxes.append(set())\n\n# for X in range(3, 9):\n# for Y in range(1, 5):\n# for N in range(8):\n# mux = \"L:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_l_muxes)\n# for N in range(8):\n# print(\"~~~~~ L{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l_muxes[N])):\n# print(xx)\n\n# uniq_u_muxes = []\n# for _ in range(7):\n# uniq_u_muxes.append(set())\n\n# for X in [8]:#range(2, 8):\n# for Y in range(1, 5):\n# for N in range(7):\n# mux = \"U:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_u_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(7):\n# print(\"~~~~~ U{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_u_muxes[N])):\n# print(xx)\n\n# uniq_d_muxes = []\n# for _ in range(7):\n# uniq_d_muxes.append(set())\n\n# for X in [8]:#range(2, 8):\n# for Y in range(1, 5):\n# for N in 
range(7):\n# mux = \"D:X{}Y{}I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_d_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(7):\n# print(\"~~~~~ D{} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_d_muxes[N])):\n# print(xx)\n\n# uniq_l_li_muxes = []\n# for _ in range(18):\n# uniq_l_li_muxes.append(set())\n\n# for Y in range(1, 5):\n# for N in range(18):\n# mux = \"LOCAL_INTERCONNECT:X1Y{}S0I{}\".format(Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_l_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(18):\n# print(\"~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_l_li_muxes[N])):\n# print(xx)\n\n# uniq_li_muxes = []\n# for _ in range(26):\n# uniq_li_muxes.append(set())\n\n# for X in range(2, 8):\n# for Y in range(1, 5):\n# for N in range(26):\n# mux = \"LOCAL_INTERCONNECT:X{}Y{}S0I{}\".format(X, Y, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(26):\n# print(\"~~~~~ LOCAL_INTERCONNECT:X1 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_li_muxes[N])):\n# print(xx)\n\n# uniq_top_li_muxes = []\n# for _ in range(10):\n# uniq_top_li_muxes.append(set())\n\n# for X in range(2, 8):\n# for N in range(10):\n# mux = \"LOCAL_INTERCONNECT:X{}Y5S0I{}\".format(X, N)\n# muxvals = x[mux]\n# # print(muxvals)\n# for muxsrc, muxbits in muxvals.items():\n# uniq_top_li_muxes[N].add(bits2str(muxbits))\n\n# # print(uniq_r_muxes)\n# for N in range(10):\n# print(\"~~~~~ LOCAL_INTERCONNECT:Y5 {} ~~~~~\".format(N))\n# for xx in sorted(list(uniq_top_li_muxes[N])):\n# print(xx)\n\nLABELS = [\n \"|G|C|D|H|A|B|E|F|\",\n \"|0| | | | | | | | \",\n \"| |0| | |0| | | | \",\n \"| |0| | | |0| | | \",\n \"| |0| | | | |0| | \",\n \"| |0| | | | | |0| \",\n \"| | |0| |0| | 
| | \",\n \"| | |0| | |0| | | \",\n \"| | |0| | | |0| | \",\n \"| | |0| | | | |0| \",\n \"| | | |0|0| | | | \",\n \"| | | |0| |0| | | \",\n \"| | | |0| | |0| | \",\n \"| | | |0| | | |0| \",\n]\n\nfor dst, srcs in x.items():\n srcs_decoded = [None] * 13\n is_tb_io = False\n for src, muxbits in srcs.items():\n if dst.startswith(\"R:\"):\n _, _, I = parse_xyi(dst)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"L:\") or dst.startswith(\"L2\"):\n _, _, I = parse_xyi(dst)\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"U:\"):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n\n if I == 0 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 4:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"D:\"):\n X, _, I = parse_xyi(dst)\n if X == 8:\n muxbits = fliph(muxbits)\n\n if I == 6 and X != 8:\n muxbits = fliph(muxbits)\n if I >= 3:\n muxbits = flipv(muxbits)\n elif dst.startswith(\"LOCAL_INTERCONNECT:\"):\n X, Y, I = parse_xysi(dst[19:])\n if X == 1:\n muxbits = fliph(muxbits)\n if I > 8:\n muxbits = flipv(muxbits)\n elif X == 8:\n if I > 8:\n muxbits = flipv(muxbits)\n else:\n if Y == 0 or Y == 5:\n is_tb_io = True\n if Y == 0:\n muxbits = flipv(muxbits)\n if I < 5:\n muxbits = fliph(muxbits)\n else:\n if I in range(0, 5) or I in range(13, 18):\n muxbits = fliph(muxbits)\n if I >= 13:\n muxbits = flipv(muxbits)\n else:\n continue\n\n muxidx = decodemux(muxbits)\n\n if srcs_decoded[muxidx] is not None:\n print(dst, src, srcs_decoded[muxidx])\n assert srcs_decoded[muxidx] is None\n srcs_decoded[muxidx] = src\n\n print(\"~~~~~ {} ~~~~~\".format(dst))\n print(LABELS[0])\n if is_tb_io:\n assert srcs_decoded[0] is None\n for i in range(len(srcs_decoded)):\n if is_tb_io and i == 0:\n continue\n print(LABELS[i + 1], end='')\n src = srcs_decoded[i]\n if src is None:\n print(\"???\")\n else:\n print(src, end='')\n if src in wirenamemap:\n print(\" ({})\".format(wirenamemap[src]))\n else:\n print()\n\n # if 
dst.startswith(\"LOCAL_INTERCONNECT:\"):\n # continue\n\n # print(dst, src)\n\n # if dst.startswith(\"L:\"):\n # _, _, I = parse_xyi(dst)\n # muxbits = fliph(muxbits)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if dst.startswith(\"R:\"):\n # _, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if dst.startswith(\"D:\"):\n # X, _, I = parse_xyi(dst)\n # if I >= 3:\n # muxbits = flipv(muxbits)\n # if I == 6:\n # muxbits = fliph(muxbits)\n # if X == 8:\n # muxbits = fliph(muxbits)\n # if dst.startswith(\"U:\"):\n # X, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n # if I == 0:\n # muxbits = fliph(muxbits)\n # if X == 8:\n # muxbits = fliph(muxbits)\n # if dst.startswith(\"L2:\"):\n # _, _, I = parse_xyi(dst)\n # if I >= 4:\n # muxbits = flipv(muxbits)\n\n # decodemux(muxbits)\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
from urllib.error import URLError
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import pymysql
import ssl
from pymysql import Error
def decode_page(page_bytes, charsets=('utf-8',)):
    """Decode raw page bytes, trying each charset in turn.

    Not every site serves utf-8; the first charset that decodes cleanly
    wins. Returns None when none of them succeeds.
    """
    for charset in charsets:
        try:
            return page_bytes.decode(charset)
        except UnicodeDecodeError:
            continue
    return None
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    """Fetch a page's decoded HTML, retrying on URL errors.

    Makes at most retry_times + 1 attempts (initial try plus retries) and
    returns None if every attempt raises URLError, or if the page cannot
    be decoded with the given charsets.
    """
    for _ in range(retry_times + 1):
        try:
            return decode_page(urlopen(seed_url).read(), charsets)
        except URLError:
            continue
    return None
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    """Return the text of the first <h1> element in the page, or None.

    NOTE(review): despite the name and signature, pattern_str and
    pattern_ignore_case are never used -- the function always extracts the
    first <h1> via BeautifulSoup. Confirm whether regex matching was the
    original intent.
    """
    soup = BeautifulSoup(page_html, 'html.parser')
    for h1 in soup.find_all('h1'):
        return h1.get_text()
def get_link_list(page_html):
    """Collect absolute http(s) links from all <a href=...> tags on a page.

    :param page_html: decoded HTML of the page, or None when the fetch
        failed (get_page_html returns None on failure).
    :return: list of URLs containing 'http://' or 'https://'.
    """
    # A failed fetch yields None; BeautifulSoup(None) would raise TypeError.
    if not page_html:
        return []
    soup = BeautifulSoup(page_html, 'html.parser')
    links = []
    # href=True skips anchors without an href attribute; indexing
    # a_link['href'] unconditionally raises KeyError for those.
    for a_link in soup.find_all('a', href=True):
        link = a_link['href']
        if ('https://' in link) or ('http://' in link):
            links.append(link)
    return links
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
    """Run the crawler from *seed_url* and persist scraped titles/links.

    :param seed_url: URL the crawl starts from.
    :param match_pattern: accepted but currently unused -- NOTE(review):
        confirm whether it was meant to be forwarded to get_matched_parts.
    :param max_depth: depth at which expansion stops (-1 means no limit).
    """
    # NOTE(review): database credentials are hard-coded; move them to
    # configuration/environment before sharing this script.
    # conn = pymysql.connect(host='localhost', port=3306,
    #                        database='crawler', user='root',
    #                        password='Huhaohao@123', charset='utf8')
    conn = pymysql.connect(host='localhost', port=3306,
                           user='root', password='Huhaohao@123', charset='utf8')
    with conn.cursor() as cursor:
        #cursor.execute("create database crawler if not exists;")
        cursor.execute('use crawler')
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS tb_result " +
            "(" +
            "title TEXT NOT NULL," +
            "link TEXT NOT NULL" +
            ")"
        )
    try:
        with conn.cursor() as cursor:
            url_list = [seed_url]
            # This dict both de-duplicates URLs and records each URL's depth.
            visited_url_list = {seed_url: 0}
            # NOTE(review): url_list is never extended, so only the seed URL
            # is ever dequeued; linked pages are fetched inline below.
            # Confirm whether deeper breadth-first crawling was intended.
            while url_list:
                current_url = url_list.pop(0)
                depth = visited_url_list[current_url]
                if depth != max_depth:
                    # Try decoding with utf-8/gbk/gb2312 in turn.
                    page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
                    links_list = get_link_list(page_html)
                    param_list = []
                    for link in links_list:
                        if link not in visited_url_list:
                            visited_url_list[link] = depth + 1
                            page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
                            headings = get_matched_parts(page_html, r'<h1>(.*)<span')
                            if headings:
                                param_list.append((headings, link))
                    # Parameterized batch insert of (title, link) rows.
                    cursor.executemany('insert into tb_result(title, link) values(%s, %s)',
                                       param_list)
                    conn.commit()
    except Error:
        # Best-effort: database errors are deliberately swallowed.
        pass
        # logging.error('SQL:', error)
    finally:
        conn.close()
def main():
"""主函数"""
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
r'<a[^>]*href=["\'](.*?)["\']',
max_depth=2)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "53fae0103168f4074ba0645c33e4640fcefdfc96",
"index": 731,
"step-1": "<mask token>\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 
'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 
'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n '<a[^>]*href=[\"\\\\\\'](.*?)[\"\\\\\\']', max_depth=2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 
'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n '<a[^>]*href=[\"\\\\\\'](.*?)[\"\\\\\\']', max_depth=2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from urllib.error import URLError\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\nimport ssl\nfrom pymysql import Error\n\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if 'https://' in link or 'http://' in link:\n list.append(link)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n conn = pymysql.connect(host='localhost', port=3306, user='root',\n password='Huhaohao@123', charset='utf8')\n with conn.cursor() as cursor:\n cursor.execute('use crawler')\n cursor.execute('CREATE TABLE IF NOT EXISTS tb_result ' + '(' +\n 'title TEXT NOT NULL,' + 'link TEXT NOT NULL' + ')')\n try:\n with conn.cursor() as cursor:\n url_list = [seed_url]\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n page_html = get_page_html(current_url, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = 
[]\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=(\n 'utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html,\n '<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany(\n 'insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n finally:\n conn.close()\n\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n '<a[^>]*href=[\"\\\\\\'](.*?)[\"\\\\\\']', max_depth=2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from urllib.error import URLError\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\nimport ssl\nfrom pymysql import Error\n\ndef decode_page(page_bytes, charsets=('utf-8',)):\n \"\"\"通过指定的字符集对页面进行解码(不是每个网站都将字符集设置为utf-8)\"\"\"\n page_html = None\n for charset in charsets:\n try:\n page_html = page_bytes.decode(charset)\n break\n except UnicodeDecodeError:\n pass\n # logging.error('Decode:', error)\n return page_html\n\n\ndef get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):\n \"\"\"获取页面的HTML代码(通过递归实现指定次数的重试操作)\"\"\"\n page_html = None\n try:\n page_html = decode_page(urlopen(seed_url).read(), charsets)\n except URLError:\n # logging.error('URL:', error)\n if retry_times > 0:\n return get_page_html(seed_url, retry_times=retry_times - 1,\n charsets=charsets)\n return page_html\n\n\ndef get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):\n \"\"\"从页面中提取需要的部分(通常是链接也可以通过正则表达式进行指定)\"\"\"\n soup = BeautifulSoup(page_html, 'html.parser')\n for h1 in soup.find_all('h1'):\n return h1.get_text()\n\n\ndef get_link_list(page_html):\n soup = BeautifulSoup(page_html, 'html.parser')\n list = []\n for a_link in soup.find_all('a'):\n link = a_link['href']\n if ('https://' in link) or ('http://' in link):\n list.append(link)\n # print(page_html)\n #print(list)\n return list\n\n\ndef start_crawl(seed_url, match_pattern, *, max_depth=-1):\n \"\"\"开始执行爬虫程序并对指定的数据进行持久化操作\"\"\"\n # conn = pymysql.connect(host='localhost', port=3306,\n # database='crawler', user='root',\n # password='Huhaohao@123', charset='utf8')\n\n conn = pymysql.connect(host='localhost', port=3306,\n user='root', password='Huhaohao@123', charset='utf8')\n\n with conn.cursor() as cursor:\n #cursor.execute(\"create database crawler if not exists;\")\n cursor.execute('use crawler')\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS tb_result \" +\n \"(\" +\n \"title TEXT NOT NULL,\" +\n \"link TEXT NOT NULL\" +\n \")\"\n )\n\n\n\n\n try:\n 
with conn.cursor() as cursor:\n url_list = [seed_url]\n # 通过下面的字典避免重复抓取并控制抓取深度\n visited_url_list = {seed_url: 0}\n while url_list:\n current_url = url_list.pop(0)\n depth = visited_url_list[current_url]\n if depth != max_depth:\n # 尝试用utf-8/gbk/gb2312三种字符集进行页面解码\n page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))\n links_list = get_link_list(page_html)\n param_list = []\n for link in links_list:\n if link not in visited_url_list:\n visited_url_list[link] = depth + 1\n page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))\n headings = get_matched_parts(page_html, r'<h1>(.*)<span')\n if headings:\n param_list.append((headings, link))\n cursor.executemany('insert into tb_result(title, link) values(%s, %s)',\n param_list)\n conn.commit()\n except Error:\n pass\n # logging.error('SQL:', error)\n finally:\n conn.close()\n\ndef main():\n \"\"\"主函数\"\"\"\n ssl._create_default_https_context = ssl._create_unverified_context\n start_crawl('http://sports.sohu.com/nba_a.shtml',\n r'<a[^>]*href=[\"\\'](.*?)[\"\\']',\n max_depth=2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
def name_of_function():
"""
Docstring explains function.
"""
return 'Hello'
def dog_check(mystring):
if 'dog' in mystring.lower():
return True
else:
return False
<|reserved_special_token_0|>
def dog_check(mystring):
return 'dog' in mystring.lower()
<|reserved_special_token_0|>
def myfunc(**kwargs):
if 'fruit' in kwargs:
print('My fruit of choice is {}'.format(kwargs['fruit']))
else:
print('I did not find any fruit here')
<|reserved_special_token_0|>
def myfunc(*args, **kwargs):
print('I would like {} {}'.format(args[0], kwargs['food']))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def name_of_function():
"""
Docstring explains function.
"""
return 'Hello'
def dog_check(mystring):
if 'dog' in mystring.lower():
return True
else:
return False
<|reserved_special_token_0|>
def dog_check(mystring):
return 'dog' in mystring.lower()
<|reserved_special_token_0|>
def myfunc(**kwargs):
if 'fruit' in kwargs:
print('My fruit of choice is {}'.format(kwargs['fruit']))
else:
print('I did not find any fruit here')
<|reserved_special_token_0|>
def myfunc(*args, **kwargs):
print('I would like {} {}'.format(args[0], kwargs['food']))
<|reserved_special_token_0|>
def myfunc(word):
result = ''
for index, letter in enumerate(word):
if index % 2 == 0:
result += letter.lower()
else:
result += letter.upper()
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def name_of_function():
"""
Docstring explains function.
"""
return 'Hello'
def dog_check(mystring):
if 'dog' in mystring.lower():
return True
else:
return False
<|reserved_special_token_0|>
def dog_check(mystring):
return 'dog' in mystring.lower()
def myfunc(*args):
return sum(args) * 0.05
<|reserved_special_token_0|>
def myfunc(**kwargs):
if 'fruit' in kwargs:
print('My fruit of choice is {}'.format(kwargs['fruit']))
else:
print('I did not find any fruit here')
<|reserved_special_token_0|>
def myfunc(*args, **kwargs):
print('I would like {} {}'.format(args[0], kwargs['food']))
<|reserved_special_token_0|>
def myfunc(word):
result = ''
for index, letter in enumerate(word):
if index % 2 == 0:
result += letter.lower()
else:
result += letter.upper()
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def name_of_function():
"""
Docstring explains function.
"""
return 'Hello'
def dog_check(mystring):
if 'dog' in mystring.lower():
return True
else:
return False
dog_check('Dog ran away')
def dog_check(mystring):
return 'dog' in mystring.lower()
def myfunc(*args):
return sum(args) * 0.05
myfunc(14, 10, 100)
def myfunc(**kwargs):
if 'fruit' in kwargs:
print('My fruit of choice is {}'.format(kwargs['fruit']))
else:
print('I did not find any fruit here')
myfunc(fruit='apple')
def myfunc(*args, **kwargs):
print('I would like {} {}'.format(args[0], kwargs['food']))
myfunc(10, 20, 30, fruit='orange', food='eggs', animal='dog')
def myfunc(word):
result = ''
for index, letter in enumerate(word):
if index % 2 == 0:
result += letter.lower()
else:
result += letter.upper()
return result
myfunc('VictoriaSok')
<|reserved_special_token_1|>
#Creating function
def name_of_function():
'''
Docstring explains function.
'''
return "Hello" #use return instead of print since return can be stored as a variable.
#Simple example
def dog_check(mystring):
if 'dog' in mystring.lower():
return True
else:
return False
#This is a beginner move. x in y.lower() is already a boolean.
dog_check('Dog ran away')
#Expert move:
def dog_check(mystring):
return 'dog' in mystring.lower()
# *args
def myfunc(*args): #instead of myfunc(a,b,c,...) no limit of arguments and it will be treated as tuples.
return sum(args) * 0.05
myfunc(14,10,100)
# **kwargs # kwargs returns as a dictionary
def myfunc(**kwargs):
if 'fruit' in kwargs:
print('My fruit of choice is {}'.format(kwargs['fruit']))
else:
print('I did not find any fruit here')
myfunc(fruit='apple')
#Combination
def myfunc(*args, **kwargs):
print('I would like {} {}'.format(args[0], kwargs['food']))
myfunc(10,20,30,fruit='orange',food='eggs',animal='dog')
##BONUS Project
#Define a function called myfunc that takes in a string, and returns a matching string where every even letter is uppercase, n/
#and every odd letter is lowercase.
def myfunc(word):
result = ""
for index, letter in enumerate(word):
if index % 2 == 0:
result += letter.lower()
else:
result += letter.upper()
return result
myfunc('VictoriaSok')
|
flexible
|
{
"blob_id": "1deb070dd91c01190b70fa678add31ecb82f34fa",
"index": 3404,
"step-1": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\n<mask token>\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\n<mask token>\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\n<mask token>\n",
"step-2": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\n<mask token>\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\n<mask token>\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\n<mask token>\n\n\ndef myfunc(word):\n result = ''\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n\n\n<mask token>\n",
"step-3": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\ndef myfunc(*args):\n return sum(args) * 0.05\n\n\n<mask token>\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\n<mask token>\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\n<mask token>\n\n\ndef myfunc(word):\n result = ''\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n\n\n<mask token>\n",
"step-4": "def name_of_function():\n \"\"\"\n Docstring explains function.\n \"\"\"\n return 'Hello'\n\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n\n\ndog_check('Dog ran away')\n\n\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\ndef myfunc(*args):\n return sum(args) * 0.05\n\n\nmyfunc(14, 10, 100)\n\n\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n\n\nmyfunc(fruit='apple')\n\n\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n\n\nmyfunc(10, 20, 30, fruit='orange', food='eggs', animal='dog')\n\n\ndef myfunc(word):\n result = ''\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n\n\nmyfunc('VictoriaSok')\n",
"step-5": "#Creating function\n\ndef name_of_function():\n '''\n Docstring explains function.\n '''\n return \"Hello\" #use return instead of print since return can be stored as a variable.\n \n \n#Simple example\n\ndef dog_check(mystring):\n if 'dog' in mystring.lower():\n return True\n else:\n return False\n#This is a beginner move. x in y.lower() is already a boolean.\n\ndog_check('Dog ran away')\n\n#Expert move:\ndef dog_check(mystring):\n return 'dog' in mystring.lower()\n\n\n# *args\ndef myfunc(*args): #instead of myfunc(a,b,c,...) no limit of arguments and it will be treated as tuples.\n return sum(args) * 0.05\n \nmyfunc(14,10,100)\n\n\n# **kwargs # kwargs returns as a dictionary\ndef myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')\n \nmyfunc(fruit='apple')\n\n\n#Combination\ndef myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))\n \nmyfunc(10,20,30,fruit='orange',food='eggs',animal='dog')\n\n\n\n##BONUS Project\n#Define a function called myfunc that takes in a string, and returns a matching string where every even letter is uppercase, n/\n#and every odd letter is lowercase.\n\ndef myfunc(word):\n\n result = \"\"\n for index, letter in enumerate(word):\n if index % 2 == 0:\n result += letter.lower()\n else:\n result += letter.upper()\n return result\n \nmyfunc('VictoriaSok')\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class JobTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_field_order(self):
"""
Job test with field order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_field_order'
name = 'TestFieldOrder'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>"""
)
<|reserved_special_token_0|>
def test_ready_only_job_pass(self):
"""
Job read only test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_pass'
name = 'TestReadOnlyPass'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
self.assertEqual(Site.objects.count(), 0)
<|reserved_special_token_0|>
def test_read_only_no_commit_field(self):
"""
Job read only test commit field is not shown.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_no_commit_field'
name = 'TestReadOnlyNoCommitField'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>"""
)
<|reserved_special_token_0|>
class JobFileUploadTest(TestCase):
"""Test a job that uploads/deletes files."""
@classmethod
def setUpTestData(cls):
cls.file_contents = b'I am content.\n'
cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.
file_contents)
cls.job_content_type = ContentType.objects.get(app_label='extras',
model='job')
def setUp(self):
self.dummy_file.seek(0)
def test_run_job_pass(self):
"""Test that file upload succeeds; job SUCCEEDS; and files are deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_pass/TestFileUploadPass'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(FileProxy.objects.count(), 0)
def test_run_job_fail(self):
"""Test that file upload succeeds; job FAILS; files deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_fail/TestFileUploadFail'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(job_result.data['run']['log'][-1][-1],
'Database changes have been reverted due to error.')
self.assertEqual(FileProxy.objects.count(), 0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class JobTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def setUpTestData(cls):
cls.job_content_type = ContentType.objects.get(app_label='extras',
model='job')
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_pass'
name = 'TestPass'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
<|reserved_special_token_0|>
def test_field_order(self):
"""
Job test with field order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_field_order'
name = 'TestFieldOrder'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>"""
)
def test_no_field_order(self):
"""
Job test without field_order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_no_field_order'
name = 'TestNoFieldOrder'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>"""
)
def test_ready_only_job_pass(self):
"""
Job read only test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_pass'
name = 'TestReadOnlyPass'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
self.assertEqual(Site.objects.count(), 0)
def test_read_only_job_fail(self):
"""
Job read only test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_fail'
name = 'TestReadOnlyFail'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_ERRORED)
self.assertEqual(Site.objects.count(), 0)
self.assertNotEqual(job_result.data['run']['log'][-1][-1],
'Database changes have been reverted due to error.')
def test_read_only_no_commit_field(self):
"""
Job read only test commit field is not shown.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_no_commit_field'
name = 'TestReadOnlyNoCommitField'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>"""
)
def test_ip_address_vars(self):
"""
Test that IPAddress variable fields behave as expected.
This test case exercises the following types for both IPv4 and IPv6:
- IPAddressVar
- IPAddressWithMaskVar
- IPNetworkVar
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_ipaddress_vars'
name = 'TestIPAddresses'
job_class = get_job(f'local/{module}/{name}')
form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=
'1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=
'2001:db8::1', ipv6_with_mask='2001:db8::1/64',
ipv6_network='2001:db8::/64')
form = job_class().as_form(form_data)
self.assertTrue(form.is_valid())
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = job_class.serialize_data(form.cleaned_data)
run_job(data=data, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
job_payload = job_result.data['run']['log'][0][2]
job_result_data = json.loads(job_payload)
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
self.assertEqual(form_data, job_result_data)
class JobFileUploadTest(TestCase):
"""Test a job that uploads/deletes files."""
@classmethod
def setUpTestData(cls):
cls.file_contents = b'I am content.\n'
cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.
file_contents)
cls.job_content_type = ContentType.objects.get(app_label='extras',
model='job')
def setUp(self):
self.dummy_file.seek(0)
def test_run_job_pass(self):
"""Test that file upload succeeds; job SUCCEEDS; and files are deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_pass/TestFileUploadPass'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(FileProxy.objects.count(), 0)
def test_run_job_fail(self):
"""Test that file upload succeeds; job FAILS; files deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_fail/TestFileUploadFail'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(job_result.data['run']['log'][-1][-1],
'Database changes have been reverted due to error.')
self.assertEqual(FileProxy.objects.count(), 0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class JobTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def setUpTestData(cls):
cls.job_content_type = ContentType.objects.get(app_label='extras',
model='job')
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_pass'
name = 'TestPass'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
def test_job_fail(self):
"""
Job test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_fail'
name = 'TestFail'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_ERRORED)
def test_field_order(self):
"""
Job test with field order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_field_order'
name = 'TestFieldOrder'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>"""
)
def test_no_field_order(self):
"""
Job test without field_order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_no_field_order'
name = 'TestNoFieldOrder'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>"""
)
def test_ready_only_job_pass(self):
"""
Job read only test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_pass'
name = 'TestReadOnlyPass'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
self.assertEqual(Site.objects.count(), 0)
def test_read_only_job_fail(self):
"""
Job read only test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_fail'
name = 'TestReadOnlyFail'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_ERRORED)
self.assertEqual(Site.objects.count(), 0)
self.assertNotEqual(job_result.data['run']['log'][-1][-1],
'Database changes have been reverted due to error.')
def test_read_only_no_commit_field(self):
"""
Job read only test commit field is not shown.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_no_commit_field'
name = 'TestReadOnlyNoCommitField'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>"""
)
def test_ip_address_vars(self):
"""
Test that IPAddress variable fields behave as expected.
This test case exercises the following types for both IPv4 and IPv6:
- IPAddressVar
- IPAddressWithMaskVar
- IPNetworkVar
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_ipaddress_vars'
name = 'TestIPAddresses'
job_class = get_job(f'local/{module}/{name}')
form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=
'1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=
'2001:db8::1', ipv6_with_mask='2001:db8::1/64',
ipv6_network='2001:db8::/64')
form = job_class().as_form(form_data)
self.assertTrue(form.is_valid())
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = job_class.serialize_data(form.cleaned_data)
run_job(data=data, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
job_payload = job_result.data['run']['log'][0][2]
job_result_data = json.loads(job_payload)
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
self.assertEqual(form_data, job_result_data)
class JobFileUploadTest(TestCase):
"""Test a job that uploads/deletes files."""
@classmethod
def setUpTestData(cls):
cls.file_contents = b'I am content.\n'
cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.
file_contents)
cls.job_content_type = ContentType.objects.get(app_label='extras',
model='job')
def setUp(self):
self.dummy_file.seek(0)
def test_run_job_pass(self):
"""Test that file upload succeeds; job SUCCEEDS; and files are deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_pass/TestFileUploadPass'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(FileProxy.objects.count(), 0)
def test_run_job_fail(self):
"""Test that file upload succeeds; job FAILS; files deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_fail/TestFileUploadFail'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(job_result.data['run']['log'][-1][-1],
'Database changes have been reverted due to error.')
self.assertEqual(FileProxy.objects.count(), 0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class JobTest(TestCase):
<|reserved_special_token_0|>
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.job_content_type = ContentType.objects.get(app_label='extras',
model='job')
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_pass'
name = 'TestPass'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
def test_job_fail(self):
"""
Job test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_fail'
name = 'TestFail'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_ERRORED)
def test_field_order(self):
"""
Job test with field order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_field_order'
name = 'TestFieldOrder'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>"""
)
def test_no_field_order(self):
"""
Job test without field_order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_no_field_order'
name = 'TestNoFieldOrder'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>"""
)
def test_ready_only_job_pass(self):
"""
Job read only test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_pass'
name = 'TestReadOnlyPass'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
self.assertEqual(Site.objects.count(), 0)
def test_read_only_job_fail(self):
"""
Job read only test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_fail'
name = 'TestReadOnlyFail'
job_class = get_job(f'local/{module}/{name}')
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
run_job(data={}, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_ERRORED)
self.assertEqual(Site.objects.count(), 0)
self.assertNotEqual(job_result.data['run']['log'][-1][-1],
'Database changes have been reverted due to error.')
def test_read_only_no_commit_field(self):
"""
Job read only test commit field is not shown.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_read_only_no_commit_field'
name = 'TestReadOnlyNoCommitField'
job_class = get_job(f'local/{module}/{name}')
form = job_class().as_form()
self.assertHTMLEqual(form.as_table(),
"""<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>"""
)
def test_ip_address_vars(self):
"""
Test that IPAddress variable fields behave as expected.
This test case exercises the following types for both IPv4 and IPv6:
- IPAddressVar
- IPAddressWithMaskVar
- IPNetworkVar
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
module = 'test_ipaddress_vars'
name = 'TestIPAddresses'
job_class = get_job(f'local/{module}/{name}')
form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=
'1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=
'2001:db8::1', ipv6_with_mask='2001:db8::1/64',
ipv6_network='2001:db8::/64')
form = job_class().as_form(form_data)
self.assertTrue(form.is_valid())
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = job_class.serialize_data(form.cleaned_data)
run_job(data=data, request=None, commit=False, job_result_pk=
job_result.pk)
job_result.refresh_from_db()
job_payload = job_result.data['run']['log'][0][2]
job_result_data = json.loads(job_payload)
self.assertEqual(job_result.status, JobResultStatusChoices.
STATUS_COMPLETED)
self.assertEqual(form_data, job_result_data)
class JobFileUploadTest(TestCase):
"""Test a job that uploads/deletes files."""
@classmethod
def setUpTestData(cls):
cls.file_contents = b'I am content.\n'
cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.
file_contents)
cls.job_content_type = ContentType.objects.get(app_label='extras',
model='job')
def setUp(self):
self.dummy_file.seek(0)
def test_run_job_pass(self):
"""Test that file upload succeeds; job SUCCEEDS; and files are deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_pass/TestFileUploadPass'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(FileProxy.objects.count(), 0)
def test_run_job_fail(self):
"""Test that file upload succeeds; job FAILS; files deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,
'extras/tests/dummy_jobs')):
job_name = 'local/test_file_upload_fail/TestFileUploadFail'
job_class = get_job(job_name)
job_result = JobResult.objects.create(name=job_class.class_path,
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())
data = {'file': self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))
self.assertEqual(serialized_data['file'], FileProxy.objects.
latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
run_job(data=serialized_data, request=None, commit=False,
job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.data['run']['log'][0][2],
f'File contents: {self.file_contents}')
self.assertEqual(job_result.data['run']['log'][-1][-1],
'Database changes have been reverted due to error.')
self.assertEqual(FileProxy.objects.count(), 0)
<|reserved_special_token_1|>
import json
import os
import uuid
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from nautobot.dcim.models import Site
from nautobot.extras.choices import JobResultStatusChoices
from nautobot.extras.jobs import get_job, run_job
from nautobot.extras.models import FileAttachment, FileProxy, JobResult
from nautobot.utilities.testing import TestCase
class JobTest(TestCase):
    """
    Test basic jobs to ensure importing works.
    """

    # Show full diffs when the long assertHTMLEqual comparisons below fail.
    maxDiff = None

    @classmethod
    def setUpTestData(cls):
        # ContentType for the Job model, required as obj_type on every JobResult.
        cls.job_content_type = ContentType.objects.get(app_label="extras", model="job")

    @staticmethod
    def _dummy_jobs_root():
        """Return the filesystem path of the dummy job modules used by these tests."""
        return os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")

    def _run_local_job(self, module, name, data=None):
        """
        Create a JobResult for ``local/<module>/<name>``, run it as a dry-run
        (commit=False), and return the refreshed JobResult.

        Must be called inside a ``self.settings(JOBS_ROOT=...)`` context so that
        ``get_job()`` can import the dummy module.
        """
        job_class = get_job(f"local/{module}/{name}")
        job_result = JobResult.objects.create(
            name=job_class.class_path,
            obj_type=self.job_content_type,
            user=None,
            job_id=uuid.uuid4(),
        )
        run_job(data=data if data is not None else {}, request=None, commit=False, job_result_pk=job_result.pk)
        job_result.refresh_from_db()
        return job_result

    def test_job_pass(self):
        """
        Job test with pass result.
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            job_result = self._run_local_job("test_pass", "TestPass")
            self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)

    def test_job_fail(self):
        """
        Job test with fail result.
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            job_result = self._run_local_job("test_fail", "TestFail")
            self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)

    def test_field_order(self):
        """
        Job test with field order.
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            module = "test_field_order"
            name = "TestFieldOrder"

            job_class = get_job(f"local/{module}/{name}")
            form = job_class().as_form()

            # var2 is declared second in the job but field_order puts it first.
            self.assertHTMLEqual(
                form.as_table(),
                """<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
            )

    def test_no_field_order(self):
        """
        Job test without field_order.
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            module = "test_no_field_order"
            name = "TestNoFieldOrder"

            job_class = get_job(f"local/{module}/{name}")
            form = job_class().as_form()

            # Without field_order, fields render in declaration order.
            self.assertHTMLEqual(
                form.as_table(),
                """<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
            )

    # NOTE(review): "ready" in the method name is a typo for "read"; kept as-is so
    # existing test selections/CI references to this test name keep working.
    def test_ready_only_job_pass(self):
        """
        Job read only test with pass result.
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            job_result = self._run_local_job("test_read_only_pass", "TestReadOnlyPass")
            self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
            self.assertEqual(Site.objects.count(), 0)  # Ensure DB transaction was aborted

    def test_read_only_job_fail(self):
        """
        Job read only test with fail result.
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            job_result = self._run_local_job("test_read_only_fail", "TestReadOnlyFail")
            self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
            self.assertEqual(Site.objects.count(), 0)  # Ensure DB transaction was aborted
            # Also ensure the standard log message about aborting the transaction is *not* present
            self.assertNotEqual(
                job_result.data["run"]["log"][-1][-1], "Database changes have been reverted due to error."
            )

    def test_read_only_no_commit_field(self):
        """
        Job read only test commit field is not shown.
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            module = "test_read_only_no_commit_field"
            name = "TestReadOnlyNoCommitField"

            job_class = get_job(f"local/{module}/{name}")
            form = job_class().as_form()

            # The commit checkbox is replaced by a hidden False input for read-only jobs.
            self.assertHTMLEqual(
                form.as_table(),
                """<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>""",
            )

    def test_ip_address_vars(self):
        """
        Test that IPAddress variable fields behave as expected.

        This test case exercises the following types for both IPv4 and IPv6:

        - IPAddressVar
        - IPAddressWithMaskVar
        - IPNetworkVar
        """
        with self.settings(JOBS_ROOT=self._dummy_jobs_root()):
            module = "test_ipaddress_vars"
            name = "TestIPAddresses"
            job_class = get_job(f"local/{module}/{name}")

            # Fill out the form
            form_data = dict(
                ipv4_address="1.2.3.4",
                ipv4_with_mask="1.2.3.4/32",
                ipv4_network="1.2.3.0/24",
                ipv6_address="2001:db8::1",
                ipv6_with_mask="2001:db8::1/64",
                ipv6_network="2001:db8::/64",
            )
            form = job_class().as_form(form_data)
            self.assertTrue(form.is_valid())

            # Run the job with the cleaned form data and extract the job payload
            job_result = self._run_local_job(module, name, data=job_class.serialize_data(form.cleaned_data))
            job_payload = job_result.data["run"]["log"][0][2]  # Indexing makes me sad.
            job_result_data = json.loads(job_payload)

            # The job echoes its inputs back as JSON; they must round-trip unchanged.
            self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
            self.assertEqual(form_data, job_result_data)
class JobFileUploadTest(TestCase):
    """Test a job that uploads/deletes files."""

    @classmethod
    def setUpTestData(cls):
        cls.file_contents = b"I am content.\n"
        cls.dummy_file = SimpleUploadedFile(name="dummy.txt", content=cls.file_contents)
        cls.job_content_type = ContentType.objects.get(app_label="extras", model="job")

    def setUp(self):
        self.dummy_file.seek(0)  # Reset cursor so we can read it again.

    def _run_file_upload_job(self, job_name):
        """
        Serialize ``self.dummy_file`` through the job form, assert it was stored
        as exactly one FileProxy, run the job as a dry-run (commit=False), and
        return the refreshed JobResult.

        Must be called inside a ``self.settings(JOBS_ROOT=...)`` context so that
        ``get_job()`` can import the dummy module.
        """
        job_class = get_job(job_name)
        job_result = JobResult.objects.create(
            name=job_class.class_path,
            obj_type=self.job_content_type,
            user=None,
            job_id=uuid.uuid4(),
        )

        # Serialize the file to FileProxy
        form = job_class().as_form(files={"file": self.dummy_file})
        self.assertTrue(form.is_valid())
        serialized_data = job_class.serialize_data(form.cleaned_data)

        # Assert that the file was serialized to a FileProxy
        self.assertTrue(isinstance(serialized_data["file"], uuid.UUID))
        self.assertEqual(serialized_data["file"], FileProxy.objects.latest().pk)
        self.assertEqual(FileProxy.objects.count(), 1)

        # Run the job
        run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)
        job_result.refresh_from_db()
        return job_result

    def test_run_job_pass(self):
        """Test that file upload succeeds; job SUCCEEDS; and files are deleted."""
        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
            job_result = self._run_file_upload_job("local/test_file_upload_pass/TestFileUploadPass")

            # Assert that file contents were correctly read
            self.assertEqual(
                job_result.data["run"]["log"][0][2], f"File contents: {self.file_contents}"  # "File contents: ..."
            )
            # Assert that FileProxy was cleaned up
            self.assertEqual(FileProxy.objects.count(), 0)

    def test_run_job_fail(self):
        """Test that file upload succeeds; job FAILS; files deleted."""
        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
            job_result = self._run_file_upload_job("local/test_file_upload_fail/TestFileUploadFail")

            # Assert that file contents were correctly read
            self.assertEqual(
                job_result.data["run"]["log"][0][2], f"File contents: {self.file_contents}"  # "File contents: ..."
            )
            # Also ensure the standard log message about aborting the transaction is present
            self.assertEqual(job_result.data["run"]["log"][-1][-1], "Database changes have been reverted due to error.")
            # Assert that FileProxy was cleaned up
            self.assertEqual(FileProxy.objects.count(), 0)
|
flexible
|
{
"blob_id": "d2298ad1e4737b983ba6d1f2fff59750137510b5",
"index": 904,
"step-1": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <mask token>\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n <mask token>\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read 
only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <mask token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n 
self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-2": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n <mask token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n <mask token>\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n 
\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = 
get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = 
JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def 
test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-3": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n <mask token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label 
for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n 
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - 
IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n 
serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-4": "<mask token>\n\n\nclass JobTest(TestCase):\n <mask token>\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label 
for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n 
obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - 
IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n 
serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-5": "import json\nimport os\nimport uuid\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom nautobot.dcim.models import Site\nfrom nautobot.extras.choices import JobResultStatusChoices\nfrom nautobot.extras.jobs import get_job, run_job\nfrom nautobot.extras.models import FileAttachment, FileProxy, JobResult\nfrom nautobot.utilities.testing import TestCase\n\n\nclass JobTest(TestCase):\n \"\"\"\n Test basic jobs to ensure importing works.\n \"\"\"\n\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label=\"extras\", model=\"job\")\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_pass\"\n name = \"TestPass\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_fail\"\n name = \"TestFail\"\n job_class = get_job(f\"local/{module}/{name}\")\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n 
\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_field_order\"\n name = \"TestFieldOrder\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\",\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_no_field_order\"\n name = \"TestNoFieldOrder\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked 
id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\",\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_pass\"\n name = \"TestReadOnlyPass\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_fail\"\n name = \"TestReadOnlyFail\"\n job_class = get_job(f\"local/{module}/{name}\")\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted\n # Also ensure the standard log message about aborting the transaction is *not* present\n self.assertNotEqual(\n job_result.data[\"run\"][\"log\"][-1][-1], \"Database changes have been reverted due to error.\"\n )\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with 
self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_no_commit_field\"\n name = \"TestReadOnlyNoCommitField\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\",\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_ipaddress_vars\"\n name = \"TestIPAddresses\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n # Fill out the form\n form_data = dict(\n ipv4_address=\"1.2.3.4\",\n ipv4_with_mask=\"1.2.3.4/32\",\n ipv4_network=\"1.2.3.0/24\",\n ipv6_address=\"2001:db8::1\",\n ipv6_with_mask=\"2001:db8::1/64\",\n ipv6_network=\"2001:db8::/64\",\n )\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n\n # Prepare the job data\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n data = job_class.serialize_data(form.cleaned_data)\n\n # Run the job and extract the job payload data\n run_job(data=data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data[\"run\"][\"log\"][0][2] # Indexing makes me sad.\n job_result_data = json.loads(job_payload)\n\n # Assert stuff\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n 
self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b\"I am content.\\n\"\n cls.dummy_file = SimpleUploadedFile(name=\"dummy.txt\", content=cls.file_contents)\n cls.job_content_type = ContentType.objects.get(app_label=\"extras\", model=\"job\")\n\n def setUp(self):\n self.dummy_file.seek(0) # Reset cursor so we can read it again.\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n job_name = \"local/test_file_upload_pass/TestFileUploadPass\"\n job_class = get_job(job_name)\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n # Serialize the file to FileProxy\n data = {\"file\": self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n\n # Assert that the file was serialized to a FileProxy\n self.assertTrue(isinstance(serialized_data[\"file\"], uuid.UUID))\n self.assertEqual(serialized_data[\"file\"], FileProxy.objects.latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n\n # Run the job\n run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n\n # Assert that file contents were correctly read\n self.assertEqual(\n job_result.data[\"run\"][\"log\"][0][2], f\"File contents: {self.file_contents}\" # \"File contents: ...\"\n )\n\n # Assert that FileProxy was cleaned up\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, 
\"extras/tests/dummy_jobs\")):\n job_name = \"local/test_file_upload_fail/TestFileUploadFail\"\n job_class = get_job(job_name)\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n # Serialize the file to FileProxy\n data = {\"file\": self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n\n # Assert that the file was serialized to a FileProxy\n self.assertTrue(isinstance(serialized_data[\"file\"], uuid.UUID))\n self.assertEqual(serialized_data[\"file\"], FileProxy.objects.latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n\n # Run the job\n run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n\n # Assert that file contents were correctly read\n self.assertEqual(\n job_result.data[\"run\"][\"log\"][0][2], f\"File contents: {self.file_contents}\" # \"File contents: ...\"\n )\n # Also ensure the standard log message about aborting the transaction is present\n self.assertEqual(job_result.data[\"run\"][\"log\"][-1][-1], \"Database changes have been reverted due to error.\")\n\n # Assert that FileProxy was cleaned up\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"step-ids": [
10,
15,
16,
17,
20
]
}
|
[
10,
15,
16,
17,
20
] |
from jabberbot import JabberBot, botcmd
import datetime
import logging
import sys
import time;
from config import username, password, chatroom, adminuser
class SystemInfoJabberBot(JabberBot):
    """Jabber bot exposing a few system-information chat commands."""

    @botcmd
    def serverinfo(self, mess, args):
        """Displays information about the server."""
        # Use context managers so the /proc handles are always closed.
        with open('/proc/version') as fh:
            version = fh.read().strip()
        with open('/proc/loadavg') as fh:
            loadavg = fh.read().strip()
        # Kernel version and load average separated by a blank line.
        # (The original "'%snn%s'" was an escaping-mangled '%s\n\n%s'.)
        return '%s\n\n%s' % (version, loadavg)

    @botcmd
    def time(self, mess, args):
        """Displays current server time."""
        return str(datetime.datetime.now())

    @botcmd
    def rot13(self, mess, args):
        """Returns passed arguments rot13'ed."""
        # NOTE(review): str.encode('rot13') only exists on Python 2; on
        # Python 3 this would need codecs.encode(args, 'rot13') — confirm
        # the target interpreter before changing it.
        return args.encode('rot13')

    @botcmd
    def whoami(self, mess, args):
        """Tells you your username."""
        return mess.getFrom().getStripped()
# --- Logging: mirror all bot activity to stdout at DEBUG level. ---
root = logging.getLogger()
root.setLevel(logging.DEBUG)

ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)

# --- Connect, join the MUC room and announce ourselves. ---
bot = SystemInfoJabberBot(username, password)
bot.join_room(chatroom, 'credilbot')
bot.send(adminuser, 'Hello Julien, je suis connecte')
bot.send(chatroom, 'Testing...', None, 'groupchat')

# Broadcast the current time to the chat room every 5 seconds.
# NOTE(review): this loop never terminates, so bot.serve_forever() below is
# unreachable and incoming chat commands are never processed — confirm
# whether the broadcaster should run in a background thread instead.
while True:
    bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')
    time.sleep(5)

bot.serve_forever()
|
normal
|
{
"blob_id": "c9872fb536fd6552e2a5353566305555808747f7",
"index": 1777,
"step-1": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n <mask token>\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n\n @botcmd\n def time(self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n\n @botcmd\n def time(self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\n<mask token>\nroot.setLevel(logging.DEBUG)\n<mask token>\nch.setLevel(logging.DEBUG)\n<mask token>\nch.setFormatter(formatter)\nroot.addHandler(ch)\n<mask token>\nbot.join_room(chatroom, 'credilbot')\nbot.send(adminuser, 'Hello Julien, je suis connecte')\nbot.send(chatroom, 'Testing...', None, 'groupchat')\nwhile 1:\n bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')\n time.sleep(5)\nbot.serve_forever()\n",
"step-4": "<mask token>\n\n\nclass SystemInfoJabberBot(JabberBot):\n\n @botcmd\n def serverinfo(self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n return '%snn%s' % (version, loadavg)\n\n @botcmd\n def time(self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13(self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\nroot = logging.getLogger()\nroot.setLevel(logging.DEBUG)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nroot.addHandler(ch)\nbot = SystemInfoJabberBot(username, password)\nbot.join_room(chatroom, 'credilbot')\nbot.send(adminuser, 'Hello Julien, je suis connecte')\nbot.send(chatroom, 'Testing...', None, 'groupchat')\nwhile 1:\n bot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')\n time.sleep(5)\nbot.serve_forever()\n",
"step-5": "from jabberbot import JabberBot, botcmd\nimport datetime\nimport logging\nimport sys\nimport time;\n\nfrom config import username, password, chatroom, adminuser\n\nclass SystemInfoJabberBot(JabberBot):\n @botcmd\n def serverinfo( self, mess, args):\n \"\"\"Displays information about the server\"\"\"\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%snn%s' % ( version, loadavg, )\n\n @botcmd\n def time( self, mess, args):\n \"\"\"Displays current server time\"\"\"\n return str(datetime.datetime.now())\n\n @botcmd\n def rot13( self, mess, args):\n \"\"\"Returns passed arguments rot13'ed\"\"\"\n return args.encode('rot13')\n\n @botcmd\n def whoami(self, mess, args):\n \"\"\"Tells you your username\"\"\"\n return mess.getFrom().getStripped()\n\n\nroot = logging.getLogger()\nroot.setLevel(logging.DEBUG)\n\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nroot.addHandler(ch)\n\n\nbot = SystemInfoJabberBot(username,password)\nbot.join_room(chatroom, 'credilbot')\nbot.send(adminuser, 'Hello Julien, je suis connecte')\n#print bot.muc_room_participants(chatroom);\nbot.send(chatroom, 'Testing...', None, 'groupchat')\n\nwhile 1: \n\tbot.send(chatroom, str(datetime.datetime.now()), None, 'groupchat')\n\ttime.sleep(5)\n\nbot.serve_forever()\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
"""
This module is used to extract features from the lines extracted from documents
using BERT encodings. This package leverages the bert-as-a-server package to create the
embeddings.
Example:
feature_extractor = FeatureExtractor(document) # document is of class Document
encoded_doc = feature_extractor.encode()
feature_extractor.end()
Todo:
* lines --> sentences for a better representation of the embeddings
* try different BERT models
* train the BERT model for a specific task before encoding
"""
from bert_serving.client import BertClient
class FeatureExtractor:
    """Embeds the text of a Document's lines via a Bert-as-a-Service client.

    Attributes:
        _document (Document): holds the extracted lines; each Line gains a
            populated ``encoding`` attribute once :meth:`encode` has run.
        _bc (BertClient): open connection to the running BertServer.
    """

    def __init__(self, document):
        self._document = document
        self._bc = BertClient()

    def encode(self):
        """Embed every line of the document, store the vectors in place, and
        return the (mutated) document."""
        texts = [ln.text for ln in self._document.lines]
        vectors = self._bc.encode(texts)
        for ln, vec in zip(self._document.lines, vectors):
            ln.encoding = vec
        return self._document

    def end(self):
        """Release the underlying BertClient connection to the BertServer."""
        self._bc.close()
|
normal
|
{
"blob_id": "882d265f14c04b2f2f626504d18e2cd07dcc8637",
"index": 3042,
"step-1": "<mask token>\n\n\nclass FeatureExtractor:\n <mask token>\n <mask token>\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-2": "<mask token>\n\n\nclass FeatureExtractor:\n <mask token>\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-3": "<mask token>\n\n\nclass FeatureExtractor:\n \"\"\"Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.\n\n Attributes:\n document (Document): This object encompasses the extracted text from one of the\n PDF documents. There is an encoding field on each Line which is where the\n embedding from BERT will be included, and where the text that gets encoded will\n be provided.\n _bc (BertClient): Connection to the BertServer which can be used for encoding.\n\n \"\"\"\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-4": "<mask token>\nfrom bert_serving.client import BertClient\n\n\nclass FeatureExtractor:\n \"\"\"Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.\n\n Attributes:\n document (Document): This object encompasses the extracted text from one of the\n PDF documents. There is an encoding field on each Line which is where the\n embedding from BERT will be included, and where the text that gets encoded will\n be provided.\n _bc (BertClient): Connection to the BertServer which can be used for encoding.\n\n \"\"\"\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-5": "\"\"\"\n\nThis module is used to extract features from the lines extracted from documents\nusing BERT encodings. This package leverages the bert-as-a-server package to create the\nembeddings.\n\nExample:\n feature_extractor = FeatureExtractor(document) # document is of class Document\n encoded_doc = feature_extractor.encode()\n feature_extractor.end()\n\nTodo:\n * lines --> sentences for a better representation of the embeddings\n * try different BERT models\n * train the BERT model for a specific task before encoding\n\n\n\"\"\"\n\nfrom bert_serving.client import BertClient\n\n\nclass FeatureExtractor:\n \"\"\"Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.\n\n Attributes:\n document (Document): This object encompasses the extracted text from one of the\n PDF documents. There is an encoding field on each Line which is where the\n embedding from BERT will be included, and where the text that gets encoded will\n be provided.\n _bc (BertClient): Connection to the BertServer which can be used for encoding.\n\n \"\"\"\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for (line, encoding) in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from Socket import Socket
import threading
class Server(Socket):
    """Minimal broadcast chat server built on the project Socket wrapper."""

    def __init__(self):
        super(Server, self).__init__()
        print("server listening")
        # Sockets of all currently connected clients.
        self.users = []

    def set_up(self):
        """Bind, listen and start accepting clients (blocks forever)."""
        self.bind(("192.168.0.109", 1337))
        self.listen(0)  # backlog of 0: no pending-connection queue
        self.accept_sockets()

    def send_data(self, data):
        """Broadcast raw bytes to every connected user, dropping dead sockets.

        Iterates over a snapshot of ``self.users``: removing from the live
        list while iterating it would silently skip the element that follows
        each removal (the bug in the original implementation).
        """
        for user in list(self.users):
            try:
                user.send(data)
            except ConnectionResetError:
                self.users.remove(user)

    def listen_socket(self, listened_socket=None):
        """Per-client reader thread: relay every received message to all users.

        Drops a client after more than 5 empty messages (anti-spam); the
        raised ConnectionResetError terminates this reader thread.
        """
        empty_count = 0
        while True:
            data = listened_socket.recv(2048)
            # NOTE(review): the [0:-2] slice assumes every message ends with
            # a two-byte terminator (presumably '\r\n') — confirm the client
            # protocol guarantees this.
            if data.decode("utf-8")[0:-2] == '':
                empty_count += 1
                if empty_count > 5:
                    print("deleting user: Antispam")
                    self.users.pop(self.users.index(listened_socket))
                    raise ConnectionResetError
            print(f"User sent {data}")
            self.send_data(data)

    def accept_sockets(self):
        """Accept clients forever, spawning one reader thread per connection."""
        while True:
            user_socket, address = self.accept()
            print(f"User <{address[0]}> connected!")
            self.users.append(user_socket)  # register the new user
            print(len(self.users))

            listen_accepted_user = threading.Thread(
                target=self.listen_socket,
                args=(user_socket,))
            listen_accepted_user.start()
if __name__ == '__main__':
    # Script entry point: build the server and start accepting clients.
    chat_server = Server()
    chat_server.set_up()
|
normal
|
{
"blob_id": "2027904401e5be7b1c95eebec3a1e6a88c25660c",
"index": 9338,
"step-1": "<mask token>\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n <mask token>\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode('utf-8')[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print('deleting user: Antispam')\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n print(f'User sent {data}')\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode('utf-8')[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print('deleting user: Antispam')\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n print(f'User sent {data}')\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\nif __name__ == '__main__':\n server = Server()\n server.set_up()\n",
"step-4": "from Socket import Socket\nimport threading\n\n\nclass Server(Socket):\n\n def __init__(self):\n super(Server, self).__init__()\n print('server listening')\n self.users = []\n\n def set_up(self):\n self.bind(('192.168.0.109', 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode('utf-8')[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print('deleting user: Antispam')\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n print(f'User sent {data}')\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f'User <{address[0]}> connected!')\n self.users.append(user_socket)\n print(len(self.users))\n listen_accepted_user = threading.Thread(target=self.\n listen_socket, args=(user_socket,))\n listen_accepted_user.start()\n\n\nif __name__ == '__main__':\n server = Server()\n server.set_up()\n",
"step-5": "from Socket import Socket\nimport threading\n\nclass Server(Socket):\n def __init__(self):\n super(Server, self).__init__()\n\n print(\"server listening\")\n\n self.users = []\n\n def set_up(self):\n self.bind((\"192.168.0.109\", 1337))\n self.listen(0)\n self.accept_sockets()\n\n def send_data(self, data):\n for user in self.users:\n try:\n user.send(data)\n except ConnectionResetError:\n self.users.pop(self.users.index(user))\n pass\n\n def listen_socket(self, listened_socket=None):\n countForDel = 0\n while True:\n data = listened_socket.recv(2048)\n if data.decode(\"utf-8\")[0:-2] == '':\n countForDel += 1\n if countForDel > 5:\n print(\"deleting user: Antispam\")\n self.users.pop(self.users.index(listened_socket))\n raise ConnectionResetError\n \n print(f\"User sent {data}\")\n self.send_data(data)\n\n def accept_sockets(self):\n while True:\n user_socket, address = self.accept()\n print(f\"User <{address[0]}> connected!\")\n self.users.append(user_socket) # добавляется юзер\n print(len(self.users))\n\n listen_accepted_user = threading.Thread(\n target=self.listen_socket,\n args=(user_socket,))\n\n listen_accepted_user.start()\n\n\nif __name__ == '__main__':\n server = Server()\n server.set_up()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from deuces.card import Card
from deuces.deck import Deck
from fast_utils.hand_strength.original_HS import *
from fast_utils.hand_strength.nn_HS import encode_hs
from fast_utils.expected_hand_strength.nn_EHS import *
from keras.models import load_model
def read_lookup_table(hole_cards, lookup_table):
    """Look up the precomputed preflop (EHS, EHS^2) for two hole cards.

    hole_cards: list of int (deuces card encodings).
    lookup_table: dict keyed by canonical 2-tuples of card strings, as
        read from preflop_EHSs.txt.
    Returns the (EHS, EHS^2) tuple stored in the table.
    """
    # Sort the encoded cards descending so the lookup key is deterministic.
    ranked = sorted(hole_cards, reverse=True)
    strs = [Card.int_to_str(card) for card in ranked]
    suited = strs[0][1] == strs[1][1]
    # Canonical suit encoding: the first card always gets 'd'; the second
    # gets 'd' when the hand is suited, 's' otherwise.
    high = strs[0][0] + 'd'
    low = strs[1][0] + ('d' if suited else 's')
    return lookup_table[(high, low)]
|
normal
|
{
"blob_id": "8503998fc881f47dc695d3ea4c7f56fa65a96e8a",
"index": 2874,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_lookup_table(hole_cards, lookup_table):\n \"\"\"\n Reads the preflop lookup table preflop_EHSs.txt.\n Args: \n hole_cards: list of int (deuces cards)\n lookup_table: read from preflop_EHSs.txt\n Return:\n tuple (float, float): EHS, EHS^2\n \"\"\"\n sorted_hole = sorted(hole_cards)\n sorted_hole.reverse()\n card_strings = [Card.int_to_str(card) for card in sorted_hole]\n if card_strings[0][1] != card_strings[1][1]:\n suited = False\n else:\n suited = True\n card_strings[0] = card_strings[0][0] + 'd'\n if suited:\n card_strings[1] = card_strings[1][0] + 'd'\n else:\n card_strings[1] = card_strings[1][0] + 's'\n card_strings = tuple(card_strings)\n return lookup_table[card_strings]\n",
"step-3": "from deuces.card import Card\nfrom deuces.deck import Deck\nfrom fast_utils.hand_strength.original_HS import *\nfrom fast_utils.hand_strength.nn_HS import encode_hs\nfrom fast_utils.expected_hand_strength.nn_EHS import *\nfrom keras.models import load_model\n\n\ndef read_lookup_table(hole_cards, lookup_table):\n \"\"\"\n Reads the preflop lookup table preflop_EHSs.txt.\n Args: \n hole_cards: list of int (deuces cards)\n lookup_table: read from preflop_EHSs.txt\n Return:\n tuple (float, float): EHS, EHS^2\n \"\"\"\n sorted_hole = sorted(hole_cards)\n sorted_hole.reverse()\n card_strings = [Card.int_to_str(card) for card in sorted_hole]\n if card_strings[0][1] != card_strings[1][1]:\n suited = False\n else:\n suited = True\n card_strings[0] = card_strings[0][0] + 'd'\n if suited:\n card_strings[1] = card_strings[1][0] + 'd'\n else:\n card_strings[1] = card_strings[1][0] + 's'\n card_strings = tuple(card_strings)\n return lookup_table[card_strings]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from random import randint
# Interactive multiplication quiz: asks for a question count, poses that
# many random products, then reports the tally and a 0-10 grade.
cantidad = int(input("Numero de preguntas: "))
while cantidad <= 0:
    print("El numero de preguntas debe ser al menos 1")
    cantidad = int(input("Numero de preguntas: "))

aciertos = 0
fallos = 0
for _ in range(cantidad):
    factor_a = randint(2, 10)
    factor_b = randint(2, 10)
    respuesta = int(input("¿Cuanto es %d * %d? " % (factor_a, factor_b)))
    if respuesta == factor_a * factor_b:
        print("Correcto")
        aciertos += 1
    else:
        print("Incorrecto")
        fallos += 1

print("Ha contestado bien", aciertos, "preguntas")
print("Ha contestado mal", fallos, "preguntas")
nota = (aciertos / cantidad) * 10
print("Le corresponde una nota de %.2f" % nota)
|
normal
|
{
"blob_id": "48bc5d4b191fa631650b60240560dbece6396312",
"index": 655,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile cantidad <= 0:\n print('El numero de preguntas debe ser al menos 1')\n cantidad = int(input('Numero de preguntas: '))\nfor i in range(cantidad):\n numero = randint(2, 10)\n numero2 = randint(2, 10)\n aleatorio = int(input('¿Cuanto es %d * %d? ' % (numero, numero2)))\n if numero * numero2 == aleatorio:\n print('Correcto')\n contador_bien = contador_bien + 1\n else:\n print('Incorrecto')\n contador_mal = contador_mal + 1\nprint('Ha contestado bien', contador_bien, 'preguntas')\nprint('Ha contestado mal', contador_mal, 'preguntas')\n<mask token>\nprint('Le corresponde una nota de %.2f' % nota)\n",
"step-3": "<mask token>\ncantidad = int(input('Numero de preguntas: '))\ncontador_bien = 0\ncontador_mal = 0\nwhile cantidad <= 0:\n print('El numero de preguntas debe ser al menos 1')\n cantidad = int(input('Numero de preguntas: '))\nfor i in range(cantidad):\n numero = randint(2, 10)\n numero2 = randint(2, 10)\n aleatorio = int(input('¿Cuanto es %d * %d? ' % (numero, numero2)))\n if numero * numero2 == aleatorio:\n print('Correcto')\n contador_bien = contador_bien + 1\n else:\n print('Incorrecto')\n contador_mal = contador_mal + 1\nprint('Ha contestado bien', contador_bien, 'preguntas')\nprint('Ha contestado mal', contador_mal, 'preguntas')\nnota = contador_bien / cantidad * 10\nprint('Le corresponde una nota de %.2f' % nota)\n",
"step-4": "from random import randint\ncantidad = int(input('Numero de preguntas: '))\ncontador_bien = 0\ncontador_mal = 0\nwhile cantidad <= 0:\n print('El numero de preguntas debe ser al menos 1')\n cantidad = int(input('Numero de preguntas: '))\nfor i in range(cantidad):\n numero = randint(2, 10)\n numero2 = randint(2, 10)\n aleatorio = int(input('¿Cuanto es %d * %d? ' % (numero, numero2)))\n if numero * numero2 == aleatorio:\n print('Correcto')\n contador_bien = contador_bien + 1\n else:\n print('Incorrecto')\n contador_mal = contador_mal + 1\nprint('Ha contestado bien', contador_bien, 'preguntas')\nprint('Ha contestado mal', contador_mal, 'preguntas')\nnota = contador_bien / cantidad * 10\nprint('Le corresponde una nota de %.2f' % nota)\n",
"step-5": "from random import randint\ncantidad = int(input(\"Numero de preguntas: \"))\ncontador_bien = 0\ncontador_mal = 0\n\nwhile cantidad <= 0:\n\tprint (\"El numero de preguntas debe ser al menos 1\")\n\tcantidad = int(input(\"Numero de preguntas: \"))\n\nfor i in range(cantidad):\n\tnumero = randint(2,10)\n\tnumero2 = randint(2,10)\n\taleatorio = int(input(\"¿Cuanto es %d * %d? \" % (numero, numero2)))\n\n\tif numero * numero2 == aleatorio:\n\t\tprint (\"Correcto\")\n\t\tcontador_bien = contador_bien + 1\n\telse:\n\t\tprint (\"Incorrecto\")\n\t\tcontador_mal = contador_mal + 1\n\nprint (\"Ha contestado bien\", contador_bien, \"preguntas\")\nprint (\"Ha contestado mal\", contador_mal, \"preguntas\")\nnota = (contador_bien / cantidad) * 10\nprint (\"Le corresponde una nota de %.2f\"%nota)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(mydict)
print(mylist0)
print(mylist1)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist0
else:
mydict[c] = mylist0
print(mydict)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist1
else:
mydict[c] = mylist1
print(mydict)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mydict = {}
mylist0 = np.array([1, 2, 3, 4, 5])
mylist1 = np.array([2, 3, 4, 5, 6])
print(mydict)
print(mylist0)
print(mylist1)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist0
else:
mydict[c] = mylist0
print(mydict)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist1
else:
mydict[c] = mylist1
print(mydict)
<|reserved_special_token_1|>
import numpy as np
mydict = {}
mylist0 = np.array([1, 2, 3, 4, 5])
mylist1 = np.array([2, 3, 4, 5, 6])
print(mydict)
print(mylist0)
print(mylist1)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist0
else:
mydict[c] = mylist0
print(mydict)
for c in ('0', '1'):
if c in mydict:
mydict[c] += mylist1
else:
mydict[c] = mylist1
print(mydict)
|
flexible
|
{
"blob_id": "6e5b8be6182f39f185f4547f0abd84a4e404bf34",
"index": 1861,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n",
"step-3": "<mask token>\nmydict = {}\nmylist0 = np.array([1, 2, 3, 4, 5])\nmylist1 = np.array([2, 3, 4, 5, 6])\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n",
"step-4": "import numpy as np\nmydict = {}\nmylist0 = np.array([1, 2, 3, 4, 5])\nmylist1 = np.array([2, 3, 4, 5, 6])\nprint(mydict)\nprint(mylist0)\nprint(mylist1)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist0\n else:\n mydict[c] = mylist0\nprint(mydict)\nfor c in ('0', '1'):\n if c in mydict:\n mydict[c] += mylist1\n else:\n mydict[c] = mylist1\nprint(mydict)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
sleep(0.5)
r = random.choice(mineral)
x, y, z = mc.entity.getTilePos(myID)
mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mc = Minecraft.create()
myID = mc.getPlayerEntityId('Baymax1112')
mineral = [14, 15, 16, 56, 73, 129, 57]
while True:
sleep(0.5)
r = random.choice(mineral)
x, y, z = mc.entity.getTilePos(myID)
mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)
<|reserved_special_token_1|>
from mcpi.minecraft import Minecraft
from time import sleep
import random
mc = Minecraft.create()
myID = mc.getPlayerEntityId('Baymax1112')
mineral = [14, 15, 16, 56, 73, 129, 57]
while True:
sleep(0.5)
r = random.choice(mineral)
x, y, z = mc.entity.getTilePos(myID)
mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)
<|reserved_special_token_1|>
from mcpi.minecraft import Minecraft
from time import sleep
import random
mc = Minecraft.create()
myID=mc.getPlayerEntityId("Baymax1112")
mineral = [14,15,16,56,73,129,57]
while True:
sleep(0.5)
r=random.choice(mineral)
x,y,z = mc.entity.getTilePos(myID)
mc.setBlocks(x+1,y+3,z+1,x-1,y-3,z-1,r)
|
flexible
|
{
"blob_id": "b28ae19f31ae746f901dea645dfeaa211a15cd31",
"index": 1879,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n sleep(0.5)\n r = random.choice(mineral)\n x, y, z = mc.entity.getTilePos(myID)\n mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)\n",
"step-3": "<mask token>\nmc = Minecraft.create()\nmyID = mc.getPlayerEntityId('Baymax1112')\nmineral = [14, 15, 16, 56, 73, 129, 57]\nwhile True:\n sleep(0.5)\n r = random.choice(mineral)\n x, y, z = mc.entity.getTilePos(myID)\n mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)\n",
"step-4": "from mcpi.minecraft import Minecraft\nfrom time import sleep\nimport random\nmc = Minecraft.create()\nmyID = mc.getPlayerEntityId('Baymax1112')\nmineral = [14, 15, 16, 56, 73, 129, 57]\nwhile True:\n sleep(0.5)\n r = random.choice(mineral)\n x, y, z = mc.entity.getTilePos(myID)\n mc.setBlocks(x + 1, y + 3, z + 1, x - 1, y - 3, z - 1, r)\n",
"step-5": "from mcpi.minecraft import Minecraft\nfrom time import sleep\nimport random \nmc = Minecraft.create()\nmyID=mc.getPlayerEntityId(\"Baymax1112\")\nmineral = [14,15,16,56,73,129,57]\nwhile True:\n sleep(0.5)\n r=random.choice(mineral)\n x,y,z = mc.entity.getTilePos(myID)\n mc.setBlocks(x+1,y+3,z+1,x-1,y-3,z-1,r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_results_df(fname, problem):
    """Parse one raw run-results csv into a tidy dataframe.

    fname: pathlib.Path to a tab-separated raw results file (must expose
        ``.name`` for the error message, so a plain str will not do).
    problem: str label written into the 'Problem' column.

    Returns (DataFrame, '') on success, or (None, <error message>) when
    the file has fewer rows than expected.

    NOTE(review): assumes the raw csv stacks each searcher's six metric
    values on the rows directly below its 'Searcher' anchor row, in
    val_cols order -- confirm against the files the run script emits.
    """
    t = '\t'
    # The six per-search metrics expected below each 'Searcher' row.
    val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',
        'PlanLength', 'ElapsedSeconds']
    err = ''
    df = pd.read_csv(fname, sep=t)
    # Fewer rows than metrics means the run was cut short.
    if df.shape[0] < len(val_cols):
        err = f'Data for {fname.name} is incomplete.'
        return None, err
    df.columns = ['c', 'Searcher']
    # Widen the frame with one (initially NaN) column per metric.
    df = df.reindex(columns=df.columns.tolist() + val_cols)
    # Rows whose first cell is 'Searcher' anchor each searcher's record.
    sr = df.loc[df.c == 'Searcher', 'Searcher']
    for idx, sr_row in sr.items():
        j = idx
        # Metric values sit on the rows following the anchor, one per
        # widened column; copy them up onto the anchor row, typed.
        for c in df.columns[2:].tolist():
            j += 1
            if c == 'ElapsedSeconds':
                df.loc[idx, c] = float(df.loc[j, 'Searcher'])
            else:
                df.loc[idx, c] = int(df.loc[j, 'Searcher'])
    # Raw value rows never got their widened cells filled -> still NaN.
    df.dropna(inplace=True)
    df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)
    df['c'] = problem
    df.rename(columns={'c': 'Problem'}, inplace=True)
    df.reset_index(drop=True, inplace=True)
    return df, ''
<|reserved_special_token_0|>
def plans_length(dfa, which):
    """Summarize searches by single- vs double-digit plan length.

    dfa: concatenation of the per-problem frames (df1..df4); must carry
        'Air cargo problem', 'search_fn' and 'PlanLength' columns.
    which: 'double' selects rows with PlanLength >= 10; anything else
        selects PlanLength < 10.

    Returns (df_fn_html, text, dfout):
        df_fn_html: HTML frequency table of search functions in the subset,
        text: HTML summary paragraph,
        dfout: the selected rows sorted by descending PlanLength.
    """
    if which == 'double':
        msk = dfa.PlanLength >= 10
        col2 = 'Frequency where PlanLength >=10'
    else:
        msk = dfa.PlanLength < 10
        col2 = 'Frequency where PlanLength <10'
    dfa_rows = dfa.shape[0]
    dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
    uniq_probs = dfout['Air cargo problem'].unique()
    n_plans = dfout.shape[0]
    # Frequency of each search function within the selected subset.
    # (The original also computed an unused Searcher value_counts; removed.)
    fn_cnt = dfout['search_fn'].value_counts()
    df_fn = fn_cnt.to_frame()
    df_fn.reset_index(drop=False, inplace=True)
    df_fn.columns = ['Search function', col2]
    df_fn_html = df_fn.to_html(index=False, justify='center')
    # Move the centering style off the header row and onto the <table>
    # tag (in place of the 'dataframe' class attribute).
    replace_str1 = ' style="text-align: center;"'
    replace_str2 = 'class="dataframe"'
    df_fn_html = df_fn_html.replace(replace_str1, '')
    df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
    pct_plans = n_plans / dfa_rows
    # Share of the subset accounted for by the two most frequent functions.
    top2_fn = fn_cnt[0:2].sum()
    pct_top2_fn = top2_fn / n_plans
    # NOTE(review): the '{which}-digit or longer' wording fits 'double'
    # but reads oddly for 'single'; kept as-is to preserve output.
    text = (
        f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'
        )
    text += (
        f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'
        )
    if len(uniq_probs) < 4:
        text += ' And this occurs only for Problems: '
        pro = ','.join('{}' for p in uniq_probs) + '.<br>'
        text += pro.format(*uniq_probs)
    else:
        text += ' And this occurs for all Problems.'
    text += '<br>'
    return df_fn_html, text, dfout
def make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95
    ), to_file='', show=False, excluded=None):
    """Draw two side-by-side bar plots comparing a pair of problems.

    df_list: list of two DataFrames, one per problem, sharing the same
        Searcher order.
    x_col: column holding the (single, positive) action count per frame.
    y_col: metric to plot; 'ElapsedSeconds' is titled 'Elapsed time' and,
        for the 3/4 problem pair, plotted from the 'Minutes' column.
    problems: axis labels for the two subplots.
    legend_bbox: bbox_to_anchor for the legend on the right-hand plot.
    to_file: when non-empty, save the figure to this path.
    show: when True, return the axes for further tweaking.
    excluded: list of (1-based index, name) pairs to flag with ' X' in
        the legend (used for problem 3), or None.
    """
    import matplotlib.patches as mpatches

    def despine(ax):
        # Hide the top/right spines for a cleaner look.
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
    # Each frame must contribute exactly one positive action count.
    a1 = df_list[0][x_col].unique().astype(int)
    a1 = a1[a1 > 0]
    a2 = df_list[1][x_col].unique().astype(int)
    a2 = a2[a2 > 0]
    assert len(a1) == len(a2) == 1
    action_nums = [a1[0], a2[0]]
    p1 = df_list[0]['Air cargo problem'].iloc[0]
    p2 = df_list[1]['Air cargo problem'].iloc[0]
    search = df_list[0].Searcher.tolist()
    s_len = len(search)
    # Spread the searcher colors evenly across the viridis colormap.
    cmap = plt.get_cmap('viridis')
    m = cmap.N // s_len
    colors = [cmap.colors[i * m] for i in range(s_len)]
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
    if y_col == 'ElapsedSeconds':
        ty_col = 'Elapsed time'
        # BUGFIX: original tested the undefined name `p` (NameError at
        # runtime); the second problem of the pair is held by `p2`.
        if p1 == 3 or p2 == 4:
            y_col = 'Minutes'
    else:
        ty_col = y_col
    plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,
        fontsize=14)
    for i, df in enumerate(df_list):
        ylog = False
        ylab = f'{y_col}'
        # NewNodes spans orders of magnitude on the harder problems, so
        # switch that axis to a log scale.
        if (i == 1 or p1 == 3) and y_col == 'NewNodes':
            ylog = True
            ylab += ' (log)'
        axs[i].set_ylabel(ylab, fontsize=12)
        df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)
        t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
        axs[i].set_xlabel(t, fontsize=12)
        axs[i].set_xticks([])
        despine(axs[i])
    legt = 'Searchers'
    new_lgd = p1 == 3 and excluded is not None
    if new_lgd:
        legt += ' (X :: excluded)'
        # 0-based positions (in SEARCHES) of the excluded searches.
        x_idx = [(excluded[i][0] - 1) for i in range(len(excluded))]
    legend_patches = []
    for i, c in enumerate(colors):
        # Exclusion is looked up on the raw name; the displayed label is
        # prettified with ' + ' separators in all cases.
        lab = search[i].replace(' ', ' + ')
        if new_lgd and SEARCHES.index(search[i]) in x_idx:
            lab += ' X'
        legend_patches.append(mpatches.Patch(color=c, label=lab))
    axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',
        fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',
        labelspacing=0.6, fancybox=True)
    plt.tight_layout()
    if to_file:
        plt.savefig(to_file)
    if show:
        return axs
<|reserved_special_token_0|>
def order_analysis(df2, df1, column_to_compare):
    """Compare one column across two frames on a log scale.

    df2 holds the larger values. Returns a tuple of the rounded mean
    order of magnitude of df2/df1 and, for rows exceeding that mean,
    the matching 'Searcher' entries from df2 (largest ratios first).
    """
    ratios = df2[column_to_compare] / df1[column_to_compare]
    magnitudes = np.round(np.log(ratios), 0).sort_values(ascending=False)
    average_order = int(np.round(magnitudes.mean(), 0))
    over = magnitudes[magnitudes > average_order].index.tolist()
    return average_order, df2.loc[over, 'Searcher']
<|reserved_special_token_0|>
def paragraph_p12(candidates_tup, return_html=False):
    """
    For displaying the analysis of problems 1 & 2.

    candidates_tup: iterable of (1-based index, searcher name) pairs,
        e.g. the output of get_elim_candidates.
    return_html: when True return the raw HTML string; otherwise return
        an IPython Markdown object for direct display.
    """
    # Render each candidate as a <dt> line; {i:>2} right-aligns the index.
    elim_list = ''
    for i, c in candidates_tup:
        elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'
    text = (
        '<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">')
    text += """On the basis of Figures 1 and 2, which show the number of new nodes created, 
    and the time spent by each search function, respectively, the searches that are candidates 
    for elimination for more complex problems are those at the intersection of the average-ranked 
    costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
    text += f'<dl>{elim_list}</dl></p></pre>'
    if return_html:
        return text
    else:
        return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style='{width: 80%}'):
    """Wrap an HTML fragment in a <div> carrying the given style.

    div_html_text: the HTML/markdown string to wrap.
    output_string: when True, return the wrapped string itself (paste it
        into a cell, then switch the cell mode to Markdown); when False,
        return an IPython Markdown object displayed in an output cell.
    div_style: everything that follows ``style=`` inside the <div> tag.
    """
    wrapped = '<div style="{}">{}</div>'.format(div_style, div_html_text)
    return wrapped if output_string else Markdown(wrapped)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_prob_specs():
    """Tabulate the size parameters of the four air-cargo problems.

    Returns a DataFrame with one row per problem: its display name, a
    1-based problem number, and the counts of cargos, planes, airports
    and goal conditions.

    NOTE(review): relies on the module-level `problems` name list and
    the `acp` problem constructors staying in the same 1..4 order.
    """
    # Instantiate the four problems to read their component counts.
    Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),
        acp.air_cargo_p4()]
    problems_specs = {'Problem': [name for name in problems],
        'Air cargo problem': [(i + 1) for i in range(len(problems))],
        'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for
        p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':
        [len(p.goal) for p in Probs]}
    return pd.DataFrame(problems_specs)
<|reserved_special_token_0|>
def df2tsv(df, fname, replace=False):
    """Persist *df* to *fname* as a tab-separated csv.

    An existing file is left untouched unless ``replace`` is True.
    """
    if not Path(fname).exists() or replace:
        df.to_csv(fname, sep='\t')
def get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=
    False, replace=False):
    """Combine all raw result files for one problem into a dataframe.

    file_stem: common prefix of the raw files, e.g. 'prob_2'.
    problem: label written into the frames, e.g. 'Air Cargo Problem 2'.
    raw_dir: directory globbed for files starting with file_stem.
    out_dir: directory holding the cached tab-separated copy.
    file_as_tsv: when True, read/write the cache file file_stem + '_df.csv'.
    replace: when True, rebuild and overwrite an existing cache file.

    Returns the combined dataframe (indexed by 1-based position of each
    searcher in SEARCHES), or None when inputs are missing or no raw
    files are found.
    """
    if file_stem is None or problem is None:
        print('file_stem and problem must have a value.')
        return
    t = '\t'
    sfx = ['.csv', '_df.csv']
    fout = None
    if file_as_tsv:
        fout = Path(out_dir).joinpath(file_stem + sfx[1])
        # Cache hit: reuse the stored frame unless a rebuild is requested.
        if fout.exists() and not replace:
            df = pd.read_csv(fout, sep=t)
            try:
                # Drop the index column that to_csv stored; if it is
                # missing, fall through and rebuild from the raw files.
                return df.drop('Unnamed: 0', axis=1)
            except KeyError:
                pass
    pfiles = list(Path(raw_dir).glob(file_stem + '*'))
    if len(pfiles) == 0:
        print(f'No raw files with stem: {file_stem}')
        return
    dflist = []
    for f in pfiles:
        df, err = get_results_df(f, problem)
        if df is not None:
            # Attach the per-problem specs and index the rows by the
            # 1-based position of each searcher in the module-level
            # SEARCHES list.
            df = df.merge(specs)
            df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)
            df['index'] = df['index'].astype(int)
            df.set_index('index', drop=True, inplace=True)
            dflist.append(df)
            del df
        else:
            print(f'Error from get_results_df:\n\t{err}')
    dfout = pd.concat(dflist, ignore_index=False)
    dfout.sort_index(inplace=True)
    if file_as_tsv:
        df2tsv(dfout, fout, replace=replace)
    return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',
'PlanLength', 'ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
df.columns = ['c', 'Searcher']
df = df.reindex(columns=df.columns.tolist() + val_cols)
sr = df.loc[df.c == 'Searcher', 'Searcher']
for idx, sr_row in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def concat_all_dfs(dflist):
    """Stack the per-problem frames into one analysis frame.

    Keeps only completed runs (Actions > 0) and the columns used
    downstream; adds 'id' (the original searcher index) and 'search_fn'
    (the searcher name up to its first space).
    """
    combined = pd.concat(dflist, ignore_index=False)
    combined.reset_index(drop=False, inplace=True)
    combined.rename(columns={'index': 'id'}, inplace=True)
    # Positional drop: the three columns just before the last, plus the
    # bookkeeping columns not needed for the analysis.
    to_drop = combined.columns[-4:-1].tolist() + ['Problem', 'Minutes',
        'GoalTests']
    trimmed = combined.drop(to_drop, axis=1)
    trimmed['search_fn'] = trimmed.Searcher.str.partition(' ')[0]
    trimmed = trimmed[['Air cargo problem', 'id', 'search_fn', 'Searcher',
        'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]
    return trimmed[trimmed['Actions'] > 0]
def plans_length(dfa, which):
"""
dfa: frame of concatenated df1 to df4.
Analysis of plan length for which in ['double', 'single']:
PlanLength is double(single)-digit.
"""
if which == 'double':
msk = dfa.PlanLength >= 10
col2 = 'Frequency where PlanLength >=10'
else:
msk = dfa.PlanLength < 10
col2 = 'Frequency where PlanLength <10'
dfa_rows = dfa.shape[0]
dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
uniq_probs = dfout['Air cargo problem'].unique()
n_plans = dfout.shape[0]
searcher_cnt = dfout['Searcher'].value_counts()
fn_cnt = dfout['search_fn'].value_counts()
df_fn = fn_cnt.to_frame()
df_fn.reset_index(drop=False, inplace=True)
df_fn.columns = ['Search function', col2]
df_fn_html = df_fn.to_html(index=False, justify='center')
replace_str1 = ' style="text-align: center;"'
replace_str2 = 'class="dataframe"'
df_fn_html = df_fn_html.replace(replace_str1, '')
df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
pct_plans = n_plans / dfa_rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn / n_plans
text = (
f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'
)
text += (
f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'
)
if len(uniq_probs) < 4:
text += ' And this occurs only for Problems: '
pro = ','.join('{}' for p in uniq_probs) + '.<br>'
text += pro.format(*uniq_probs)
else:
text += ' And this occurs for all Problems.'
text += '<br>'
return df_fn_html, text, dfout
def make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95
), to_file='', show=False, excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1 > 0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2 > 0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
search = df_list[0].Searcher.tolist()
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i * m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
if p1 == 3 or p == 4:
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,
fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(axs[i])
legt = 'Searchers'
new_lgd = p1 == 3 and excluded is not None
if new_lgd:
legt += ' (X :: excluded)'
excluded_len = len(excluded)
x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]
legend_patches = []
for i, c in enumerate(colors):
lab = search[i]
if new_lgd:
if SEARCHES.index(lab) in x_idx:
lab = lab.replace(' ', ' + ')
lab += ' X'
else:
lab = lab.replace(' ', ' + ')
else:
lab = lab.replace(' ', ' + ')
legend_patches.append(mpatches.Patch(color=c, label=lab))
axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',
fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',
labelspacing=0.6, fancybox=True)
plt.tight_layout()
if to_file:
plt.savefig(to_file)
if show:
return axs
<|reserved_special_token_0|>
def order_analysis(df2, df1, column_to_compare):
"""
df2: has the large values.
"""
colA_larger_values = df2[column_to_compare]
colA_smaller_values = df1[column_to_compare]
mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)
mag.sort_values(ascending=False, inplace=True)
mag_aver = int(np.round(mag.mean(), 0))
ma = mag[mag > mag_aver].index.tolist()
above_multiples = mag_aver, df2.loc[ma, 'Searcher']
return above_multiples
def comparison_paragraph(df2, df1, heading, column_to_compare, return_html=
    False):
    """Build an HTML paragraph comparing one metric across two problems.

    df2/df1: frames for the larger/smaller problem of the pair.
    heading: section title, also lower-cased into the sentence body.
    column_to_compare: metric column passed to order_analysis.
    return_html: True -> raw HTML string, False -> IPython Markdown.

    NOTE(review): `format_multiples` is defined elsewhere in this module;
    confirm it renders the Series of searcher names as inline text.
    """
    # Problem number is the last character of e.g. 'Air Cargo Problem 2'.
    p1 = df1.loc[0, 'Problem'][-1]
    p2 = df2.loc[0, 'Problem'][-1]
    order_aver, searches_above = order_analysis(df2, df1, column_to_compare)
    above = format_multiples(searches_above)
    headinglc = heading.lower()
    text = (
        f'<h3>* {heading}</h3><p style="font-size:110%;">For Problems {p1} and {p2}, '
        )
    text += (
        f'the <i>average</i> order of magnitude difference in {headinglc} is ')
    text += (
        f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'
        )
    if return_html:
        return text
    else:
        return Markdown(text)
def get_elim_candidates(df2, df1):
    """
    For the analysis of problems 1 & 2.
    List the costliest searches: candidates for elimination on more complex problems.

    Returns a list of (1-based SEARCHES index, searcher name) tuples, or
    None when df1 does not hold Problem 1 data.
    """
    # Guard: this analysis only applies when df1 is the Problem 1 frame.
    if df1.loc[1, 'Problem'] != problems[0]:
        return
    # Searches that exceed the average order-of-magnitude growth in new
    # nodes created and in elapsed time, respectively.
    nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')
    time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')
    # A candidate must rank among the top offenders on BOTH metrics.
    elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(
        time_above[:time_order_av]))
    out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]
    return out
def paragraph_p12(candidates_tup, return_html=False):
"""
For displaying the analysis of problems 1 & 2.
"""
elim_list = ''
for i, c in candidates_tup:
elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'
text = (
'<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">')
text += """On the basis of Figures 1 and 2, which show the number of new nodes created,
and the time spent by each search function, respectively, the searches that are candidates
for elimination for more complex problems are those at the intersection of the average-ranked
costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
text += f'<dl>{elim_list}</dl></p></pre>'
if return_html:
return text
else:
return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style=
'{width: 80%}'):
"""
Wrap an html code str inside a div.
div_style: whatever follows style= within the <div>
Behaviour with `output_string=True`:
The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')
The only thing to do is change the cell mode to Markdown.
If `output_string=False`, the HTML/md output is displayed in an output cell.
"""
div = f'<div style="{div_style}">{div_html_text}</div>'
if output_string:
return div
else:
return Markdown(div)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.style.use('seaborn-muted')
<|reserved_special_token_0|>
def get_prob_specs():
Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),
acp.air_cargo_p4()]
problems_specs = {'Problem': [name for name in problems],
'Air cargo problem': [(i + 1) for i in range(len(problems))],
'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for
p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':
[len(p.goal) for p in Probs]}
return pd.DataFrame(problems_specs)
<|reserved_special_token_0|>
def df2tsv(df, fname, replace=False):
if Path(fname).exists():
if replace:
df.to_csv(fname, sep='\t')
return
df.to_csv(fname, sep='\t')
return
def get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=
False, replace=False):
"""
Combine all processed files of a problem found in Path(data_dir) with given stem.
The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.
Input example:
file_stem = 'prob_2'
problem = 'Air Cargo Problem 2'
Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.
"""
if file_stem is None or problem is None:
print('file_stem and problem must have a value.')
return
t = '\t'
sfx = ['.csv', '_df.csv']
fout = None
if file_as_tsv:
fout = Path(out_dir).joinpath(file_stem + sfx[1])
if fout.exists() and not replace:
df = pd.read_csv(fout, sep=t)
try:
return df.drop('Unnamed: 0', axis=1)
except KeyError:
pass
pfiles = list(Path(raw_dir).glob(file_stem + '*'))
if len(pfiles) == 0:
print(f'No raw files with stem: {file_stem}')
return
dflist = []
for f in pfiles:
df, err = get_results_df(f, problem)
if df is not None:
df = df.merge(specs)
df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)
df['index'] = df['index'].astype(int)
df.set_index('index', drop=True, inplace=True)
dflist.append(df)
del df
else:
print(f'Error from get_results_df:\n\t{err}')
dfout = pd.concat(dflist, ignore_index=False)
dfout.sort_index(inplace=True)
if file_as_tsv:
df2tsv(dfout, fout, replace=replace)
return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',
'PlanLength', 'ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
df.columns = ['c', 'Searcher']
df = df.reindex(columns=df.columns.tolist() + val_cols)
sr = df.loc[df.c == 'Searcher', 'Searcher']
for idx, sr_row in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def concat_all_dfs(dflist):
"""
Output combined df for complete runs, Actions>0.
"""
dfall = pd.concat(dflist, ignore_index=False)
dfall.reset_index(drop=False, inplace=True)
dfall.rename(columns={'index': 'id'}, inplace=True)
drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',
'GoalTests']
dfa = dfall.drop(drop_cols, axis=1)
del dfall
dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',
'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]
return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
    """
    dfa: frame of concatenated df1 to df4.
    Analysis of plan length for which in ['double', 'single']:
       PlanLength is double(single)-digit.

    Returns
    -------
    (str, str, DataFrame)
        HTML table of search-function frequencies in the subset,
        an HTML summary paragraph, and the filtered frame sorted by
        descending PlanLength.
    """
    if which == 'double':
        msk = dfa.PlanLength >= 10
        col2 = 'Frequency where PlanLength >=10'
    else:
        msk = dfa.PlanLength < 10
        col2 = 'Frequency where PlanLength <10'
    dfa_rows = dfa.shape[0]
    dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
    uniq_probs = dfout['Air cargo problem'].unique()
    n_plans = dfout.shape[0]
    # Frequency of each search function within the filtered subset
    # (value_counts sorts most-frequent first).
    fn_cnt = dfout['search_fn'].value_counts()
    df_fn = fn_cnt.to_frame()
    df_fn.reset_index(drop=False, inplace=True)
    df_fn.columns = ['Search function', col2]
    df_fn_html = df_fn.to_html(index=False, justify='center')
    # Move pandas' inline centering style off the headers and onto the
    # <table> tag itself:
    replace_str1 = ' style="text-align: center;"'
    replace_str2 = 'class="dataframe"'
    df_fn_html = df_fn_html.replace(replace_str1, '')
    df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
    pct_plans = n_plans / dfa_rows
    # Positional slice (.iloc, not label slicing): two most frequent fns.
    top2_fn = fn_cnt.iloc[0:2].sum()
    pct_top2_fn = top2_fn / n_plans
    text = (
        f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'
        )
    top_names = fn_cnt.index.tolist()[:2]
    if len(top_names) >= 2:
        text += (
            f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{top_names[0]}` and `{top_names[1]}`.'
            )
    else:
        # Robustness fix: the original indexed fn_cnt.index[1]
        # unconditionally and raised IndexError when fewer than two
        # distinct search functions appeared in the subset.
        text += (
            f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search function `{top_names[0]}`.'
            )
    if len(uniq_probs) < 4:
        text += ' And this occurs only for Problems: '
        pro = ','.join('{}' for p in uniq_probs) + '.<br>'
        text += pro.format(*uniq_probs)
    else:
        text += ' And this occurs for all Problems.'
    text += '<br>'
    return df_fn_html, text, dfout
def make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95
    ), to_file='', show=False, excluded=None):
    """
    To get 2 bar plots in a row.

    Parameters
    ----------
    df_list : list of two DataFrames
        One per problem; must hold 'Searcher', 'Air cargo problem' and
        the x_col/y_col columns.
    x_col, y_col : str
        x_col must hold a single positive value per frame (e.g. 'Actions');
        y_col is the plotted metric.
    problems : list of str
        Subplot labels, in df_list order.
    legend_bbox : tuple
        bbox_to_anchor for the legend on the right-hand axes.
    to_file : str
        If non-empty, path the figure is saved to.
    show : bool
        When True, the axes array is returned.
    excluded : list of (1-based index, name) tuples or None
        Searches excluded on the harder problems; marked 'X' in the legend.
    """
    import matplotlib.patches as mpatches

    def despine(ax):
        # Hide the top/right spines for a cleaner frame.
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
    # Each frame must carry exactly one positive x_col value.
    a1 = df_list[0][x_col].unique().astype(int)
    a1 = a1[a1 > 0]
    a2 = df_list[1][x_col].unique().astype(int)
    a2 = a2[a2 > 0]
    assert len(a1) == len(a2) == 1
    action_nums = [a1[0], a2[0]]
    p1 = df_list[0]['Air cargo problem'].iloc[0]
    p2 = df_list[1]['Air cargo problem'].iloc[0]
    # Searcher names are assumed common to both frames.
    search = df_list[0].Searcher.tolist()
    s_len = len(search)
    # Sample the colormap evenly, one color per searcher.
    cmap = plt.get_cmap('viridis')
    m = cmap.N // s_len
    colors = [cmap.colors[i * m] for i in range(s_len)]
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
    if y_col == 'ElapsedSeconds':
        ty_col = 'Elapsed time'
        # Problems 3 and 4 run long enough that minutes read better.
        # BUG FIX: the original tested `p == 4`, an undefined name
        # (NameError at runtime) — the intended variable is p1.
        if p1 == 3 or p1 == 4:
            y_col = 'Minutes'
    else:
        ty_col = y_col
    plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,
        fontsize=14)
    for i, df in enumerate(df_list):
        ylog = False
        ylab = f'{y_col}'
        # Log scale on NewNodes for the wider-ranging frames.
        if (i == 1 or p1 == 3) and y_col == 'NewNodes':
            ylog = True
            ylab += ' (log)'
        axs[i].set_ylabel(ylab, fontsize=12)
        df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)
        t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
        axs[i].set_xlabel(t, fontsize=12)
        axs[i].set_xticks([])
        despine(axs[i])
    legt = 'Searchers'
    # On problems 3/4 some searches were excluded; flag them in the
    # legend since the colormap matches figures 1/2 but some bars have
    # no data.
    new_lgd = p1 == 3 and excluded is not None
    if new_lgd:
        legt += ' (X :: excluded)'
        excluded_len = len(excluded)
        # Convert 1-based exclusion indices to 0-based SEARCHES indices.
        x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]
    legend_patches = []
    for i, c in enumerate(colors):
        lab = search[i]
        if new_lgd:
            if SEARCHES.index(lab) in x_idx:
                lab = lab.replace(' ', ' + ')
                lab += ' X'
            else:
                lab = lab.replace(' ', ' + ')
        else:
            lab = lab.replace(' ', ' + ')
        legend_patches.append(mpatches.Patch(color=c, label=lab))
    axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',
        fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',
        labelspacing=0.6, fancybox=True)
    plt.tight_layout()
    if to_file:
        plt.savefig(to_file)
    if show:
        return axs
def format_multiples(multi):
    """Render the entries of *multi* (a Series/array exposing ``.values``)
    as one bracketed, comma-separated string, e.g. ``'[a, b, c]'``.

    Entries must be strings (the ``:s`` format spec is applied to each);
    an empty input yields ``'[]'``.
    """
    placeholders = ', '.join('{' + str(i) + ':s}' for i in range(len(multi)))
    return '[' + placeholders.format(*multi.values) + ']'
def order_analysis(df2, df1, column_to_compare):
    """
    df2: has the large values.

    Compare *column_to_compare* between two frames and report the
    searchers in df2 whose magnitude ratio exceeds the average.

    Returns (mag_aver, searchers): mag_aver is the rounded mean of
    log(df2 / df1) per row; searchers is df2's 'Searcher' column for the
    rows above that average, ordered by decreasing magnitude.
    """
    colA_larger_values = df2[column_to_compare]
    colA_smaller_values = df1[column_to_compare]
    # NOTE(review): np.log is the natural log, so 'order of magnitude'
    # here is in base-e units, not base-10 — confirm this is intended.
    mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)
    mag.sort_values(ascending=False, inplace=True)
    mag_aver = int(np.round(mag.mean(), 0))
    # Row labels whose magnitude exceeds the average:
    ma = mag[mag > mag_aver].index.tolist()
    above_multiples = mag_aver, df2.loc[ma, 'Searcher']
    return above_multiples
def comparison_paragraph(df2, df1, heading, column_to_compare, return_html=
    False):
    """Build an HTML paragraph comparing *column_to_compare* across two
    problem frames, quoting the average order-of-magnitude difference and
    the searches that exceed it.

    Returns the raw HTML string when return_html is True, otherwise an
    IPython Markdown object wrapping it.
    """
    # Last character of the 'Problem' value is the problem number.
    prob_small = df1.loc[0, 'Problem'][-1]
    prob_large = df2.loc[0, 'Problem'][-1]
    magnitude, searchers = order_analysis(df2, df1, column_to_compare)
    listed = format_multiples(searchers)
    text = (
        f'<h3>* {heading}</h3><p style="font-size:110%;">'
        f'For Problems {prob_small} and {prob_large}, '
        f'the <i>average</i> order of magnitude difference in {heading.lower()} is '
        f'<b>{magnitude:d}</b>, which is surpassed by these searches: {listed}.</p>'
    )
    return text if return_html else Markdown(text)
def get_elim_candidates(df2, df1):
    """
    For the analysis of problems 1 & 2.
    List the costliest searches: candidates for elimination on more complex problems.

    Returns a list of (1-based SEARCHES index, searcher name) tuples, or
    None (implicitly) when df1 is not the Problem-1 frame.
    """
    # Guard: this analysis only applies to the Problem 1 vs 2 pair.
    if df1.loc[1, 'Problem'] != problems[0]:
        return
    nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')
    time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')
    # Candidates sit in both the costliest-by-nodes and the
    # costliest-by-time sets.
    elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(
        time_above[:time_order_av]))
    # Attach each candidate's 1-based position in SEARCHES.
    out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]
    return out
def paragraph_p12(candidates_tup, return_html=False):
    """
    For displaying the analysis of problems 1 & 2.

    candidates_tup: iterable of (1-based index, searcher name) pairs, as
    produced by get_elim_candidates.  Returns the HTML string when
    return_html is True, otherwise an IPython Markdown object.
    """
    # One <dt> entry per candidate; the index is right-aligned to 2 chars.
    items = ''.join(f'<dt><b>{idx:>2}: {name}</b></dt>'
                    for idx, name in candidates_tup)
    text = (
        '<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">')
    text += """On the basis of Figures 1 and 2, which show the number of new nodes created, 
and the time spent by each search function, respectively, the searches that are candidates 
for elimination for more complex problems are those at the intersection of the average-ranked 
costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
    text += f'<dl>{items}</dl></p></pre>'
    if return_html:
        return text
    return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style=
    '{width: 80%}'):
    """
    Wrap an html code str inside a div.
    div_style: whatever follows style= within the <div>
    Behaviour with `output_string=True`:
    The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')
    The only thing to do is change the cell mode to Markdown.
    If `output_string=False`, the HTML/md output is displayed in an output cell.
    """
    wrapped = f'<div style="{div_style}">{div_html_text}</div>'
    if output_string:
        return wrapped
    return Markdown(wrapped)
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib as mpl
from matplotlib import pyplot as plt
plt.style.use('seaborn-muted')
from IPython.display import HTML, Markdown
import air_cargo_problems as acp
problems = ['Air Cargo Problem 1', 'Air Cargo Problem 2',
'Air Cargo Problem 3', 'Air Cargo Problem 4']
SEARCHES = ['breadth_first_search', 'depth_first_graph_search',
'uniform_cost_search', 'greedy_best_first_graph_search h_unmet_goals',
'greedy_best_first_graph_search h_pg_levelsum',
'greedy_best_first_graph_search h_pg_maxlevel',
'greedy_best_first_graph_search h_pg_setlevel',
'astar_search h_unmet_goals', 'astar_search h_pg_levelsum',
'astar_search h_pg_maxlevel', 'astar_search h_pg_setlevel']
def get_prob_specs():
    """Return a DataFrame of structural specs for the four air-cargo
    problems: one row per problem with counts of cargos, planes,
    airports, and goal clauses, plus a 1-based problem number."""
    # Instantiate each problem once to read its sizes.
    Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),
        acp.air_cargo_p4()]
    problems_specs = {'Problem': [name for name in problems],
        'Air cargo problem': [(i + 1) for i in range(len(problems))],
        'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for
        p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':
        [len(p.goal) for p in Probs]}
    return pd.DataFrame(problems_specs)
specs = get_prob_specs()
def df2tsv(df, fname, replace=False):
    """Write *df* to *fname* as a tab-separated file.

    An existing file is overwritten only when ``replace`` is True;
    otherwise it is left untouched.
    """
    if Path(fname).exists() and not replace:
        return
    df.to_csv(fname, sep='\t')
def get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=
    False, replace=False):
    """
    Combine all processed files of a problem found in Path(data_dir) with given stem.
    The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.
    Input example:
    file_stem = 'prob_2'
    problem = 'Air Cargo Problem 2'
    Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.
    """
    if file_stem is None or problem is None:
        print('file_stem and problem must have a value.')
        return
    t = '\t'
    # Input/output file suffixes: raw glob vs processed cache.
    sfx = ['.csv', '_df.csv']
    fout = None
    if file_as_tsv:
        # Cache hit: return the previously-saved processed frame unless
        # the caller asked to rebuild it.
        fout = Path(out_dir).joinpath(file_stem + sfx[1])
        if fout.exists() and not replace:
            df = pd.read_csv(fout, sep=t)
            try:
                # Drop the stray index column written by earlier saves.
                return df.drop('Unnamed: 0', axis=1)
            except KeyError:
                pass
    # Cache miss (or replace requested): (re)process every raw file.
    pfiles = list(Path(raw_dir).glob(file_stem + '*'))
    if len(pfiles) == 0:
        print(f'No raw files with stem: {file_stem}')
        return
    dflist = []
    for f in pfiles:
        df, err = get_results_df(f, problem)
        if df is not None:
            # Attach problem specs and index rows 1..N by each
            # searcher's position in the module-level SEARCHES list.
            df = df.merge(specs)
            df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)
            df['index'] = df['index'].astype(int)
            df.set_index('index', drop=True, inplace=True)
            dflist.append(df)
            del df
        else:
            print(f'Error from get_results_df:\n\t{err}')
    dfout = pd.concat(dflist, ignore_index=False)
    dfout.sort_index(inplace=True)
    if file_as_tsv:
        df2tsv(dfout, fout, replace=replace)
    return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',
'PlanLength', 'ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
df.columns = ['c', 'Searcher']
df = df.reindex(columns=df.columns.tolist() + val_cols)
sr = df.loc[df.c == 'Searcher', 'Searcher']
for idx, sr_row in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def concat_all_dfs(dflist):
"""
Output combined df for complete runs, Actions>0.
"""
dfall = pd.concat(dflist, ignore_index=False)
dfall.reset_index(drop=False, inplace=True)
dfall.rename(columns={'index': 'id'}, inplace=True)
drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',
'GoalTests']
dfa = dfall.drop(drop_cols, axis=1)
del dfall
dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',
'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]
return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
"""
dfa: frame of concatenated df1 to df4.
Analysis of plan length for which in ['double', 'single']:
PlanLength is double(single)-digit.
"""
if which == 'double':
msk = dfa.PlanLength >= 10
col2 = 'Frequency where PlanLength >=10'
else:
msk = dfa.PlanLength < 10
col2 = 'Frequency where PlanLength <10'
dfa_rows = dfa.shape[0]
dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
uniq_probs = dfout['Air cargo problem'].unique()
n_plans = dfout.shape[0]
searcher_cnt = dfout['Searcher'].value_counts()
fn_cnt = dfout['search_fn'].value_counts()
df_fn = fn_cnt.to_frame()
df_fn.reset_index(drop=False, inplace=True)
df_fn.columns = ['Search function', col2]
df_fn_html = df_fn.to_html(index=False, justify='center')
replace_str1 = ' style="text-align: center;"'
replace_str2 = 'class="dataframe"'
df_fn_html = df_fn_html.replace(replace_str1, '')
df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
pct_plans = n_plans / dfa_rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn / n_plans
text = (
f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'
)
text += (
f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'
)
if len(uniq_probs) < 4:
text += ' And this occurs only for Problems: '
pro = ','.join('{}' for p in uniq_probs) + '.<br>'
text += pro.format(*uniq_probs)
else:
text += ' And this occurs for all Problems.'
text += '<br>'
return df_fn_html, text, dfout
def make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95
), to_file='', show=False, excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1 > 0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2 > 0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
search = df_list[0].Searcher.tolist()
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i * m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
if p1 == 3 or p == 4:
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,
fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(axs[i])
legt = 'Searchers'
new_lgd = p1 == 3 and excluded is not None
if new_lgd:
legt += ' (X :: excluded)'
excluded_len = len(excluded)
x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]
legend_patches = []
for i, c in enumerate(colors):
lab = search[i]
if new_lgd:
if SEARCHES.index(lab) in x_idx:
lab = lab.replace(' ', ' + ')
lab += ' X'
else:
lab = lab.replace(' ', ' + ')
else:
lab = lab.replace(' ', ' + ')
legend_patches.append(mpatches.Patch(color=c, label=lab))
axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',
fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',
labelspacing=0.6, fancybox=True)
plt.tight_layout()
if to_file:
plt.savefig(to_file)
if show:
return axs
def format_multiples(multi):
s = ''
for i in range(len(multi)):
s += '{' + str(i) + ':s}, '
s = s[:-2]
return '[' + s.format(*multi.values) + ']'
def order_analysis(df2, df1, column_to_compare):
"""
df2: has the large values.
"""
colA_larger_values = df2[column_to_compare]
colA_smaller_values = df1[column_to_compare]
mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)
mag.sort_values(ascending=False, inplace=True)
mag_aver = int(np.round(mag.mean(), 0))
ma = mag[mag > mag_aver].index.tolist()
above_multiples = mag_aver, df2.loc[ma, 'Searcher']
return above_multiples
def comparison_paragraph(df2, df1, heading, column_to_compare, return_html=
False):
p1 = df1.loc[0, 'Problem'][-1]
p2 = df2.loc[0, 'Problem'][-1]
order_aver, searches_above = order_analysis(df2, df1, column_to_compare)
above = format_multiples(searches_above)
headinglc = heading.lower()
text = (
f'<h3>* {heading}</h3><p style="font-size:110%;">For Problems {p1} and {p2}, '
)
text += (
f'the <i>average</i> order of magnitude difference in {headinglc} is ')
text += (
f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'
)
if return_html:
return text
else:
return Markdown(text)
def get_elim_candidates(df2, df1):
"""
For the analysis of problems 1 & 2.
List the costliest searches: candidates for elimination on more complex problems.
"""
if df1.loc[1, 'Problem'] != problems[0]:
return
nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')
time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')
elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(
time_above[:time_order_av]))
out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]
return out
def paragraph_p12(candidates_tup, return_html=False):
"""
For displaying the analysis of problems 1 & 2.
"""
elim_list = ''
for i, c in candidates_tup:
elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'
text = (
'<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">')
text += """On the basis of Figures 1 and 2, which show the number of new nodes created,
and the time spent by each search function, respectively, the searches that are candidates
for elimination for more complex problems are those at the intersection of the average-ranked
costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
text += f'<dl>{elim_list}</dl></p></pre>'
if return_html:
return text
else:
return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style=
'{width: 80%}'):
"""
Wrap an html code str inside a div.
div_style: whatever follows style= within the <div>
Behaviour with `output_string=True`:
The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')
The only thing to do is change the cell mode to Markdown.
If `output_string=False`, the HTML/md output is displayed in an output cell.
"""
div = f'<div style="{div_style}">{div_html_text}</div>'
if output_string:
return div
else:
return Markdown(div)
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib as mpl
from matplotlib import pyplot as plt
plt.style.use('seaborn-muted')
#from IPython import get_ipython
from IPython.display import HTML, Markdown
import air_cargo_problems as acp
problems = ['Air Cargo Problem 1',
'Air Cargo Problem 2',
'Air Cargo Problem 3',
'Air Cargo Problem 4']
SEARCHES = ['breadth_first_search',
'depth_first_graph_search',
'uniform_cost_search',
'greedy_best_first_graph_search h_unmet_goals',
'greedy_best_first_graph_search h_pg_levelsum',
'greedy_best_first_graph_search h_pg_maxlevel',
'greedy_best_first_graph_search h_pg_setlevel',
'astar_search h_unmet_goals',
'astar_search h_pg_levelsum',
'astar_search h_pg_maxlevel',
'astar_search h_pg_setlevel']
def get_prob_specs():
Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(),
acp.air_cargo_p3(), acp.air_cargo_p4()]
problems_specs = {'Problem': [name for name in problems],
'Air cargo problem': [i+1 for i in range(len(problems))],
'Cargos': [len(p.cargos) for p in Probs],
'Planes': [len(p.planes) for p in Probs],
'Airports': [len(p.airports) for p in Probs],
'Goal': [len(p.goal) for p in Probs]}
return pd.DataFrame(problems_specs)
specs = get_prob_specs()
def df2tsv(df, fname, replace=False):
if Path(fname).exists():
if replace:
df.to_csv(fname, sep='\t')
#else:
# print(f'File {fname} not replaced.')
return
df.to_csv(fname, sep='\t')
return
def get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=False, replace=False):
"""
Combine all processed files of a problem found in Path(data_dir) with given stem.
The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.
Input example:
file_stem = 'prob_2'
problem = 'Air Cargo Problem 2'
Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.
"""
if file_stem is None or problem is None:
print('file_stem and problem must have a value.')
return
t = '\t'
# input/output file suffixes:
sfx = ['.csv', '_df.csv']
# Try retrieving it from out_dir if not replacing it:
fout = None
if file_as_tsv:
fout = Path(out_dir).joinpath(file_stem + sfx[1])
if fout.exists() and not replace:
df = pd.read_csv(fout, sep=t)
try:
return df.drop('Unnamed: 0', axis=1)
except KeyError:
pass
# else: (re)process
pfiles = list(Path(raw_dir).glob(file_stem + '*'))
if len(pfiles) == 0:
print(f'No raw files with stem: {file_stem}')
return
dflist = []
for f in pfiles:
df, err = get_results_df(f, problem)
if df is not None:
df = df.merge(specs)
df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)
df['index'] = df['index'].astype(int)
df.set_index('index', drop=True, inplace=True)
dflist.append(df)
del df
else:
print(f'Error from get_results_df:\n\t{err}')
dfout = pd.concat(dflist, ignore_index=False)
dfout.sort_index(inplace=True)
if file_as_tsv:
df2tsv(dfout, fout, replace=replace)
return dfout
def get_results_df(fname, problem):
"""Process csv into dataframe.
"""
t = '\t'
# Cols to add:
val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']
err = ''
df = pd.read_csv(fname, sep=t)
if df.shape[0] < len(val_cols):
err = f'Data for {fname.name} is incomplete.'
return None, err
# Rename cols: c (temp) -> Searcher
df.columns = ['c', 'Searcher']
# Add new cols & reindex
df = df.reindex(columns = df.columns.tolist() + val_cols)
# Populate new cols according to row with search name:
sr = df.loc[df.c == 'Searcher', 'Searcher']
for (idx, sr_row) in sr.items():
j = idx
for c in df.columns[2:].tolist():
j += 1
if c == 'ElapsedSeconds':
df.loc[idx, c] = float(df.loc[j, 'Searcher'])
else:
df.loc[idx, c] = int(df.loc[j, 'Searcher'])
df.dropna(inplace=True)
# Add a minute column:
df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)
# Replace values of 1st col with problem name & update col name:
df['c'] = problem
df.rename(columns={'c': 'Problem'}, inplace=True)
df.reset_index(drop=True, inplace=True)
return df, ''
def concat_all_dfs(dflist):
"""
Output combined df for complete runs, Actions>0.
"""
dfall = pd.concat(dflist, ignore_index=False)
dfall.reset_index(drop=False, inplace=True)
dfall.rename(columns={'index': 'id'}, inplace=True)
# reduced
drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']
dfa = dfall.drop(drop_cols, axis=1)
del dfall
# add col for function name
dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]
# reorder cols
dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',
'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]
# complete runs only:
return dfa[dfa['Actions'].values > 0]
def plans_length(dfa, which):
"""
dfa: frame of concatenated df1 to df4.
Analysis of plan length for which in ['double', 'single']:
PlanLength is double(single)-digit.
"""
if which == 'double':
msk = dfa.PlanLength >= 10
col2 = 'Frequency where PlanLength >=10'
else:
msk = dfa.PlanLength < 10
col2 = 'Frequency where PlanLength <10'
dfa_rows = dfa.shape[0]
dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)
uniq_probs = dfout['Air cargo problem'].unique()
n_plans = dfout.shape[0]
searcher_cnt = dfout['Searcher'].value_counts()
fn_cnt = dfout['search_fn'].value_counts()
# get the html string:
df_fn = fn_cnt.to_frame()
df_fn.reset_index(drop=False, inplace=True)
df_fn.columns = ['Search function', col2]
df_fn_html = df_fn.to_html(index=False, justify='center')
replace_str1 = ' style="text-align: center;"'
replace_str2 = 'class="dataframe"'
df_fn_html = df_fn_html.replace(replace_str1, '')
df_fn_html = df_fn_html.replace(replace_str2, replace_str1)
pct_plans = n_plans/dfa_rows
top2_fn = fn_cnt[0:2].sum()
pct_top2_fn = top2_fn/n_plans
text = f"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>"
text += f"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`."
if len(uniq_probs) < 4:
text += " And this occurs only for Problems: "
pro = ",".join('{}' for p in uniq_probs) +'.<br>'
text += pro.format(*uniq_probs)
else:
text += " And this occurs for all Problems."
text += "<br>"
return df_fn_html, text, dfout
def make_bar_plots(df_list,
x_col, y_col,
problems,
legend_bbox=(.05, .95),
to_file='',
show=False,
excluded=None):
"""
To get 2 bar plots in a row.
"""
import matplotlib.patches as mpatches
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
a1 = df_list[0][x_col].unique().astype(int)
a1 = a1[a1>0]
a2 = df_list[1][x_col].unique().astype(int)
a2 = a2[a2>0]
assert len(a1) == len(a2) == 1
action_nums = [a1[0], a2[0]]
p1 = df_list[0]['Air cargo problem'].iloc[0]
p2 = df_list[1]['Air cargo problem'].iloc[0]
# Seach functions names should be common to all dfs:
search = df_list[0].Searcher.tolist()
# Sample cmap according to categories:
s_len = len(search)
cmap = plt.get_cmap('viridis')
m = cmap.N // s_len
colors = [cmap.colors[i*m] for i in range(s_len)]
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
# Use the minutes columns for the more complex problems:
if y_col == 'ElapsedSeconds':
ty_col = 'Elapsed time'
if p1 == 3 or p == 4: # applies to problems 3/4
y_col = 'Minutes'
else:
ty_col = y_col
plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}',
y = 1.05, fontsize=14)
for i, df in enumerate(df_list):
ylog = False
ylab = f'{y_col}'
# log scale on NewNodes for df2, df3, df4:
if (i == 1 or p1 == 3) and y_col == 'NewNodes':
ylog = True
ylab += ' (log)'
axs[i].set_ylabel(ylab, fontsize=12)
df[y_col].plot.bar(ax=axs[i], logy=ylog,
color=colors,
legend=False)
t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])
axs[i].set_xlabel(t, fontsize=12)
axs[i].set_xticks([])
despine(axs[i])
legt = 'Searchers'
new_lgd = p1 == 3 and excluded is not None
if new_lgd:
# Modify the legend to indicate excluded searches
# (bc colormap is identical to fig1/2, but some runs have no data).
legt += ' (X :: excluded)'
excluded_len = len(excluded)
x_idx = [excluded[i][0]-1 for i in range(excluded_len)]
legend_patches = []
for i, c in enumerate(colors):
lab = search[i]
if new_lgd:
if SEARCHES.index(lab) in x_idx:
lab = lab.replace(' ', ' + ')
lab += ' X'
else:
lab = lab.replace(' ', ' + ')
else:
lab = lab.replace(' ', ' + ')
legend_patches.append(mpatches.Patch(color=c, label=lab))
axs[1].legend(handles=legend_patches,
title=legt,
title_fontsize='14',
fontsize='medium',
bbox_to_anchor=legend_bbox,
loc='upper left',
labelspacing=0.6,
fancybox=True)
plt.tight_layout()
if to_file:
plt.savefig(to_file)
if show:
return axs
def format_multiples(multi):
s = ''
for i in range(len(multi)):
s += '{'+ str(i) +':s}, '
s = s[:-2]
return '[' + s.format(*multi.values) + ']'
def order_analysis(df2, df1, column_to_compare):
"""
df2: has the large values.
"""
colA_larger_values = df2[column_to_compare]
colA_smaller_values = df1[column_to_compare]
# orders of magnitude difference btw dfB and dfA (min, max):
mag = np.round(np.log(colA_larger_values/colA_smaller_values), 0)
mag.sort_values(ascending=False, inplace=True)
mag_aver = int(np.round(mag.mean(), 0))
# get the indices of values above average:
ma = mag[mag > mag_aver].index.tolist()
# get the names of all searchers corresponding to the ma:
above_multiples = (mag_aver, df2.loc[ma, 'Searcher'])
return above_multiples
def comparison_paragraph(df2, df1, heading, column_to_compare, return_html=False):
p1 = df1.loc[0,'Problem'][-1]
p2 = df2.loc[0,'Problem'][-1]
order_aver, searches_above = order_analysis(df2, df1, column_to_compare)
above = format_multiples(searches_above)
headinglc = heading.lower()
text = f"""<h3>* {heading}</h3><p style="font-size:110%;">For Problems {p1} and {p2}, """
text += f"the <i>average</i> order of magnitude difference in {headinglc} is "
text += f"<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>"
if return_html:
return text
else:
return Markdown(text)
def get_elim_candidates(df2, df1):
"""
For the analysis of problems 1 & 2.
List the costliest searches: candidates for elimination on more complex problems.
"""
if df1.loc[1,'Problem']!= problems[0]:
return
nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')
time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')
elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))
# return their 1-base index also:
out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]
return out
def paragraph_p12(candidates_tup, return_html=False):
"""
For displaying the analysis of problems 1 & 2.
"""
elim_list = ""
for i, c in candidates_tup:
elim_list += f"<dt><b>{i:>2}: {c}</b></dt>"
text = """<h3>* Insights from Problems 1 and 2</h3><p style="font-size:110%;">"""
text += """On the basis of Figures 1 and 2, which show the number of new nodes created,
and the time spent by each search function, respectively, the searches that are candidates
for elimination for more complex problems are those at the intersection of the average-ranked
costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>"""
text += f"<dl>{elim_list}</dl></p></pre>"
if return_html:
return text
else:
return Markdown(text)
def add_div_around_html(div_html_text, output_string=False, div_style="{width: 80%}"):
    """
    Wrap an html code str inside a div.

    div_style: whatever follows style= within the <div>.
        NOTE(review): the default value keeps the surrounding braces from the
        original implementation — presumably intentional; confirm before changing.

    With output_string=True, the raw string is returned so the cell it is
    pasted into can simply be switched to Markdown mode (the cell stays in
    'code' mode until changed by hand).  With output_string=False, the
    HTML/md output is displayed in an output cell via Markdown.
    """
    wrapped = '<div style="{}">{}</div>'.format(div_style, div_html_text)
    if output_string:
        return wrapped
    return Markdown(wrapped)
|
flexible
|
{
"blob_id": "cd49230be3c418853aa2986ed727204e51a6b6ae",
"index": 3794,
"step-1": "<mask token>\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\n<mask token>\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n 
top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\n<mask token>\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\n<mask token>\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from 
Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-2": "<mask token>\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\n<mask token>\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n else:\n print(f'Error from 
get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n 
else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = 
df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\n<mask token>\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > 
mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. \n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n 
text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-3": "<mask token>\nplt.style.use('seaborn-muted')\n<mask token>\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\n<mask token>\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n 
else:\n print(f'Error from get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency 
where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = 
df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. {x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{' + str(i) + ':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = 
np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. 
\n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nplt.style.use('seaborn-muted')\nfrom IPython.display import HTML, Markdown\nimport air_cargo_problems as acp\nproblems = ['Air Cargo Problem 1', 'Air Cargo Problem 2',\n 'Air Cargo Problem 3', 'Air Cargo Problem 4']\nSEARCHES = ['breadth_first_search', 'depth_first_graph_search',\n 'uniform_cost_search', 'greedy_best_first_graph_search h_unmet_goals',\n 'greedy_best_first_graph_search h_pg_levelsum',\n 'greedy_best_first_graph_search h_pg_maxlevel',\n 'greedy_best_first_graph_search h_pg_setlevel',\n 'astar_search h_unmet_goals', 'astar_search h_pg_levelsum',\n 'astar_search h_pg_maxlevel', 'astar_search h_pg_setlevel']\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(), acp.air_cargo_p3(),\n acp.air_cargo_p4()]\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [(i + 1) for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs], 'Planes': [len(p.planes) for\n p in Probs], 'Airports': [len(p.airports) for p in Probs], 'Goal':\n [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\n\nspecs = get_prob_specs()\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n return\n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=\n False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n \"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n 
return\n t = '\\t'\n sfx = ['.csv', '_df.csv']\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x) + 1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n val_cols = ['Actions', 'Expansions', 'GoalTests', 'NewNodes',\n 'PlanLength', 'ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n df.columns = ['c', 'Searcher']\n df = df.reindex(columns=df.columns.tolist() + val_cols)\n sr = df.loc[df.c == 'Searcher', 'Searcher']\n for idx, sr_row in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n df.dropna(inplace=True)\n df['Minutes'] = np.round(df.ElapsedSeconds / 60, 3)\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n 
dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem', 'Minutes',\n 'GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n dfa = dfa[['Air cargo problem', 'id', 'search_fn', 'Searcher',\n 'Actions', 'PlanLength', 'NewNodes', 'Expansions', 'ElapsedSeconds']]\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n dfa_rows = dfa.shape[0]\n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, '')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n pct_plans = n_plans / dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn / n_plans\n text = (\n f'Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>'\n )\n text += (\n f'In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.'\n )\n if len(uniq_probs) < 4:\n text += ' And this occurs only for Problems: '\n pro = ','.join('{}' for p in uniq_probs) + '.<br>'\n text += 
pro.format(*uniq_probs)\n else:\n text += ' And this occurs for all Problems.'\n text += '<br>'\n return df_fn_html, text, dfout\n\n\ndef make_bar_plots(df_list, x_col, y_col, problems, legend_bbox=(0.05, 0.95\n ), to_file='', show=False, excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\"\n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1 > 0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2 > 0]\n assert len(a1) == len(a2) == 1\n action_nums = [a1[0], a2[0]]\n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n search = df_list[0].Searcher.tolist()\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i * m] for i in range(s_len)]\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4:\n y_col = 'Minutes'\n else:\n ty_col = y_col\n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}', y=1.05,\n fontsize=14)\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n axs[i].set_ylabel(ylab, fontsize=12)\n df[y_col].plot.bar(ax=axs[i], logy=ylog, color=colors, legend=False)\n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [(excluded[i][0] - 1) for i in range(excluded_len)]\n legend_patches = []\n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n axs[1].legend(handles=legend_patches, title=legt, title_fontsize='14',\n fontsize='medium', bbox_to_anchor=legend_bbox, loc='upper left',\n labelspacing=0.6, fancybox=True)\n plt.tight_layout()\n if to_file:\n plt.savefig(to_file)\n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{' + str(i) + ':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n mag = np.round(np.log(colA_larger_values / colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = int(np.round(mag.mean(), 0))\n ma = mag[mag > mag_aver].index.tolist()\n above_multiples = mag_aver, df2.loc[ma, 'Searcher']\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=\n False):\n p1 = df1.loc[0, 'Problem'][-1]\n p2 = 
df2.loc[0, 'Problem'][-1]\n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n headinglc = heading.lower()\n text = (\n f'<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, '\n )\n text += (\n f'the <i>average</i> order of magnitude difference in {headinglc} is ')\n text += (\n f'<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>'\n )\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. \n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1, 'Problem'] != problems[0]:\n return\n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(\n time_above[:time_order_av]))\n out = [(SEARCHES.index(c) + 1, c) for c in elim_candidates]\n return out\n\n\ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n elim_list = ''\n for i, c in candidates_tup:\n elim_list += f'<dt><b>{i:>2}: {c}</b></dt>'\n text = (\n '<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">')\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f'<dl>{elim_list}</dl></p></pre>'\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef add_div_around_html(div_html_text, output_string=False, div_style=\n '{width: 80%}'):\n \"\"\"\n Wrap an html code str 
inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f'<div style=\"{div_style}\">{div_html_text}</div>'\n if output_string:\n return div\n else:\n return Markdown(div)\n",
"step-5": "import numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nplt.style.use('seaborn-muted')\n\n#from IPython import get_ipython\nfrom IPython.display import HTML, Markdown\n\nimport air_cargo_problems as acp\n\n\nproblems = ['Air Cargo Problem 1', \n 'Air Cargo Problem 2',\n 'Air Cargo Problem 3',\n 'Air Cargo Problem 4']\n\nSEARCHES = ['breadth_first_search',\n 'depth_first_graph_search',\n 'uniform_cost_search',\n 'greedy_best_first_graph_search h_unmet_goals',\n 'greedy_best_first_graph_search h_pg_levelsum',\n 'greedy_best_first_graph_search h_pg_maxlevel',\n 'greedy_best_first_graph_search h_pg_setlevel',\n 'astar_search h_unmet_goals',\n 'astar_search h_pg_levelsum',\n 'astar_search h_pg_maxlevel',\n 'astar_search h_pg_setlevel']\n\n\ndef get_prob_specs():\n Probs = [acp.air_cargo_p1(), acp.air_cargo_p2(),\n acp.air_cargo_p3(), acp.air_cargo_p4()]\n\n problems_specs = {'Problem': [name for name in problems],\n 'Air cargo problem': [i+1 for i in range(len(problems))],\n 'Cargos': [len(p.cargos) for p in Probs],\n 'Planes': [len(p.planes) for p in Probs],\n 'Airports': [len(p.airports) for p in Probs],\n 'Goal': [len(p.goal) for p in Probs]}\n return pd.DataFrame(problems_specs)\n\nspecs = get_prob_specs()\n\n\ndef df2tsv(df, fname, replace=False):\n if Path(fname).exists():\n if replace:\n df.to_csv(fname, sep='\\t')\n #else:\n # print(f'File {fname} not replaced.')\n return\n \n df.to_csv(fname, sep='\\t')\n return\n\n\ndef get_problem_data_df(file_stem, problem, raw_dir, out_dir, file_as_tsv=False, replace=False):\n \"\"\"\n Combine all processed files of a problem found in Path(data_dir) with given stem.\n The file to be saved to/retrieved from out_dir is passed in file_as_tsv, tab separated csv.\n \n Input example:\n file_stem = 'prob_2'\n problem = 'Air Cargo Problem 2'\n Output: a dataframe, saved to tsv if file_as_tsv=True and not replace; saved as file_stem+'_df.csv'.\n 
\"\"\"\n if file_stem is None or problem is None:\n print('file_stem and problem must have a value.')\n return\n \n t = '\\t'\n \n # input/output file suffixes:\n sfx = ['.csv', '_df.csv']\n \n # Try retrieving it from out_dir if not replacing it:\n fout = None\n if file_as_tsv:\n fout = Path(out_dir).joinpath(file_stem + sfx[1])\n if fout.exists() and not replace:\n df = pd.read_csv(fout, sep=t)\n try:\n return df.drop('Unnamed: 0', axis=1)\n except KeyError:\n pass\n # else: (re)process\n \n pfiles = list(Path(raw_dir).glob(file_stem + '*'))\n if len(pfiles) == 0:\n print(f'No raw files with stem: {file_stem}')\n return\n \n dflist = []\n for f in pfiles:\n df, err = get_results_df(f, problem)\n \n if df is not None:\n df = df.merge(specs)\n df['index'] = df['Searcher'].apply(lambda x: SEARCHES.index(x)+1)\n df['index'] = df['index'].astype(int)\n df.set_index('index', drop=True, inplace=True)\n \n dflist.append(df)\n del df\n else:\n print(f'Error from get_results_df:\\n\\t{err}')\n \n dfout = pd.concat(dflist, ignore_index=False)\n dfout.sort_index(inplace=True)\n \n if file_as_tsv:\n df2tsv(dfout, fout, replace=replace)\n \n return dfout\n\n\ndef get_results_df(fname, problem):\n \"\"\"Process csv into dataframe.\n \"\"\"\n t = '\\t'\n \n # Cols to add:\n val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n \n # Rename cols: c (temp) -> Searcher\n df.columns = ['c', 'Searcher']\n # Add new cols & reindex\n df = df.reindex(columns = df.columns.tolist() + val_cols)\n \n # Populate new cols according to row with search name:\n sr = df.loc[df.c == 'Searcher', 'Searcher'] \n for (idx, sr_row) in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 
'Searcher'])\n\n df.dropna(inplace=True)\n # Add a minute column:\n df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)\n \n # Replace values of 1st col with problem name & update col name:\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n \n return df, ''\n\n\ndef concat_all_dfs(dflist):\n \"\"\"\n Output combined df for complete runs, Actions>0.\n \"\"\"\n dfall = pd.concat(dflist, ignore_index=False)\n dfall.reset_index(drop=False, inplace=True)\n dfall.rename(columns={'index': 'id'}, inplace=True)\n # reduced\n drop_cols = dfall.columns[-4:-1].tolist() + ['Problem','Minutes','GoalTests']\n dfa = dfall.drop(drop_cols, axis=1)\n del dfall\n # add col for function name\n dfa['search_fn'] = dfa.Searcher.str.partition(' ')[0]\n # reorder cols\n dfa = dfa[['Air cargo problem','id','search_fn','Searcher','Actions',\n 'PlanLength', 'NewNodes','Expansions','ElapsedSeconds']]\n\n # complete runs only:\n return dfa[dfa['Actions'].values > 0]\n\n\ndef plans_length(dfa, which):\n \"\"\"\n dfa: frame of concatenated df1 to df4.\n Analysis of plan length for which in ['double', 'single']:\n PlanLength is double(single)-digit.\n \"\"\"\n if which == 'double':\n msk = dfa.PlanLength >= 10\n col2 = 'Frequency where PlanLength >=10'\n else:\n msk = dfa.PlanLength < 10\n col2 = 'Frequency where PlanLength <10'\n \n dfa_rows = dfa.shape[0]\n \n dfout = dfa[msk].sort_values(['PlanLength'], ascending=False)\n\n uniq_probs = dfout['Air cargo problem'].unique()\n n_plans = dfout.shape[0]\n searcher_cnt = dfout['Searcher'].value_counts()\n fn_cnt = dfout['search_fn'].value_counts()\n\n # get the html string:\n df_fn = fn_cnt.to_frame()\n df_fn.reset_index(drop=False, inplace=True)\n df_fn.columns = ['Search function', col2]\n \n df_fn_html = df_fn.to_html(index=False, justify='center')\n replace_str1 = ' style=\"text-align: center;\"'\n replace_str2 = 'class=\"dataframe\"'\n df_fn_html = df_fn_html.replace(replace_str1, 
'')\n df_fn_html = df_fn_html.replace(replace_str2, replace_str1)\n\n pct_plans = n_plans/dfa_rows\n top2_fn = fn_cnt[0:2].sum()\n pct_top2_fn = top2_fn/n_plans\n\n text = f\"Out of {dfa_rows} completed searches, {pct_plans:.0%} ({n_plans}), have {which}-digit or longer PlanLength.<br>\"\n text += f\"In that subset, {top2_fn:d} ({pct_top2_fn:.0%}) involve the search functions `{fn_cnt.index[0]}` and `{fn_cnt.index[1]}`.\"\n if len(uniq_probs) < 4:\n text += \" And this occurs only for Problems: \"\n pro = \",\".join('{}' for p in uniq_probs) +'.<br>'\n text += pro.format(*uniq_probs)\n else:\n text += \" And this occurs for all Problems.\"\n text += \"<br>\"\n \n return df_fn_html, text, dfout\n\ndef make_bar_plots(df_list,\n x_col, y_col,\n problems,\n legend_bbox=(.05, .95),\n to_file='',\n show=False,\n excluded=None):\n \"\"\"\n To get 2 bar plots in a row.\n \"\"\" \n import matplotlib.patches as mpatches\n\n def despine(ax):\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n a1 = df_list[0][x_col].unique().astype(int)\n a1 = a1[a1>0]\n a2 = df_list[1][x_col].unique().astype(int)\n a2 = a2[a2>0]\n assert len(a1) == len(a2) == 1\n \n action_nums = [a1[0], a2[0]]\n \n p1 = df_list[0]['Air cargo problem'].iloc[0]\n p2 = df_list[1]['Air cargo problem'].iloc[0]\n \n # Seach functions names should be common to all dfs:\n search = df_list[0].Searcher.tolist()\n \n # Sample cmap according to categories:\n s_len = len(search)\n cmap = plt.get_cmap('viridis')\n m = cmap.N // s_len\n colors = [cmap.colors[i*m] for i in range(s_len)]\n \n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12,5))\n \n # Use the minutes columns for the more complex problems:\n if y_col == 'ElapsedSeconds':\n ty_col = 'Elapsed time'\n if p1 == 3 or p == 4: # applies to problems 3/4\n y_col = 'Minutes'\n else:\n ty_col = y_col\n \n plt.title(f'{ty_col} vs. 
{x_col} for Problems {p1} & {p2}',\n y = 1.05, fontsize=14)\n\n for i, df in enumerate(df_list):\n ylog = False\n ylab = f'{y_col}'\n # log scale on NewNodes for df2, df3, df4:\n if (i == 1 or p1 == 3) and y_col == 'NewNodes':\n ylog = True\n ylab += ' (log)'\n \n axs[i].set_ylabel(ylab, fontsize=12)\n\n df[y_col].plot.bar(ax=axs[i], logy=ylog,\n color=colors,\n legend=False)\n \n t = '{}, {} = {:d}'.format(problems[i], x_col, action_nums[i])\n axs[i].set_xlabel(t, fontsize=12)\n axs[i].set_xticks([])\n despine(axs[i])\n\n legt = 'Searchers'\n new_lgd = p1 == 3 and excluded is not None\n if new_lgd:\n # Modify the legend to indicate excluded searches\n # (bc colormap is identical to fig1/2, but some runs have no data).\n legt += ' (X :: excluded)'\n excluded_len = len(excluded)\n x_idx = [excluded[i][0]-1 for i in range(excluded_len)]\n \n legend_patches = [] \n for i, c in enumerate(colors):\n lab = search[i]\n if new_lgd:\n if SEARCHES.index(lab) in x_idx:\n lab = lab.replace(' ', ' + ')\n lab += ' X'\n else:\n lab = lab.replace(' ', ' + ')\n else:\n lab = lab.replace(' ', ' + ')\n\n legend_patches.append(mpatches.Patch(color=c, label=lab))\n \n axs[1].legend(handles=legend_patches,\n title=legt,\n title_fontsize='14',\n fontsize='medium', \n bbox_to_anchor=legend_bbox, \n loc='upper left',\n labelspacing=0.6,\n fancybox=True)\n\n plt.tight_layout()\n \n if to_file:\n plt.savefig(to_file)\n \n if show:\n return axs\n\n\ndef format_multiples(multi):\n s = ''\n for i in range(len(multi)):\n s += '{'+ str(i) +':s}, '\n s = s[:-2]\n return '[' + s.format(*multi.values) + ']'\n\n\ndef order_analysis(df2, df1, column_to_compare):\n \"\"\"\n df2: has the large values.\n \"\"\"\n colA_larger_values = df2[column_to_compare]\n colA_smaller_values = df1[column_to_compare]\n\n # orders of magnitude difference btw dfB and dfA (min, max):\n mag = np.round(np.log(colA_larger_values/colA_smaller_values), 0)\n mag.sort_values(ascending=False, inplace=True)\n mag_aver = 
int(np.round(mag.mean(), 0))\n\n # get the indices of values above average:\n ma = mag[mag > mag_aver].index.tolist()\n \n # get the names of all searchers corresponding to the ma:\n above_multiples = (mag_aver, df2.loc[ma, 'Searcher'])\n return above_multiples\n\n\ndef comparison_paragraph(df2, df1, heading, column_to_compare, return_html=False):\n\n p1 = df1.loc[0,'Problem'][-1]\n p2 = df2.loc[0,'Problem'][-1]\n \n order_aver, searches_above = order_analysis(df2, df1, column_to_compare)\n above = format_multiples(searches_above)\n \n headinglc = heading.lower()\n text = f\"\"\"<h3>* {heading}</h3><p style=\"font-size:110%;\">For Problems {p1} and {p2}, \"\"\"\n text += f\"the <i>average</i> order of magnitude difference in {headinglc} is \"\n text += f\"<b>{order_aver:d}</b>, which is surpassed by these searches: {above}.</p>\"\n\n if return_html:\n return text\n else:\n return Markdown(text)\n\n\ndef get_elim_candidates(df2, df1):\n \"\"\"\n For the analysis of problems 1 & 2. \n List the costliest searches: candidates for elimination on more complex problems.\n \"\"\"\n if df1.loc[1,'Problem']!= problems[0]:\n return\n \n nodes_order_av, nodes_above = order_analysis(df2, df1, 'NewNodes')\n time_order_av, time_above = order_analysis(df2, df1, 'ElapsedSeconds')\n elim_candidates = set(nodes_above[:nodes_order_av]).intersection(set(time_above[:time_order_av]))\n # return their 1-base index also:\n out = [(SEARCHES.index(c)+1, c) for c in elim_candidates]\n return out\n\n \ndef paragraph_p12(candidates_tup, return_html=False):\n \"\"\"\n For displaying the analysis of problems 1 & 2.\n \"\"\"\n\n elim_list = \"\"\n for i, c in candidates_tup:\n elim_list += f\"<dt><b>{i:>2}: {c}</b></dt>\"\n \n text = \"\"\"<h3>* Insights from Problems 1 and 2</h3><p style=\"font-size:110%;\">\"\"\"\n text += \"\"\"On the basis of Figures 1 and 2, which show the number of new nodes created, \n and the time spent by each search function, respectively, the searches that are 
candidates \n for elimination for more complex problems are those at the intersection of the average-ranked \n costliest sets viz new nodes creation and search time.<br>These searches are:</p><pre><dl>\"\"\"\n text += f\"<dl>{elim_list}</dl></p></pre>\"\n \n if return_html:\n return text\n else:\n return Markdown(text) \n\n \ndef add_div_around_html(div_html_text, output_string=False, div_style=\"{width: 80%}\"):\n \"\"\"\n Wrap an html code str inside a div.\n div_style: whatever follows style= within the <div>\n \n Behaviour with `output_string=True`:\n The cell is overwritten with the output string (but the cell mode is still in 'code' not 'markdown')\n The only thing to do is change the cell mode to Markdown.\n If `output_string=False`, the HTML/md output is displayed in an output cell.\n \"\"\"\n div = f\"\"\"<div style=\"{div_style}\">{div_html_text}</div>\"\"\"\n if output_string:\n return div\n #get_ipython().set_next_input(div, 'markdown')\n else:\n return Markdown(div)",
"step-ids": [
6,
12,
14,
16,
17
]
}
|
[
6,
12,
14,
16,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
sac_gym_test()
<|reserved_special_token_1|>
from neodroidagent.entry_points.agent_tests import sac_gym_test
if __name__ == '__main__':
sac_gym_test()
<|reserved_special_token_1|>
from neodroidagent.entry_points.agent_tests import sac_gym_test
if __name__ == "__main__":
sac_gym_test()
|
flexible
|
{
"blob_id": "e9890fcf9ad2a78b3400f6e4eeb75deac8edcd6a",
"index": 1609,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n sac_gym_test()\n",
"step-3": "from neodroidagent.entry_points.agent_tests import sac_gym_test\nif __name__ == '__main__':\n sac_gym_test()\n",
"step-4": "from neodroidagent.entry_points.agent_tests import sac_gym_test\n\nif __name__ == \"__main__\":\n sac_gym_test()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TRSInterface:
def toolsGet(self):
raise NotImplementedError
def metadataGet(self):
raise NotImplementedError
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
raise NotImplementedError
<|reserved_special_token_0|>
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
raise NotImplementedError
class TRSAdapter(TRSInterface):
"""
Adapter class for TRS client functionality.
Args:
trs_client: ...
"""
def __init__(self, trs_client):
self.trs_client = trs_client
def toolsGet(self):
return self.trs_client.get_tools()
def metadataGet(self):
raise self.trs_client.get_tool_types()
def toolsIdGet(self, tool_id):
return self.trs_client.get_tool(tool_id)
def toolsIdVersionGet(self, tool_id, tool_version):
return self.trs_client.get_tool_version(tool_id, tool_version)
def toolsIdVersionsGet(self, tool_id):
return self.trs_client.get_tool_versions(tool_id)
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,
tool_version, descriptor_type):
return self.trs_client.get_tool_descriptor(tool_id, tool_version,
descriptor_type)
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,
tool_version, descriptor_type, rel_path):
return self.trs_client.get_relative_tool_descriptor(tool_id,
tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
return self.trs_client.get_tool_tests(tool_id, tool_version,
descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,
descriptor_type):
return self.trs_client.get_tools_with_relative_path(tool_id,
tool_version, descriptor_type)
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
return self.trs_client.get_tool_container_specs(tool_id, tool_version)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TRSInterface:
def toolsGet(self):
raise NotImplementedError
def metadataGet(self):
raise NotImplementedError
<|reserved_special_token_0|>
def toolsIdVersionGet(self, tool_id, tool_version):
raise NotImplementedError
def toolsIdVersionsGet(self, tool_id):
raise NotImplementedError
<|reserved_special_token_0|>
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,
tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
raise NotImplementedError
<|reserved_special_token_0|>
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
raise NotImplementedError
class TRSAdapter(TRSInterface):
"""
Adapter class for TRS client functionality.
Args:
trs_client: ...
"""
def __init__(self, trs_client):
self.trs_client = trs_client
def toolsGet(self):
return self.trs_client.get_tools()
def metadataGet(self):
raise self.trs_client.get_tool_types()
def toolsIdGet(self, tool_id):
return self.trs_client.get_tool(tool_id)
def toolsIdVersionGet(self, tool_id, tool_version):
return self.trs_client.get_tool_version(tool_id, tool_version)
def toolsIdVersionsGet(self, tool_id):
return self.trs_client.get_tool_versions(tool_id)
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,
tool_version, descriptor_type):
return self.trs_client.get_tool_descriptor(tool_id, tool_version,
descriptor_type)
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,
tool_version, descriptor_type, rel_path):
return self.trs_client.get_relative_tool_descriptor(tool_id,
tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
return self.trs_client.get_tool_tests(tool_id, tool_version,
descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,
descriptor_type):
return self.trs_client.get_tools_with_relative_path(tool_id,
tool_version, descriptor_type)
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
return self.trs_client.get_tool_container_specs(tool_id, tool_version)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TRSInterface:
def toolsGet(self):
raise NotImplementedError
def metadataGet(self):
raise NotImplementedError
def toolsIdGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionGet(self, tool_id, tool_version):
raise NotImplementedError
def toolsIdVersionsGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,
tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,
tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,
descriptor_type):
raise NotImplementedError
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
raise NotImplementedError
class TRSAdapter(TRSInterface):
"""
Adapter class for TRS client functionality.
Args:
trs_client: ...
"""
def __init__(self, trs_client):
self.trs_client = trs_client
def toolsGet(self):
return self.trs_client.get_tools()
def metadataGet(self):
raise self.trs_client.get_tool_types()
def toolsIdGet(self, tool_id):
return self.trs_client.get_tool(tool_id)
def toolsIdVersionGet(self, tool_id, tool_version):
return self.trs_client.get_tool_version(tool_id, tool_version)
def toolsIdVersionsGet(self, tool_id):
return self.trs_client.get_tool_versions(tool_id)
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,
tool_version, descriptor_type):
return self.trs_client.get_tool_descriptor(tool_id, tool_version,
descriptor_type)
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,
tool_version, descriptor_type, rel_path):
return self.trs_client.get_relative_tool_descriptor(tool_id,
tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
return self.trs_client.get_tool_tests(tool_id, tool_version,
descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,
descriptor_type):
return self.trs_client.get_tools_with_relative_path(tool_id,
tool_version, descriptor_type)
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
return self.trs_client.get_tool_container_specs(tool_id, tool_version)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _get_trs_opts(service_id):
"""
Look up stored parameters for tool registry services.
"""
return trs_config()[service_id]
def _init_http_client(service_id=None, opts=None):
"""
Initialize and configure HTTP requests client for selected service.
"""
if service_id:
opts = _get_trs_opts(service_id)
http_client = RequestsClient()
http_client.set_api_key(host=opts['host'], api_key=opts['auth'],
param_in='header')
return http_client
class TRSInterface:
def toolsGet(self):
raise NotImplementedError
def metadataGet(self):
raise NotImplementedError
def toolsIdGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionGet(self, tool_id, tool_version):
raise NotImplementedError
def toolsIdVersionsGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,
tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,
tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,
descriptor_type):
raise NotImplementedError
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
raise NotImplementedError
class TRSAdapter(TRSInterface):
"""
Adapter class for TRS client functionality.
Args:
trs_client: ...
"""
def __init__(self, trs_client):
self.trs_client = trs_client
def toolsGet(self):
return self.trs_client.get_tools()
def metadataGet(self):
raise self.trs_client.get_tool_types()
def toolsIdGet(self, tool_id):
return self.trs_client.get_tool(tool_id)
def toolsIdVersionGet(self, tool_id, tool_version):
return self.trs_client.get_tool_version(tool_id, tool_version)
def toolsIdVersionsGet(self, tool_id):
return self.trs_client.get_tool_versions(tool_id)
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,
tool_version, descriptor_type):
return self.trs_client.get_tool_descriptor(tool_id, tool_version,
descriptor_type)
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,
tool_version, descriptor_type, rel_path):
return self.trs_client.get_relative_tool_descriptor(tool_id,
tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,
descriptor_type, rel_path):
return self.trs_client.get_tool_tests(tool_id, tool_version,
descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,
descriptor_type):
return self.trs_client.get_tools_with_relative_path(tool_id,
tool_version, descriptor_type)
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
return self.trs_client.get_tool_container_specs(tool_id, tool_version)
def load_trs_client(service_id, http_client=None):
"""Return an API client for the selected workflow execution service."""
trs_client = TRSClient(service=_get_trs_opts(service_id))
return TRSAdapter(trs_client)
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
Load API client for a Tool Registry Service (TRS) endpoint based
either on the GA4GH specification or an existing client library.
"""
import logging
from bravado.requests_client import RequestsClient
from ga4ghtest.core.config import trs_config
from .client import TRSClient
logger = logging.getLogger(__name__)
def _get_trs_opts(service_id):
"""
Look up stored parameters for tool registry services.
"""
return trs_config()[service_id]
def _init_http_client(service_id=None, opts=None):
"""
Initialize and configure HTTP requests client for selected service.
"""
if service_id:
opts = _get_trs_opts(service_id)
http_client = RequestsClient()
http_client.set_api_key(host=opts['host'],
api_key=opts['auth'],
param_in='header')
return http_client
class TRSInterface:
def toolsGet(self):
raise NotImplementedError
def metadataGet(self):
raise NotImplementedError
def toolsIdGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionGet(self, tool_id, tool_version):
raise NotImplementedError
def toolsIdVersionsGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
raise NotImplementedError
class TRSAdapter(TRSInterface):
"""
Adapter class for TRS client functionality.
Args:
trs_client: ...
"""
def __init__(self, trs_client):
self.trs_client = trs_client
def toolsGet(self):
return self.trs_client.get_tools()
def metadataGet(self):
raise self.trs_client.get_tool_types()
def toolsIdGet(self, tool_id):
return self.trs_client.get_tool(tool_id)
def toolsIdVersionGet(self, tool_id, tool_version):
return self.trs_client.get_tool_version(tool_id, tool_version)
def toolsIdVersionsGet(self, tool_id):
return self.trs_client.get_tool_versions(tool_id)
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
return self.trs_client.get_tool_descriptor(tool_id, tool_version, descriptor_type)
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
return self.trs_client.get_relative_tool_descriptor(tool_id, tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
return self.trs_client.get_tool_tests(tool_id, tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
return self.trs_client.get_tools_with_relative_path(tool_id, tool_version, descriptor_type)
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
return self.trs_client.get_tool_container_specs(tool_id, tool_version)
def load_trs_client(service_id, http_client=None):
"""Return an API client for the selected workflow execution service."""
trs_client = TRSClient(service=_get_trs_opts(service_id))
return TRSAdapter(trs_client)
|
flexible
|
{
"blob_id": "d122267e1da2d9cf68d245148bb496dfba3e7d19",
"index": 4467,
"step-1": "<mask token>\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return 
self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def 
toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n\n def toolsIdGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n 
tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef _get_trs_opts(service_id):\n \"\"\"\n Look up stored parameters for tool registry services.\n \"\"\"\n return trs_config()[service_id]\n\n\ndef _init_http_client(service_id=None, opts=None):\n \"\"\"\n Initialize and configure HTTP requests client for selected service.\n \"\"\"\n if service_id:\n opts = _get_trs_opts(service_id)\n http_client = RequestsClient()\n http_client.set_api_key(host=opts['host'], api_key=opts['auth'],\n param_in='header')\n return http_client\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n\n def toolsIdGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, 
tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\ndef load_trs_client(service_id, http_client=None):\n \"\"\"Return an API client for the selected workflow execution service.\"\"\"\n trs_client = TRSClient(service=_get_trs_opts(service_id))\n return TRSAdapter(trs_client)\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nLoad API client for a Tool Registry Service (TRS) endpoint based\neither on the GA4GH specification or an existing client library.\n\"\"\"\nimport logging\n\nfrom bravado.requests_client import RequestsClient\n\nfrom ga4ghtest.core.config import trs_config\nfrom .client import TRSClient\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_trs_opts(service_id):\n \"\"\"\n Look up stored parameters for tool registry services.\n \"\"\"\n return trs_config()[service_id]\n\n\ndef _init_http_client(service_id=None, opts=None):\n \"\"\"\n Initialize and configure HTTP requests client for selected service.\n \"\"\"\n if service_id:\n opts = _get_trs_opts(service_id)\n\n http_client = RequestsClient()\n\n http_client.set_api_key(host=opts['host'],\n api_key=opts['auth'],\n param_in='header')\n return http_client\n\n\nclass TRSInterface:\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n\n def toolsIdGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n def __init__(self, trs_client):\n 
self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version, descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id, tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id, tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\ndef load_trs_client(service_id, http_client=None):\n \"\"\"Return an API client for the selected workflow execution service.\"\"\"\n trs_client = TRSClient(service=_get_trs_opts(service_id))\n return TRSAdapter(trs_client)\n",
"step-ids": [
18,
21,
24,
27,
30
]
}
|
[
18,
21,
24,
27,
30
] |
# Generated by Django 3.1.2 on 2021-07-02 05:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('asset', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='balance',
name='title',
),
]
|
normal
|
{
"blob_id": "257f18db95e069c037341d2af372269e988b0a80",
"index": 536,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('asset', '0001_initial')]\n operations = [migrations.RemoveField(model_name='balance', name='title')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('asset', '0001_initial')]\n operations = [migrations.RemoveField(model_name='balance', name='title')]\n",
"step-5": "# Generated by Django 3.1.2 on 2021-07-02 05:38\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('asset', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='balance',\n name='title',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class StravaAuthConfig(AppConfig):
name = "strava.contrib.strava_django"
verbose_name = _("Strava Auth")
def ready(self):
pass
|
normal
|
{
"blob_id": "9e43eb3c3ab3be4e695dbc80aa005332b8d8a4ec",
"index": 9515,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StravaAuthConfig(AppConfig):\n <mask token>\n <mask token>\n\n def ready(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass StravaAuthConfig(AppConfig):\n name = 'strava.contrib.strava_django'\n verbose_name = _('Strava Auth')\n\n def ready(self):\n pass\n",
"step-4": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass StravaAuthConfig(AppConfig):\n name = 'strava.contrib.strava_django'\n verbose_name = _('Strava Auth')\n\n def ready(self):\n pass\n",
"step-5": "from django.apps import AppConfig\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass StravaAuthConfig(AppConfig):\n name = \"strava.contrib.strava_django\"\n verbose_name = _(\"Strava Auth\")\n\n def ready(self):\n pass\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/python
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
engine = create_engine("sqlite:///banco.db")
Base = declarative_base()
Session = sessionmaker()
Session.configure(bind=engine)
session = Session()
class Funcionario(Base):
__tablename__ = 'funcionario'
id = Column(Integer,primary_key=True)
nome = Column(String)
dependente = relationship("Dependente")
class Dependente(Base):
__tablename__ = "dependente"
id = Column(Integer,primary_key=True)
nome = Column(String)
funcionario_id = Column(Integer,ForeignKey("funcionario.id"))
if __name__ == "__main__":
Base.metadata.create_all(engine)
# Buscando funcionario
result = session.query(Funcionario,Dependente) \
.join(Dependente) \
.filter(Funcionario.id==1).first()
dependente = session.query(Dependente).filter_by(id=1).first()
session.delete(dependente)
session.commit()
print "Funcionario: ",result.Funcionario.nome
for d in result.Funcionario.dependente:
print "Dependente: ",d.nome
|
normal
|
{
"blob_id": "6d5257158a7d2eef63faf2fea27f36721d4349ae",
"index": 4273,
"step-1": "#!/usr/bin/python\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\n\nengine = create_engine(\"sqlite:///banco.db\")\nBase = declarative_base()\nSession = sessionmaker()\nSession.configure(bind=engine)\nsession = Session()\n\nclass Funcionario(Base):\n __tablename__ = 'funcionario'\n id = Column(Integer,primary_key=True)\n nome = Column(String)\n dependente = relationship(\"Dependente\")\n\nclass Dependente(Base):\n __tablename__ = \"dependente\"\n id = Column(Integer,primary_key=True)\n nome = Column(String)\n funcionario_id = Column(Integer,ForeignKey(\"funcionario.id\"))\n\nif __name__ == \"__main__\":\n Base.metadata.create_all(engine)\n # Buscando funcionario\n result = session.query(Funcionario,Dependente) \\\n .join(Dependente) \\\n .filter(Funcionario.id==1).first()\n dependente = session.query(Dependente).filter_by(id=1).first()\n session.delete(dependente)\n session.commit()\n print \"Funcionario: \",result.Funcionario.nome\n for d in result.Funcionario.dependente:\n print \"Dependente: \",d.nome\n \n\n\n\n\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def split_the_bill(x):
owed_dict = {}
sum = 0
people = 0
for key in x:
sum = sum + x[key]
people = people + 1
price_pp = sum / people
for key in x:
owed_value = x[key] - price_pp
owed_dict[key] = round(owed_value, 2)
return owed_dict
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def split_the_bill(x):
owed_dict = {}
sum = 0
people = 0
for key in x:
sum = sum + x[key]
people = people + 1
price_pp = sum / people
for key in x:
owed_value = x[key] - price_pp
owed_dict[key] = round(owed_value, 2)
return owed_dict
split_the_bill(group)
<|reserved_special_token_1|>
group = {'A': 20, 'B': 15, 'C': 10}
def split_the_bill(x):
owed_dict = {}
sum = 0
people = 0
for key in x:
sum = sum + x[key]
people = people + 1
price_pp = sum / people
for key in x:
owed_value = x[key] - price_pp
owed_dict[key] = round(owed_value, 2)
return owed_dict
split_the_bill(group)
|
flexible
|
{
"blob_id": "69d7e7eb644a67ee921086005f0a55f39507f361",
"index": 2864,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef split_the_bill(x):\n owed_dict = {}\n sum = 0\n people = 0\n for key in x:\n sum = sum + x[key]\n people = people + 1\n price_pp = sum / people\n for key in x:\n owed_value = x[key] - price_pp\n owed_dict[key] = round(owed_value, 2)\n return owed_dict\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef split_the_bill(x):\n owed_dict = {}\n sum = 0\n people = 0\n for key in x:\n sum = sum + x[key]\n people = people + 1\n price_pp = sum / people\n for key in x:\n owed_value = x[key] - price_pp\n owed_dict[key] = round(owed_value, 2)\n return owed_dict\n\n\nsplit_the_bill(group)\n",
"step-4": "group = {'A': 20, 'B': 15, 'C': 10}\n\n\ndef split_the_bill(x):\n owed_dict = {}\n sum = 0\n people = 0\n for key in x:\n sum = sum + x[key]\n people = people + 1\n price_pp = sum / people\n for key in x:\n owed_value = x[key] - price_pp\n owed_dict[key] = round(owed_value, 2)\n return owed_dict\n\n\nsplit_the_bill(group)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SoftmaxWithLossLayer:
<|reserved_special_token_0|>
def __init__(self):
self.y = None
self.t = None
def forward(self, x, t):
"""
x: input to softmax
t: teacher data
"""
self.t = t
self.y = softmax(x)
loss = cross_entropy_error(self.y, self.t)
return loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
doutdx = (self.y - self.t) / batch_size
return doutdx
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SoftmaxWithLossLayer:
"""
x -> [Softmax] -> y -> [CrossEntropyError with t] -> out
In the textbook, this class has `loss` field.
"""
def __init__(self):
self.y = None
self.t = None
def forward(self, x, t):
"""
x: input to softmax
t: teacher data
"""
self.t = t
self.y = softmax(x)
loss = cross_entropy_error(self.y, self.t)
return loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
doutdx = (self.y - self.t) / batch_size
return doutdx
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.pardir)
<|reserved_special_token_0|>
class SoftmaxWithLossLayer:
"""
x -> [Softmax] -> y -> [CrossEntropyError with t] -> out
In the textbook, this class has `loss` field.
"""
def __init__(self):
self.y = None
self.t = None
def forward(self, x, t):
"""
x: input to softmax
t: teacher data
"""
self.t = t
self.y = softmax(x)
loss = cross_entropy_error(self.y, self.t)
return loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
doutdx = (self.y - self.t) / batch_size
return doutdx
if __name__ == '__main__':
softmax_with_loss_layer = SoftmaxWithLossLayer()
x = np.array([5, 1, 0])
t = np.array([1, 0, 0])
loss = softmax_with_loss_layer.forward(x, t)
print('loss = {0}'.format(loss))
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])
ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])
loss = softmax_with_loss_layer.forward(xs, ts)
print('loss = {0}'.format(loss))
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
<|reserved_special_token_1|>
import sys
import os
sys.path.append(os.pardir)
from ch03.softmax import softmax
from ch04.cross_entropy_error_batch import cross_entropy_error
import numpy as np
class SoftmaxWithLossLayer:
"""
x -> [Softmax] -> y -> [CrossEntropyError with t] -> out
In the textbook, this class has `loss` field.
"""
def __init__(self):
self.y = None
self.t = None
def forward(self, x, t):
"""
x: input to softmax
t: teacher data
"""
self.t = t
self.y = softmax(x)
loss = cross_entropy_error(self.y, self.t)
return loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
doutdx = (self.y - self.t) / batch_size
return doutdx
if __name__ == '__main__':
softmax_with_loss_layer = SoftmaxWithLossLayer()
x = np.array([5, 1, 0])
t = np.array([1, 0, 0])
loss = softmax_with_loss_layer.forward(x, t)
print('loss = {0}'.format(loss))
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])
ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])
loss = softmax_with_loss_layer.forward(xs, ts)
print('loss = {0}'.format(loss))
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
<|reserved_special_token_1|>
import sys
import os
sys.path.append(os.pardir)
from ch03.softmax import softmax
from ch04.cross_entropy_error_batch import cross_entropy_error
import numpy as np
class SoftmaxWithLossLayer:
"""
x -> [Softmax] -> y -> [CrossEntropyError with t] -> out
In the textbook, this class has `loss` field.
"""
def __init__(self):
self.y = None # output from Softmax
self.t = None # teacher data
def forward(self, x, t):
"""
x: input to softmax
t: teacher data
"""
self.t = t
self.y = softmax(x)
loss = cross_entropy_error(self.y, self.t)
return loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
doutdx = (self.y - self.t) / batch_size
return doutdx
if __name__ == '__main__':
softmax_with_loss_layer = SoftmaxWithLossLayer()
# forward(non-batch)
x = np.array([5, 1, 0]) # x is like t
t = np.array([1, 0, 0])
loss = softmax_with_loss_layer.forward(x, t)
print('loss = {0}'.format(loss))
# backward
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
# forward(batch)
xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]]) # x[1] and x[2] have large difference with t
ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])
loss = softmax_with_loss_layer.forward(xs, ts)
print('loss = {0}'.format(loss))
# backward
dout = 1
doutdx = softmax_with_loss_layer.backward(dout)
print('doutdx = {0}'.format(doutdx))
|
flexible
|
{
"blob_id": "8ae64c65d6d5dc9f2a99aeceff31657deff06c15",
"index": 5236,
"step-1": "<mask token>\n\n\nclass SoftmaxWithLossLayer:\n <mask token>\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\nif __name__ == '__main__':\n softmax_with_loss_layer = SoftmaxWithLossLayer()\n x = np.array([5, 1, 0])\n t = np.array([1, 0, 0])\n loss = softmax_with_loss_layer.forward(x, t)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])\n ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])\n loss = softmax_with_loss_layer.forward(xs, ts)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n",
"step-4": "import sys\nimport os\nsys.path.append(os.pardir)\nfrom ch03.softmax import softmax\nfrom ch04.cross_entropy_error_batch import cross_entropy_error\nimport numpy as np\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None\n self.t = None\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\nif __name__ == '__main__':\n softmax_with_loss_layer = SoftmaxWithLossLayer()\n x = np.array([5, 1, 0])\n t = np.array([1, 0, 0])\n loss = softmax_with_loss_layer.forward(x, t)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]])\n ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])\n loss = softmax_with_loss_layer.forward(xs, ts)\n print('loss = {0}'.format(loss))\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n",
"step-5": "import sys\nimport os\nsys.path.append(os.pardir)\nfrom ch03.softmax import softmax\nfrom ch04.cross_entropy_error_batch import cross_entropy_error\nimport numpy as np\n\n\nclass SoftmaxWithLossLayer:\n \"\"\"\n x -> [Softmax] -> y -> [CrossEntropyError with t] -> out\n\n In the textbook, this class has `loss` field.\n \"\"\"\n\n def __init__(self):\n self.y = None # output from Softmax\n self.t = None # teacher data\n\n def forward(self, x, t):\n \"\"\"\n x: input to softmax\n t: teacher data\n \"\"\"\n self.t = t\n self.y = softmax(x)\n loss = cross_entropy_error(self.y, self.t)\n return loss\n\n def backward(self, dout=1):\n batch_size = self.t.shape[0]\n doutdx = (self.y - self.t) / batch_size\n return doutdx\n\n\nif __name__ == '__main__':\n softmax_with_loss_layer = SoftmaxWithLossLayer()\n\n # forward(non-batch)\n x = np.array([5, 1, 0]) # x is like t\n t = np.array([1, 0, 0])\n loss = softmax_with_loss_layer.forward(x, t)\n print('loss = {0}'.format(loss))\n\n # backward\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n\n # forward(batch)\n xs = np.array([[5, 1, 0], [3, 0, 2], [1, 1, 5], [4, 1, 1]]) # x[1] and x[2] have large difference with t\n ts = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0]])\n loss = softmax_with_loss_layer.forward(xs, ts)\n print('loss = {0}'.format(loss))\n\n # backward\n dout = 1\n doutdx = softmax_with_loss_layer.backward(dout)\n print('doutdx = {0}'.format(doutdx))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(New_list)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
lst = [1, -3, 4, -56, 7, 3, -8, -5, 2, 4, 9]
New_list = list(filter(lambda x: x > 0, lst))
print(New_list)
<|reserved_special_token_1|>
'''4. Write a Python program to filter the positive numbers from a list.'''
lst = [1, -3, 4, -56, 7, 3, -8, -5, 2, 4, 9]
New_list = list(filter(lambda x: x > 0, lst))
print(New_list)
|
flexible
|
{
"blob_id": "d61151859390ab1c907ac3753143312da434981e",
"index": 2624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(New_list)\n",
"step-3": "<mask token>\nlst = [1, -3, 4, -56, 7, 3, -8, -5, 2, 4, 9]\nNew_list = list(filter(lambda x: x > 0, lst))\nprint(New_list)\n",
"step-4": "'''4. Write a Python program to filter the positive numbers from a list.'''\n\nlst = [1, -3, 4, -56, 7, 3, -8, -5, 2, 4, 9]\nNew_list = list(filter(lambda x: x > 0, lst))\nprint(New_list)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
"""
WHITE, GRAY, BLACK = range(3)
def dfs(graph, node, lookup):
if lookup[node] != WHITE:
return lookup[node] == BLACK
lookup[node] = GRAY
if any(not dfs(graph, child, lookup) for child in graph[node]):
return False
lookup[node] = BLACK
return True
lookup = [WHITE] * len(graph)
return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph))
)
<|reserved_special_token_1|>
# Time: O(|V| + |E|)
# Space: O(|V|)
class Solution(object):
def eventualSafeNodes(self, graph):
"""
:type graph: List[List[int]]
:rtype: List[int]
"""
WHITE, GRAY, BLACK = range(3)
def dfs(graph, node, lookup):
if lookup[node] != WHITE:
return lookup[node] == BLACK
lookup[node] = GRAY
if any(not dfs(graph, child, lookup) for child in graph[node]):
return False
lookup[node] = BLACK
return True
lookup = [WHITE]*len(graph)
return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph)))
|
flexible
|
{
"blob_id": "5c5cfcd240c8b05970dc8dff57bfbbdc98f1d100",
"index": 9838,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def eventualSafeNodes(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: List[int]\n \"\"\"\n WHITE, GRAY, BLACK = range(3)\n\n def dfs(graph, node, lookup):\n if lookup[node] != WHITE:\n return lookup[node] == BLACK\n lookup[node] = GRAY\n if any(not dfs(graph, child, lookup) for child in graph[node]):\n return False\n lookup[node] = BLACK\n return True\n lookup = [WHITE] * len(graph)\n return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph))\n )\n",
"step-4": "# Time: O(|V| + |E|)\n# Space: O(|V|)\n\nclass Solution(object):\n def eventualSafeNodes(self, graph):\n \"\"\"\n :type graph: List[List[int]]\n :rtype: List[int]\n \"\"\"\n WHITE, GRAY, BLACK = range(3)\n\n def dfs(graph, node, lookup):\n if lookup[node] != WHITE:\n return lookup[node] == BLACK\n lookup[node] = GRAY\n if any(not dfs(graph, child, lookup) for child in graph[node]):\n return False\n lookup[node] = BLACK\n return True\n\n lookup = [WHITE]*len(graph)\n return filter(lambda node: dfs(graph, node, lookup), xrange(len(graph)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MissingTransitionException(InvalidConfigException):
"""
Describes a capability that is missing.
"""
def __init__(self, transitions):
self.transitions = transitions
super(InvalidConfigException, self).__init__(
'Missing transition detected')
<|reserved_special_token_0|>
class Suppressions(admin.MConfigHandler):
"""
Set up supported arguments
"""
REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':
'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':
'ACTION_RELOAD'}
WRITE_CAPABILITY = 'edit_suppressions'
PARAM_DISABLED = 'disabled'
PARAM_SEARCH = 'search'
PARAM_DESCRIPTION = 'description'
VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]
REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]
CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}
DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'
DEFAULT_OWNER = 'nobody'
DEFAULT_DISABLED = 0
def setup(self):
logger.info('Setting up suppressions_rest_handler')
self.setWriteCapability(Suppressions.WRITE_CAPABILITY)
if (self.requestedAction == admin.ACTION_EDIT or self.
requestedAction == admin.ACTION_CREATE):
for arg in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addReqArg(arg)
for arg in Suppressions.VALID_PARAMS:
if arg not in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addOptArg(arg)
def handleCreate(self, confInfo):
"""Handles creation of a suppression."""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs.data
if not name or len(name) == 0:
raise admin.ArgValidationException(
'The name of the suppression must not be empty')
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention')
if name in self.readConf('eventtypes'):
raise admin.AlreadyExistsException(
'A suppression entry already exists for %s' % name)
disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)
search = _getFieldValue(args, Suppressions.PARAM_SEARCH)
description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)
conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self
.getSessionKey())
conf.namespace = self.appName
conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
conf['name'] = name
_addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)
_addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)
_addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)
log_data = {'action': 'create', 'suppression': conf['name'][len(
NotableEventSuppression.SUPPRESSION_START):], 'user': conf[
'eai:acl']['owner'], 'status': 'success', 'signature':
'Notable event suppression successfully created'}
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The configuration for the new suppression '%s' is invalid and could not be created: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
log_data['signature'] = 'Unable to save the event suppression'
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info('Successfully added suppression: %s', name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleCustom(self, confInfo):
logger.info('Handling custom action: %s', self.customAction)
if self.customAction == '_autodisable':
expired_count, enabled_count = (NotableEventSuppression.
disable_expired_suppressions(session_key=self.getSessionKey()))
logger.info(
'%s expired suppressions detected; %s were enabled (now disabled)'
, expired_count, enabled_count)
else:
self.actionNotImplemented()
def handleList(self, confInfo):
"""
Handles listing of a suppression
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
suppressionDict = self.readConfCtx('eventtypes')
if suppressionDict != None:
for stanza, settings in suppressionDict.items():
stanzaMatch = NotableEventSuppression.suppressionRE.match(
stanza)
if stanzaMatch:
try:
Suppressions.checkConf(settings, stanza, confInfo)
except InvalidConfigException as e:
logger.error(
"The configuration for suppression '%s' is invalid: %s"
, stanza, str(e))
logger.info('%s completed successfully', actionStr)
def handleReload(self, confInfo=None, makeCSV=True):
"""
Handles refresh/reload of the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
logger.info(
'Refreshing suppression configurations via properties endpoint')
try:
refreshInfo = entity.refreshEntities('properties/eventtypes',
sessionKey=self.getSessionKey())
except Exception as e:
logger.warn(
'Could not refresh suppression configurations via properties endpoint: %s'
, str(e))
logger.info('%s completed successfully', actionStr)
def handleEdit(self, confInfo):
"""
Handles edits to the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs
if name is not None:
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention'
)
try:
conf = entity.getEntity('saved/eventtypes', name,
sessionKey=self.getSessionKey())
except ResourceNotFound:
raise admin.NotFoundException(
"A suppression configuration with the given name '%s' could not be found"
% name)
else:
raise admin.ArgValidationException('No name provided')
log_data = {'status': 'success', 'action': 'edit', 'signature':
'Notable event suppression successfully saved', 'suppression':
name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':
conf['eai:userName']}
for key, val in conf.items():
if key in args.data:
new_value = args[key][0]
if new_value in [None, '']:
new_value = ' '
if key == self.PARAM_DISABLED:
conf_key = util.normalizeBoolean(conf[key],
enableStrictMode=True)
new_value = util.normalizeBoolean(new_value,
enableStrictMode=True)
if conf_key != new_value:
log_data['action'
] = 'disable' if new_value else 'enable'
log_data['signature'] = (
'Suppression successfully disabled' if
new_value else 'Suppression successfully enabled')
conf[key] = new_value
if key == admin.EAI_ENTRY_ACL:
for k, v in self.CONF_KEY_MAPPING.iteritems():
if k in val and val[k] is not None and len(val[k]) > 0:
setattr(conf, v, val[k])
if conf.namespace is None or len(conf.namespace) == 0:
conf.namespace = Suppressions.DEFAULT_NAMESPACE
if conf.owner is None or len(conf.owner) == 0:
conf.owner = Suppressions.DEFAULT_OWNER
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The edit attempt for the suppression '%s' produced an invalid configuration: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
if log_data['action'] == 'edit':
log_data['signature'] = 'Unable to save the event suppression'
elif log_data['action'] == 'enable':
log_data['signature'
] = 'Error occurred while enabling the suppression: ' + str(
e)
else:
log_data['signature'
] = 'Error occurred while disabling the suppression: ' + str(
e)
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info("Successfully updated the '%s' suppression", name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleRemove(self, confInfo):
owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,
namespace=self.appName, owner=owner, sessionKey=self.
getSessionKey())
@staticmethod
def checkConf(settings, stanza=None, confInfo=None,
throwExceptionOnError=False):
"""
Checks the settings and raises an exception if the configuration is invalid.
"""
required_fields = Suppressions.REQUIRED_PARAMS[:]
if stanza is not None and confInfo is not None:
for key, val in settings.items():
if val is None:
val = ''
if key in Suppressions.VALID_PARAMS:
confInfo[stanza].append(key, val)
elif key.startswith(admin.EAI_ENTRY_ACL):
confInfo[stanza].setMetadata(key, val)
elif key.startswith(admin.EAI_META_PREFIX):
confInfo[stanza].append(key, val)
else:
pass
logger.info("Checking general settings for the '%s' suppression",
stanza)
for key, val in settings.items():
if val is None:
val = ''
if key == Suppressions.PARAM_DISABLED:
try:
util.normalizeBoolean(val, enableStrictMode=True)
try:
required_fields.remove(key)
except ValueError:
pass
except ValueError:
raise InvalidParameterValueException(key, val,
'must be a valid boolean')
elif key in Suppressions.REQUIRED_PARAMS:
try:
required_fields.remove(key)
except ValueError:
pass
elif key in Suppressions.VALID_PARAMS:
pass
elif key.startswith(admin.EAI_META_PREFIX):
pass
elif throwExceptionOnError:
raise UnsupportedParameterException()
else:
logger.warn(
"The configuration for '%s' contains an unsupported parameter: %s"
, stanza, key)
if len(required_fields) > 0:
raise InvalidConfigException(
'The following fields must be defined in the configuration but were not: '
+ ', '.join(required_fields).strip())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UnsupportedParameterException(InvalidConfigException):
"""
Describes a config parameter that is unsupported.
"""
pass
class MissingTransitionException(InvalidConfigException):
"""
Describes a capability that is missing.
"""
def __init__(self, transitions):
self.transitions = transitions
super(InvalidConfigException, self).__init__(
'Missing transition detected')
<|reserved_special_token_0|>
class Suppressions(admin.MConfigHandler):
"""
Set up supported arguments
"""
REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':
'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':
'ACTION_RELOAD'}
WRITE_CAPABILITY = 'edit_suppressions'
PARAM_DISABLED = 'disabled'
PARAM_SEARCH = 'search'
PARAM_DESCRIPTION = 'description'
VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]
REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]
CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}
DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'
DEFAULT_OWNER = 'nobody'
DEFAULT_DISABLED = 0
def setup(self):
logger.info('Setting up suppressions_rest_handler')
self.setWriteCapability(Suppressions.WRITE_CAPABILITY)
if (self.requestedAction == admin.ACTION_EDIT or self.
requestedAction == admin.ACTION_CREATE):
for arg in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addReqArg(arg)
for arg in Suppressions.VALID_PARAMS:
if arg not in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addOptArg(arg)
def handleCreate(self, confInfo):
"""Handles creation of a suppression."""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs.data
if not name or len(name) == 0:
raise admin.ArgValidationException(
'The name of the suppression must not be empty')
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention')
if name in self.readConf('eventtypes'):
raise admin.AlreadyExistsException(
'A suppression entry already exists for %s' % name)
disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)
search = _getFieldValue(args, Suppressions.PARAM_SEARCH)
description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)
conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self
.getSessionKey())
conf.namespace = self.appName
conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
conf['name'] = name
_addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)
_addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)
_addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)
log_data = {'action': 'create', 'suppression': conf['name'][len(
NotableEventSuppression.SUPPRESSION_START):], 'user': conf[
'eai:acl']['owner'], 'status': 'success', 'signature':
'Notable event suppression successfully created'}
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The configuration for the new suppression '%s' is invalid and could not be created: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
log_data['signature'] = 'Unable to save the event suppression'
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info('Successfully added suppression: %s', name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleCustom(self, confInfo):
logger.info('Handling custom action: %s', self.customAction)
if self.customAction == '_autodisable':
expired_count, enabled_count = (NotableEventSuppression.
disable_expired_suppressions(session_key=self.getSessionKey()))
logger.info(
'%s expired suppressions detected; %s were enabled (now disabled)'
, expired_count, enabled_count)
else:
self.actionNotImplemented()
def handleList(self, confInfo):
"""
Handles listing of a suppression
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
suppressionDict = self.readConfCtx('eventtypes')
if suppressionDict != None:
for stanza, settings in suppressionDict.items():
stanzaMatch = NotableEventSuppression.suppressionRE.match(
stanza)
if stanzaMatch:
try:
Suppressions.checkConf(settings, stanza, confInfo)
except InvalidConfigException as e:
logger.error(
"The configuration for suppression '%s' is invalid: %s"
, stanza, str(e))
logger.info('%s completed successfully', actionStr)
def handleReload(self, confInfo=None, makeCSV=True):
"""
Handles refresh/reload of the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
logger.info(
'Refreshing suppression configurations via properties endpoint')
try:
refreshInfo = entity.refreshEntities('properties/eventtypes',
sessionKey=self.getSessionKey())
except Exception as e:
logger.warn(
'Could not refresh suppression configurations via properties endpoint: %s'
, str(e))
logger.info('%s completed successfully', actionStr)
def handleEdit(self, confInfo):
"""
Handles edits to the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs
if name is not None:
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention'
)
try:
conf = entity.getEntity('saved/eventtypes', name,
sessionKey=self.getSessionKey())
except ResourceNotFound:
raise admin.NotFoundException(
"A suppression configuration with the given name '%s' could not be found"
% name)
else:
raise admin.ArgValidationException('No name provided')
log_data = {'status': 'success', 'action': 'edit', 'signature':
'Notable event suppression successfully saved', 'suppression':
name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':
conf['eai:userName']}
for key, val in conf.items():
if key in args.data:
new_value = args[key][0]
if new_value in [None, '']:
new_value = ' '
if key == self.PARAM_DISABLED:
conf_key = util.normalizeBoolean(conf[key],
enableStrictMode=True)
new_value = util.normalizeBoolean(new_value,
enableStrictMode=True)
if conf_key != new_value:
log_data['action'
] = 'disable' if new_value else 'enable'
log_data['signature'] = (
'Suppression successfully disabled' if
new_value else 'Suppression successfully enabled')
conf[key] = new_value
if key == admin.EAI_ENTRY_ACL:
for k, v in self.CONF_KEY_MAPPING.iteritems():
if k in val and val[k] is not None and len(val[k]) > 0:
setattr(conf, v, val[k])
if conf.namespace is None or len(conf.namespace) == 0:
conf.namespace = Suppressions.DEFAULT_NAMESPACE
if conf.owner is None or len(conf.owner) == 0:
conf.owner = Suppressions.DEFAULT_OWNER
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The edit attempt for the suppression '%s' produced an invalid configuration: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
if log_data['action'] == 'edit':
log_data['signature'] = 'Unable to save the event suppression'
elif log_data['action'] == 'enable':
log_data['signature'
] = 'Error occurred while enabling the suppression: ' + str(
e)
else:
log_data['signature'
] = 'Error occurred while disabling the suppression: ' + str(
e)
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info("Successfully updated the '%s' suppression", name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleRemove(self, confInfo):
owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,
namespace=self.appName, owner=owner, sessionKey=self.
getSessionKey())
@staticmethod
def checkConf(settings, stanza=None, confInfo=None,
throwExceptionOnError=False):
"""
Checks the settings and raises an exception if the configuration is invalid.
"""
required_fields = Suppressions.REQUIRED_PARAMS[:]
if stanza is not None and confInfo is not None:
for key, val in settings.items():
if val is None:
val = ''
if key in Suppressions.VALID_PARAMS:
confInfo[stanza].append(key, val)
elif key.startswith(admin.EAI_ENTRY_ACL):
confInfo[stanza].setMetadata(key, val)
elif key.startswith(admin.EAI_META_PREFIX):
confInfo[stanza].append(key, val)
else:
pass
logger.info("Checking general settings for the '%s' suppression",
stanza)
for key, val in settings.items():
if val is None:
val = ''
if key == Suppressions.PARAM_DISABLED:
try:
util.normalizeBoolean(val, enableStrictMode=True)
try:
required_fields.remove(key)
except ValueError:
pass
except ValueError:
raise InvalidParameterValueException(key, val,
'must be a valid boolean')
elif key in Suppressions.REQUIRED_PARAMS:
try:
required_fields.remove(key)
except ValueError:
pass
elif key in Suppressions.VALID_PARAMS:
pass
elif key.startswith(admin.EAI_META_PREFIX):
pass
elif throwExceptionOnError:
raise UnsupportedParameterException()
else:
logger.warn(
"The configuration for '%s' contains an unsupported parameter: %s"
, stanza, key)
if len(required_fields) > 0:
raise InvalidConfigException(
'The following fields must be defined in the configuration but were not: '
+ ', '.join(required_fields).strip())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InvalidParameterValueException(InvalidConfigException):
<|reserved_special_token_0|>
def __init__(self, field, value, value_must_be):
message = (
"The value for the parameter '%s' is invalid: %s (was %s)" % (
field, value_must_be, value))
super(InvalidConfigException, self).__init__(message)
class UnsupportedParameterException(InvalidConfigException):
"""
Describes a config parameter that is unsupported.
"""
pass
class MissingTransitionException(InvalidConfigException):
"""
Describes a capability that is missing.
"""
def __init__(self, transitions):
self.transitions = transitions
super(InvalidConfigException, self).__init__(
'Missing transition detected')
<|reserved_special_token_0|>
class Suppressions(admin.MConfigHandler):
"""
Set up supported arguments
"""
REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':
'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':
'ACTION_RELOAD'}
WRITE_CAPABILITY = 'edit_suppressions'
PARAM_DISABLED = 'disabled'
PARAM_SEARCH = 'search'
PARAM_DESCRIPTION = 'description'
VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]
REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]
CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}
DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'
DEFAULT_OWNER = 'nobody'
DEFAULT_DISABLED = 0
def setup(self):
logger.info('Setting up suppressions_rest_handler')
self.setWriteCapability(Suppressions.WRITE_CAPABILITY)
if (self.requestedAction == admin.ACTION_EDIT or self.
requestedAction == admin.ACTION_CREATE):
for arg in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addReqArg(arg)
for arg in Suppressions.VALID_PARAMS:
if arg not in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addOptArg(arg)
def handleCreate(self, confInfo):
"""Handles creation of a suppression."""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs.data
if not name or len(name) == 0:
raise admin.ArgValidationException(
'The name of the suppression must not be empty')
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention')
if name in self.readConf('eventtypes'):
raise admin.AlreadyExistsException(
'A suppression entry already exists for %s' % name)
disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)
search = _getFieldValue(args, Suppressions.PARAM_SEARCH)
description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)
conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self
.getSessionKey())
conf.namespace = self.appName
conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
conf['name'] = name
_addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)
_addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)
_addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)
log_data = {'action': 'create', 'suppression': conf['name'][len(
NotableEventSuppression.SUPPRESSION_START):], 'user': conf[
'eai:acl']['owner'], 'status': 'success', 'signature':
'Notable event suppression successfully created'}
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The configuration for the new suppression '%s' is invalid and could not be created: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
log_data['signature'] = 'Unable to save the event suppression'
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info('Successfully added suppression: %s', name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleCustom(self, confInfo):
logger.info('Handling custom action: %s', self.customAction)
if self.customAction == '_autodisable':
expired_count, enabled_count = (NotableEventSuppression.
disable_expired_suppressions(session_key=self.getSessionKey()))
logger.info(
'%s expired suppressions detected; %s were enabled (now disabled)'
, expired_count, enabled_count)
else:
self.actionNotImplemented()
def handleList(self, confInfo):
"""
Handles listing of a suppression
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
suppressionDict = self.readConfCtx('eventtypes')
if suppressionDict != None:
for stanza, settings in suppressionDict.items():
stanzaMatch = NotableEventSuppression.suppressionRE.match(
stanza)
if stanzaMatch:
try:
Suppressions.checkConf(settings, stanza, confInfo)
except InvalidConfigException as e:
logger.error(
"The configuration for suppression '%s' is invalid: %s"
, stanza, str(e))
logger.info('%s completed successfully', actionStr)
def handleReload(self, confInfo=None, makeCSV=True):
"""
Handles refresh/reload of the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
logger.info(
'Refreshing suppression configurations via properties endpoint')
try:
refreshInfo = entity.refreshEntities('properties/eventtypes',
sessionKey=self.getSessionKey())
except Exception as e:
logger.warn(
'Could not refresh suppression configurations via properties endpoint: %s'
, str(e))
logger.info('%s completed successfully', actionStr)
def handleEdit(self, confInfo):
"""
Handles edits to the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs
if name is not None:
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention'
)
try:
conf = entity.getEntity('saved/eventtypes', name,
sessionKey=self.getSessionKey())
except ResourceNotFound:
raise admin.NotFoundException(
"A suppression configuration with the given name '%s' could not be found"
% name)
else:
raise admin.ArgValidationException('No name provided')
log_data = {'status': 'success', 'action': 'edit', 'signature':
'Notable event suppression successfully saved', 'suppression':
name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':
conf['eai:userName']}
for key, val in conf.items():
if key in args.data:
new_value = args[key][0]
if new_value in [None, '']:
new_value = ' '
if key == self.PARAM_DISABLED:
conf_key = util.normalizeBoolean(conf[key],
enableStrictMode=True)
new_value = util.normalizeBoolean(new_value,
enableStrictMode=True)
if conf_key != new_value:
log_data['action'
] = 'disable' if new_value else 'enable'
log_data['signature'] = (
'Suppression successfully disabled' if
new_value else 'Suppression successfully enabled')
conf[key] = new_value
if key == admin.EAI_ENTRY_ACL:
for k, v in self.CONF_KEY_MAPPING.iteritems():
if k in val and val[k] is not None and len(val[k]) > 0:
setattr(conf, v, val[k])
if conf.namespace is None or len(conf.namespace) == 0:
conf.namespace = Suppressions.DEFAULT_NAMESPACE
if conf.owner is None or len(conf.owner) == 0:
conf.owner = Suppressions.DEFAULT_OWNER
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The edit attempt for the suppression '%s' produced an invalid configuration: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
if log_data['action'] == 'edit':
log_data['signature'] = 'Unable to save the event suppression'
elif log_data['action'] == 'enable':
log_data['signature'
] = 'Error occurred while enabling the suppression: ' + str(
e)
else:
log_data['signature'
] = 'Error occurred while disabling the suppression: ' + str(
e)
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info("Successfully updated the '%s' suppression", name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleRemove(self, confInfo):
owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,
namespace=self.appName, owner=owner, sessionKey=self.
getSessionKey())
@staticmethod
def checkConf(settings, stanza=None, confInfo=None,
throwExceptionOnError=False):
"""
Checks the settings and raises an exception if the configuration is invalid.
"""
required_fields = Suppressions.REQUIRED_PARAMS[:]
if stanza is not None and confInfo is not None:
for key, val in settings.items():
if val is None:
val = ''
if key in Suppressions.VALID_PARAMS:
confInfo[stanza].append(key, val)
elif key.startswith(admin.EAI_ENTRY_ACL):
confInfo[stanza].setMetadata(key, val)
elif key.startswith(admin.EAI_META_PREFIX):
confInfo[stanza].append(key, val)
else:
pass
logger.info("Checking general settings for the '%s' suppression",
stanza)
for key, val in settings.items():
if val is None:
val = ''
if key == Suppressions.PARAM_DISABLED:
try:
util.normalizeBoolean(val, enableStrictMode=True)
try:
required_fields.remove(key)
except ValueError:
pass
except ValueError:
raise InvalidParameterValueException(key, val,
'must be a valid boolean')
elif key in Suppressions.REQUIRED_PARAMS:
try:
required_fields.remove(key)
except ValueError:
pass
elif key in Suppressions.VALID_PARAMS:
pass
elif key.startswith(admin.EAI_META_PREFIX):
pass
elif throwExceptionOnError:
raise UnsupportedParameterException()
else:
logger.warn(
"The configuration for '%s' contains an unsupported parameter: %s"
, stanza, key)
if len(required_fields) > 0:
raise InvalidConfigException(
'The following fields must be defined in the configuration but were not: '
+ ', '.join(required_fields).strip())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# On Windows, switch the standard streams to binary mode (O_BINARY) so that
# data passing through them is not subject to CRLF newline translation.
if sys.platform == 'win32':
    import os, msvcrt
    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
<|reserved_special_token_0|>
sys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-Utils', 'lib']))
<|reserved_special_token_0|>
logger.setLevel(logging.INFO)
class InvalidConfigException(Exception):
    """Base class for errors describing an invalid suppression configuration."""
    pass
class InvalidParameterValueException(InvalidConfigException):
    """
    Describes a config parameter that has an invalid value.
    """

    def __init__(self, field, value, value_must_be):
        # Build a human-readable message naming the bad parameter and value.
        message = (
            "The value for the parameter '%s' is invalid: %s (was %s)" % (
            field, value_must_be, value))
        # BUGFIX: super() must name this class, not its base class;
        # naming the base skips the base's own __init__ in the MRO.
        super(InvalidParameterValueException, self).__init__(message)
class UnsupportedParameterException(InvalidConfigException):
    """Raised when a configuration stanza contains a parameter that this
    handler does not recognize."""
    pass
class MissingTransitionException(InvalidConfigException):
    """
    Describes a capability that is missing.
    """

    def __init__(self, transitions):
        # Keep the offending transitions so callers can report them.
        self.transitions = transitions
        # BUGFIX: super() must name this class, not its base class.
        super(MissingTransitionException, self).__init__(
            'Missing transition detected')
def _getFieldValue(args, name, default_value=None, max_length=None):
    """Return the first value for *name* from the argument list.

    Arguments:
    args -- mapping of argument name to list of values
    name -- the argument to look up
    default_value -- returned when the argument is absent or empty
    max_length -- when set, the maximum allowed length of the value

    Raises admin.ArgValidationException when the value exceeds max_length.
    """
    # Fall back to the default when the argument is missing or falsy.
    value = (args[name][0] or default_value) if name in args else default_value

    # Enforce the optional length limit.
    if value and max_length and len(value) > max_length:
        # BUGFIX: the message previously always said "App" regardless of
        # which field was too long; report the actual field name instead.
        raise admin.ArgValidationException(
            "The value of '%s' cannot be longer than %s character%s." %
            (name, max_length, 's' if max_length > 1 else ''))
    return value
def _addToDictIfNonNull(d, name, value):
    """Store *value* under *name* in dictionary *d*, skipping None values.

    Arguments:
    d -- the dictionary to add to
    name -- the key under which the value is stored
    value -- the value to store (ignored when None)
    """
    if value is None:
        return
    d[name] = value
class Suppressions(admin.MConfigHandler):
    """REST handler managing notable event suppressions, which are stored
    as stanzas in eventtypes.conf."""

    # Readable names for admin.py requested-action codes (used in log lines).
    REQUESTED_ACTIONS = {
        '1': 'ACTION_CREATE',
        '2': 'ACTION_LIST',
        '4': 'ACTION_EDIT',
        '8': 'ACTION_REMOVE',
        '16': 'ACTION_MEMBERS',
        '32': 'ACTION_RELOAD',
    }

    # Capability required for write operations.
    WRITE_CAPABILITY = 'edit_suppressions'

    # Supported stanza parameters.
    PARAM_DISABLED = 'disabled'
    PARAM_SEARCH = 'search'
    PARAM_DESCRIPTION = 'description'
    VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]
    REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]

    # Maps EAI ACL keys onto entity attribute names used when saving.
    CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}

    # Fallbacks applied when an entity has no namespace/owner set.
    DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'
    DEFAULT_OWNER = 'nobody'
    DEFAULT_DISABLED = 0
def setup(self):
logger.info('Setting up suppressions_rest_handler')
self.setWriteCapability(Suppressions.WRITE_CAPABILITY)
if (self.requestedAction == admin.ACTION_EDIT or self.
requestedAction == admin.ACTION_CREATE):
for arg in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addReqArg(arg)
for arg in Suppressions.VALID_PARAMS:
if arg not in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addOptArg(arg)
def handleCreate(self, confInfo):
"""Handles creation of a suppression."""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs.data
if not name or len(name) == 0:
raise admin.ArgValidationException(
'The name of the suppression must not be empty')
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention')
if name in self.readConf('eventtypes'):
raise admin.AlreadyExistsException(
'A suppression entry already exists for %s' % name)
disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)
search = _getFieldValue(args, Suppressions.PARAM_SEARCH)
description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)
conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self
.getSessionKey())
conf.namespace = self.appName
conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
conf['name'] = name
_addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)
_addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)
_addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)
log_data = {'action': 'create', 'suppression': conf['name'][len(
NotableEventSuppression.SUPPRESSION_START):], 'user': conf[
'eai:acl']['owner'], 'status': 'success', 'signature':
'Notable event suppression successfully created'}
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The configuration for the new suppression '%s' is invalid and could not be created: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
log_data['signature'] = 'Unable to save the event suppression'
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info('Successfully added suppression: %s', name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleCustom(self, confInfo):
logger.info('Handling custom action: %s', self.customAction)
if self.customAction == '_autodisable':
expired_count, enabled_count = (NotableEventSuppression.
disable_expired_suppressions(session_key=self.getSessionKey()))
logger.info(
'%s expired suppressions detected; %s were enabled (now disabled)'
, expired_count, enabled_count)
else:
self.actionNotImplemented()
def handleList(self, confInfo):
"""
Handles listing of a suppression
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
suppressionDict = self.readConfCtx('eventtypes')
if suppressionDict != None:
for stanza, settings in suppressionDict.items():
stanzaMatch = NotableEventSuppression.suppressionRE.match(
stanza)
if stanzaMatch:
try:
Suppressions.checkConf(settings, stanza, confInfo)
except InvalidConfigException as e:
logger.error(
"The configuration for suppression '%s' is invalid: %s"
, stanza, str(e))
logger.info('%s completed successfully', actionStr)
def handleReload(self, confInfo=None, makeCSV=True):
"""
Handles refresh/reload of the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
logger.info(
'Refreshing suppression configurations via properties endpoint')
try:
refreshInfo = entity.refreshEntities('properties/eventtypes',
sessionKey=self.getSessionKey())
except Exception as e:
logger.warn(
'Could not refresh suppression configurations via properties endpoint: %s'
, str(e))
logger.info('%s completed successfully', actionStr)
def handleEdit(self, confInfo):
"""
Handles edits to the configuration options
"""
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs
if name is not None:
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException(
'The name of the suppression must follow proper convention'
)
try:
conf = entity.getEntity('saved/eventtypes', name,
sessionKey=self.getSessionKey())
except ResourceNotFound:
raise admin.NotFoundException(
"A suppression configuration with the given name '%s' could not be found"
% name)
else:
raise admin.ArgValidationException('No name provided')
log_data = {'status': 'success', 'action': 'edit', 'signature':
'Notable event suppression successfully saved', 'suppression':
name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':
conf['eai:userName']}
for key, val in conf.items():
if key in args.data:
new_value = args[key][0]
if new_value in [None, '']:
new_value = ' '
if key == self.PARAM_DISABLED:
conf_key = util.normalizeBoolean(conf[key],
enableStrictMode=True)
new_value = util.normalizeBoolean(new_value,
enableStrictMode=True)
if conf_key != new_value:
log_data['action'
] = 'disable' if new_value else 'enable'
log_data['signature'] = (
'Suppression successfully disabled' if
new_value else 'Suppression successfully enabled')
conf[key] = new_value
if key == admin.EAI_ENTRY_ACL:
for k, v in self.CONF_KEY_MAPPING.iteritems():
if k in val and val[k] is not None and len(val[k]) > 0:
setattr(conf, v, val[k])
if conf.namespace is None or len(conf.namespace) == 0:
conf.namespace = Suppressions.DEFAULT_NAMESPACE
if conf.owner is None or len(conf.owner) == 0:
conf.owner = Suppressions.DEFAULT_OWNER
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = (
"The edit attempt for the suppression '%s' produced an invalid configuration: %s"
% (name, str(e)))
logger.error(e)
log_data['status'] = 'failure'
if log_data['action'] == 'edit':
log_data['signature'] = 'Unable to save the event suppression'
elif log_data['action'] == 'enable':
log_data['signature'
] = 'Error occurred while enabling the suppression: ' + str(
e)
else:
log_data['signature'
] = 'Error occurred while disabling the suppression: ' + str(
e)
logger.error(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
raise admin.ArgValidationException(e)
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info("Successfully updated the '%s' suppression", name)
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info(
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'
.format(**log_data))
def handleRemove(self, confInfo):
owner = (self.context == admin.CONTEXT_APP_AND_USER and self.
userName or '-')
entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,
namespace=self.appName, owner=owner, sessionKey=self.
getSessionKey())
@staticmethod
def checkConf(settings, stanza=None, confInfo=None,
throwExceptionOnError=False):
"""
Checks the settings and raises an exception if the configuration is invalid.
"""
required_fields = Suppressions.REQUIRED_PARAMS[:]
if stanza is not None and confInfo is not None:
for key, val in settings.items():
if val is None:
val = ''
if key in Suppressions.VALID_PARAMS:
confInfo[stanza].append(key, val)
elif key.startswith(admin.EAI_ENTRY_ACL):
confInfo[stanza].setMetadata(key, val)
elif key.startswith(admin.EAI_META_PREFIX):
confInfo[stanza].append(key, val)
else:
pass
logger.info("Checking general settings for the '%s' suppression",
stanza)
for key, val in settings.items():
if val is None:
val = ''
if key == Suppressions.PARAM_DISABLED:
try:
util.normalizeBoolean(val, enableStrictMode=True)
try:
required_fields.remove(key)
except ValueError:
pass
except ValueError:
raise InvalidParameterValueException(key, val,
'must be a valid boolean')
elif key in Suppressions.REQUIRED_PARAMS:
try:
required_fields.remove(key)
except ValueError:
pass
elif key in Suppressions.VALID_PARAMS:
pass
elif key.startswith(admin.EAI_META_PREFIX):
pass
elif throwExceptionOnError:
raise UnsupportedParameterException()
else:
logger.warn(
"The configuration for '%s' contains an unsupported parameter: %s"
, stanza, key)
if len(required_fields) > 0:
raise InvalidConfigException(
'The following fields must be defined in the configuration but were not: '
+ ', '.join(required_fields).strip())
admin.init(Suppressions, admin.CONTEXT_APP_AND_USER)
<|reserved_special_token_1|>
"""
Copyright (C) 2005 - 2016 Splunk Inc. All Rights Reserved.
"""
import logging
import sys
# On Windows, switch the standard streams to binary mode (O_BINARY) so that
# data passing through them is not subject to CRLF newline translation.
if sys.platform == "win32":
    import os, msvcrt
    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
import splunk.admin as admin
import splunk.entity as entity
import splunk.util as util
from notable_event_suppression import NotableEventSuppression
from splunk import ResourceNotFound
from splunk.clilib.bundle_paths import make_splunkhome_path
sys.path.append(make_splunkhome_path(["etc", "apps", "SA-Utils", "lib"]))
from SolnCommon.log import setup_logger, SHORT_FORMAT
logger = setup_logger('suppressions_rest_handler', format=SHORT_FORMAT)
logger.setLevel(logging.INFO)
class InvalidConfigException(Exception):
    """Base class for errors describing an invalid suppression configuration."""
    pass
class InvalidParameterValueException(InvalidConfigException):
    """
    Describes a config parameter that has an invalid value.
    """

    def __init__(self, field, value, value_must_be):
        # Build a human-readable message naming the bad parameter and value.
        message = "The value for the parameter '%s' is invalid: %s (was %s)" % (field, value_must_be, value)
        # BUGFIX: super() must name this class, not its base class;
        # naming the base skips the base's own __init__ in the MRO.
        super(InvalidParameterValueException, self).__init__(message)
class UnsupportedParameterException(InvalidConfigException):
    """Raised when a configuration stanza contains a parameter that this
    handler does not recognize."""
    pass
class MissingTransitionException(InvalidConfigException):
    """
    Describes a capability that is missing.
    """

    def __init__(self, transitions):
        # Keep the offending transitions so callers can report them.
        self.transitions = transitions
        # BUGFIX: super() must name this class, not its base class.
        super(MissingTransitionException, self).__init__("Missing transition detected")
def _getFieldValue(args, name, default_value=None, max_length=None):
    """Return the first value for *name* from the argument list.

    Arguments:
    args -- mapping of argument name to list of values
    name -- the argument to look up
    default_value -- returned when the argument is absent or empty
    max_length -- when set, the maximum allowed length of the value

    Raises admin.ArgValidationException when the value exceeds max_length.
    """
    # Fall back to the default when the argument is missing or falsy.
    value = (args[name][0] or default_value) if name in args else default_value

    # Enforce the optional length limit.
    if value and max_length and len(value) > max_length:
        # BUGFIX: the message previously always said "App" regardless of
        # which field was too long; report the actual field name instead.
        raise admin.ArgValidationException(
            "The value of '%s' cannot be longer than %s character%s." %
            (name, max_length, 's' if max_length > 1 else ''))
    return value
def _addToDictIfNonNull(d, name, value):
    """Store *value* under *name* in dictionary *d*, skipping None values.

    Arguments:
    d -- the dictionary to add to
    name -- the key under which the value is stored
    value -- the value to store (ignored when None)
    """
    if value is None:
        return
    d[name] = value
class Suppressions(admin.MConfigHandler):
    """REST handler managing notable event suppressions, which are stored
    as stanzas in eventtypes.conf."""

    # Readable names for admin.py requested-action codes (used in log lines).
    REQUESTED_ACTIONS = {
        '1': 'ACTION_CREATE',
        '2': 'ACTION_LIST',
        '4': 'ACTION_EDIT',
        '8': 'ACTION_REMOVE',
        '16': 'ACTION_MEMBERS',
        '32': 'ACTION_RELOAD',
    }

    # Capability required for write operations.
    WRITE_CAPABILITY = 'edit_suppressions'

    # Supported stanza parameters.
    PARAM_DISABLED = 'disabled'
    PARAM_SEARCH = 'search'
    PARAM_DESCRIPTION = 'description'
    VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]
    REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]

    # Maps EAI ACL keys onto entity attribute names used when saving.
    CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}

    # Fallbacks applied when an entity has no namespace/owner set.
    DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'
    DEFAULT_OWNER = 'nobody'
    DEFAULT_DISABLED = 0
def setup(self):
logger.info('Setting up suppressions_rest_handler')
# set write capability
self.setWriteCapability(Suppressions.WRITE_CAPABILITY)
if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:
# Fill required params
for arg in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addReqArg(arg)
# Fill valid params
for arg in Suppressions.VALID_PARAMS:
if arg not in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addOptArg(arg)
def handleCreate(self, confInfo):
'''Handles creation of a suppression.'''
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
# Refresh
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs.data
# Make sure the name is not empty
if not name or len(name) == 0:
raise admin.ArgValidationException("The name of the suppression must not be empty")
# Make sure the name follows the convention
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException("The name of the suppression must follow proper convention")
# Make sure the item does not already exist
if name in self.readConf('eventtypes'):
raise admin.AlreadyExistsException("A suppression entry already exists for %s" % (name))
# Get the field values
disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)
search = _getFieldValue(args, Suppressions.PARAM_SEARCH)
description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)
# Add the field values to a configuration dictionary (that will be verified)
conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self.getSessionKey())
conf.namespace = self.appName # always save things to SOME app context.
conf.owner = self.context == admin.CONTEXT_APP_AND_USER and self.userName or "-"
conf['name'] = name
_addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)
_addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)
_addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)
## Notable Suppression Audit Log Data
log_data = {
'action': 'create',
'suppression': conf['name'][len(NotableEventSuppression.SUPPRESSION_START):],
'user': conf['eai:acl']['owner'],
'status': 'success',
'signature': 'Notable event suppression successfully created'
}
# Check the configuration
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = "The configuration for the new suppression '%s' is invalid and could not be created: %s" % (name, str(e))
logger.error(e)
log_data['status'] = 'failure'
log_data['signature'] = 'Unable to save the event suppression'
logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
raise admin.ArgValidationException(e)
# Write out an update to the eventtypes config file
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info('Successfully added suppression: %s', name)
# Reload suppressions
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
def handleCustom(self, confInfo):
logger.info('Handling custom action: %s', self.customAction)
if self.customAction == '_autodisable':
expired_count, enabled_count = NotableEventSuppression.disable_expired_suppressions(session_key=self.getSessionKey())
logger.info("%s expired suppressions detected; %s were enabled (now disabled)", expired_count, enabled_count)
else:
self.actionNotImplemented()
def handleList(self, confInfo):
"""
Handles listing of a suppression
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
# Get the configurations from suppression.conf
suppressionDict = self.readConfCtx('eventtypes')
# Get all suppressions and provide the relevant options
if suppressionDict != None:
# Check each conf
for stanza, settings in suppressionDict.items():
stanzaMatch = NotableEventSuppression.suppressionRE.match(stanza)
if stanzaMatch:
try:
# Check config
Suppressions.checkConf(settings, stanza, confInfo)
except InvalidConfigException as e:
logger.error("The configuration for suppression '%s' is invalid: %s", stanza, str(e))
logger.info('%s completed successfully', actionStr)
def handleReload(self, confInfo=None, makeCSV=True):
"""
Handles refresh/reload of the configuration options
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
logger.info('Refreshing suppression configurations via properties endpoint')
try:
refreshInfo = entity.refreshEntities('properties/eventtypes', sessionKey=self.getSessionKey())
except Exception as e:
logger.warn('Could not refresh suppression configurations via properties endpoint: %s', str(e))
logger.info('%s completed successfully', actionStr)
def handleEdit(self, confInfo):
"""
Handles edits to the configuration options
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
# Refresh
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs
if name is not None:
# Make sure the name follows the convention
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException("The name of the suppression must follow proper convention")
try:
conf = entity.getEntity('saved/eventtypes', name, sessionKey=self.getSessionKey())
except ResourceNotFound:
raise admin.NotFoundException("A suppression configuration with the given name '%s' could not be found" % (name))
else:
# Stop if no name was provided
raise admin.ArgValidationException("No name provided")
## Notable Suppression Audit Log Data
log_data = {
'status': 'success',
'action': 'edit',
'signature': 'Notable event suppression successfully saved',
'suppression': name[len(NotableEventSuppression.SUPPRESSION_START):],
'user': conf['eai:userName']
}
# Create the resulting configuration that would be persisted if the settings provided are applied
for key, val in conf.items():
if key in args.data:
# Set the value to a single space so that the field is set to a blank value
new_value = args[key][0]
if new_value in [None, '']:
new_value = ' '
## If a value other than the 'disabled' param is changed, it
# came from the editor, otherwise the lister.
if key == self.PARAM_DISABLED:
conf_key = util.normalizeBoolean(conf[key], enableStrictMode=True)
new_value = util.normalizeBoolean(new_value, enableStrictMode=True)
if conf_key != new_value:
log_data['action'] = 'disable' if new_value else 'enable'
log_data['signature'] = 'Suppression successfully disabled' if new_value else 'Suppression successfully enabled'
conf[key] = new_value
if key == admin.EAI_ENTRY_ACL:
for k, v in self.CONF_KEY_MAPPING.iteritems():
if k in val and val[k] is not None and len(val[k]) > 0:
setattr(conf, v, val[k])
if conf.namespace is None or len(conf.namespace) == 0:
conf.namespace = Suppressions.DEFAULT_NAMESPACE
if conf.owner is None or len(conf.owner) == 0:
conf.owner = Suppressions.DEFAULT_OWNER
try:
# Check config
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = "The edit attempt for the suppression '%s' produced an invalid configuration: %s" % (name, str(e))
logger.error(e)
log_data['status'] = 'failure'
if log_data['action'] == 'edit':
log_data['signature'] = 'Unable to save the event suppression'
elif log_data['action'] == 'enable':
log_data['signature'] = 'Error occurred while enabling the suppression: ' + str(e)
else:
log_data['signature'] = 'Error occurred while disabling the suppression: ' + str(e)
logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
raise admin.ArgValidationException(e)
# Write out an update to the eventtypes config file
entity.setEntity(conf, sessionKey=self.getSessionKey())
# Log that the suppression was updated
logger.info("Successfully updated the '%s' suppression", name)
# Reload suppressions
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
def handleRemove(self, confInfo):
owner = ((self.context == admin.CONTEXT_APP_AND_USER) and self.userName) or "-"
entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id, namespace=self.appName, owner=owner, sessionKey=self.getSessionKey())
@staticmethod
def checkConf(settings, stanza=None, confInfo=None, throwExceptionOnError=False):
"""
Checks the settings and raises an exception if the configuration is invalid.
"""
# Below is a list of the required fields. The entries in this list will be removed as they
# are observed. An empty list at the end of the config check indicates that all necessary
# fields where provided.
required_fields = Suppressions.REQUIRED_PARAMS[:]
if stanza is not None and confInfo is not None:
# Add each of the settings
for key, val in settings.items():
# Set val to empty if None
if val is None:
val = ''
if key in Suppressions.VALID_PARAMS:
confInfo[stanza].append(key, val)
# Key is eai; Set meta
elif key.startswith(admin.EAI_ENTRY_ACL):
confInfo[stanza].setMetadata(key, val)
# Key is eai; userName/appName
elif key.startswith(admin.EAI_META_PREFIX):
confInfo[stanza].append(key, val)
# Key is not proper
else:
pass
# Check each of the settings individually
logger.info("Checking general settings for the '%s' suppression", stanza)
for key, val in settings.items():
# Set val to empty if None
if val is None:
val = ''
# Check the disabled/selected value
if key == Suppressions.PARAM_DISABLED:
try:
util.normalizeBoolean(val, enableStrictMode=True)
# Remove the field from the list of required fields
try:
required_fields.remove(key)
except ValueError:
pass # Field not available, probably because it is not required
except ValueError:
raise InvalidParameterValueException(key, val, "must be a valid boolean")
elif key in Suppressions.REQUIRED_PARAMS:
# Remove the field from the list of required fields
try:
required_fields.remove(key)
except ValueError:
pass # Field not available, probably because it is not required
elif key in Suppressions.VALID_PARAMS:
pass
# Key is eai
elif key.startswith(admin.EAI_META_PREFIX):
pass
# Key is not proper
else:
if throwExceptionOnError:
raise UnsupportedParameterException()
else:
logger.warn("The configuration for '%s' contains an unsupported parameter: %s", stanza, key)
# Error if some of the required fields were not provided
if len(required_fields) > 0:
raise InvalidConfigException('The following fields must be defined in the configuration but were not: ' + ', '.join(required_fields).strip())
# Initialize the handler: register the Suppressions REST handler with
# Splunk's admin framework, scoped to the app + user context.
admin.init(Suppressions, admin.CONTEXT_APP_AND_USER)
|
flexible
|
{
"blob_id": "675dc9467dd6db9c2a429941af56d78d6c0e1c08",
"index": 4135,
"step-1": "<mask token>\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\n<mask token>\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the suppression must not be empty')\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise 
admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n 
logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in 
Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an 
invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n else:\n pass\n logger.info(\"Checking general settings for the '%s' suppression\",\n stanza)\n for key, val in settings.items():\n if 
val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\n<mask token>\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the 
suppression must not be empty')\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; 
action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, 
confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n 
Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, 
val)\n else:\n pass\n logger.info(\"Checking general settings for the '%s' suppression\",\n stanza)\n for key, val in settings.items():\n if val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass InvalidParameterValueException(InvalidConfigException):\n <mask token>\n\n def __init__(self, field, value, value_must_be):\n message = (\n \"The value for the parameter '%s' is invalid: %s (was %s)\" % (\n field, value_must_be, value))\n super(InvalidConfigException, self).__init__(message)\n\n\nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\n<mask token>\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in 
Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the suppression must not be empty')\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise 
admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = 
entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in 
val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in 
settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n else:\n pass\n logger.info(\"Checking general settings for the '%s' suppression\",\n stanza)\n for key, val in settings.items():\n if val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\n<mask token>\n",
"step-4": "<mask token>\nif sys.platform == 'win32':\n import os, msvcrt\n msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)\n<mask token>\nsys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-Utils', 'lib']))\n<mask token>\nlogger.setLevel(logging.INFO)\n\n\nclass InvalidConfigException(Exception):\n pass\n\n\nclass InvalidParameterValueException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that has an invalid value.\n \"\"\"\n\n def __init__(self, field, value, value_must_be):\n message = (\n \"The value for the parameter '%s' is invalid: %s (was %s)\" % (\n field, value_must_be, value))\n super(InvalidConfigException, self).__init__(message)\n\n\nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\ndef _getFieldValue(args, name, default_value=None, max_length=None):\n \"\"\"Get the field value from the argument list.\"\"\"\n value = args[name][0] or default_value if name in args else default_value\n if value and max_length and len(value) > max_length:\n raise admin.ArgValidationException(\n 'App %s cannot be longer than %s character%s.' 
% (name,\n max_length, 's' if max_length > 1 else ''))\n return value\n\n\ndef _addToDictIfNonNull(d, name, value):\n \"\"\"Add the given name and value to the dictionary if the value is not none.\n \n Arguments:\n d -- the dictionary to add to\n name -- the name of the object to add\n value -- the value of the object to add (if not none)\n \"\"\"\n if value is not None:\n d[name] = value\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the suppression must not be empty')\n nameMatch = 
NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; 
user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration 
options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException 
as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n else:\n pass\n logger.info(\"Checking general settings for the 
'%s' suppression\",\n stanza)\n for key, val in settings.items():\n if val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\nadmin.init(Suppressions, admin.CONTEXT_APP_AND_USER)\n",
"step-5": "\"\"\"\nCopyright (C) 2005 - 2016 Splunk Inc. All Rights Reserved.\n\"\"\"\nimport logging\nimport sys\n\nif sys.platform == \"win32\":\n import os, msvcrt\n msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)\n\nimport splunk.admin as admin\nimport splunk.entity as entity\nimport splunk.util as util\n\nfrom notable_event_suppression import NotableEventSuppression\nfrom splunk import ResourceNotFound\nfrom splunk.clilib.bundle_paths import make_splunkhome_path\nsys.path.append(make_splunkhome_path([\"etc\", \"apps\", \"SA-Utils\", \"lib\"]))\nfrom SolnCommon.log import setup_logger, SHORT_FORMAT\n\nlogger = setup_logger('suppressions_rest_handler', format=SHORT_FORMAT)\nlogger.setLevel(logging.INFO)\n\n\nclass InvalidConfigException(Exception):\n pass\n\n\nclass InvalidParameterValueException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that has an invalid value.\n \"\"\"\n \n def __init__(self, field, value, value_must_be):\n message = \"The value for the parameter '%s' is invalid: %s (was %s)\" % (field, value_must_be, value)\n super(InvalidConfigException, self).__init__(message)\n \n \nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\"Missing transition detected\")\n \n \ndef _getFieldValue(args, name, default_value=None, max_length=None):\n '''Get the field value from the argument list.'''\n \n # Get the value if defined or the default value if not defined\n value = args[name][0] or default_value if name in args else default_value\n \n # Check the length\n if value and max_length and len(value) > max_length:\n 
raise admin.ArgValidationException(\n 'App %s cannot be longer than %s character%s.' % (name, max_length, \"s\" if max_length > 1 else \"\"))\n return value\n \n\ndef _addToDictIfNonNull(d, name, value):\n '''Add the given name and value to the dictionary if the value is not none.\n \n Arguments:\n d -- the dictionary to add to\n name -- the name of the object to add\n value -- the value of the object to add (if not none)\n '''\n if value is not None:\n d[name] = value\n\n \nclass Suppressions(admin.MConfigHandler):\n '''\n Set up supported arguments\n '''\n # admin.py constants\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4': 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32': 'ACTION_RELOAD'}\n\n # Permissions\n WRITE_CAPABILITY = 'edit_suppressions'\n\n # Default Params\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n \n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n \n # Configuration key mapping\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n \n # Default Vals\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n\n DEFAULT_DISABLED = 0\n \n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n \n # set write capability\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY) \n \n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE: \n # Fill required params\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n \n # Fill valid params\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n \n def handleCreate(self, confInfo):\n '''Handles creation of a suppression.'''\n \n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = 
Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n \n # Refresh\n self.handleReload()\n \n name = self.callerArgs.id\n args = self.callerArgs.data\n \n # Make sure the name is not empty\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\"The name of the suppression must not be empty\")\n \n # Make sure the name follows the convention\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n \n if not nameMatch:\n raise admin.ArgValidationException(\"The name of the suppression must follow proper convention\")\n \n # Make sure the item does not already exist\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\"A suppression entry already exists for %s\" % (name))\n \n # Get the field values\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n \n # Add the field values to a configuration dictionary (that will be verified)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self.getSessionKey())\n \n conf.namespace = self.appName # always save things to SOME app context.\n conf.owner = self.context == admin.CONTEXT_APP_AND_USER and self.userName or \"-\"\n \n conf['name'] = name\n \n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n \n ## Notable Suppression Audit Log Data\n log_data = {\n 'action': 'create',\n 'suppression': conf['name'][len(NotableEventSuppression.SUPPRESSION_START):],\n 'user': conf['eai:acl']['owner'],\n 'status': 'success',\n 'signature': 'Notable event suppression successfully created'\n }\n \n # Check the configuration\n try:\n Suppressions.checkConf(conf, name)\n \n except InvalidConfigException as e:\n e = \"The configuration for the new suppression 
'%s' is invalid and could not be created: %s\" % (name, str(e))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n raise admin.ArgValidationException(e)\n \n # Write out an update to the eventtypes config file\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n \n logger.info('Successfully added suppression: %s', name)\n \n # Reload suppressions\n self.handleReload()\n \n logger.info('%s completed successfully', actionStr)\n logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = NotableEventSuppression.disable_expired_suppressions(session_key=self.getSessionKey())\n logger.info(\"%s expired suppressions detected; %s were enabled (now disabled)\", expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo): \n \"\"\"\n Handles listing of a suppression\n \"\"\"\n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n \n self.handleReload()\n \n # Get the configurations from suppression.conf\n suppressionDict = self.readConfCtx('eventtypes')\n \n # Get all suppressions and provide the relevant options\n if suppressionDict != None:\n # Check each conf\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(stanza)\n \n if stanzaMatch:\n try:\n # Check config\n Suppressions.checkConf(settings, stanza, confInfo)\n \n except 
InvalidConfigException as e:\n logger.error(\"The configuration for suppression '%s' is invalid: %s\", stanza, str(e))\n \n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n \n logger.info('Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes', sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn('Could not refresh suppression configurations via properties endpoint: %s', str(e))\n \n logger.info('%s completed successfully', actionStr)\n \n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n \n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n \n # Refresh\n self.handleReload()\n \n name = self.callerArgs.id\n args = self.callerArgs\n \n if name is not None:\n # Make sure the name follows the convention\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n \n if not nameMatch:\n raise admin.ArgValidationException(\"The name of the suppression must follow proper convention\")\n \n try:\n conf = entity.getEntity('saved/eventtypes', name, sessionKey=self.getSessionKey())\n \n except ResourceNotFound:\n raise admin.NotFoundException(\"A suppression configuration with the given name '%s' could not be found\" % (name))\n \n else:\n # Stop if no name was provided\n raise admin.ArgValidationException(\"No name provided\")\n \n ## Notable Suppression Audit Log Data\n log_data = {\n 'status': 'success',\n 'action': 
'edit',\n 'signature': 'Notable event suppression successfully saved',\n 'suppression': name[len(NotableEventSuppression.SUPPRESSION_START):],\n 'user': conf['eai:userName']\n }\n \n # Create the resulting configuration that would be persisted if the settings provided are applied\n for key, val in conf.items():\n if key in args.data:\n \n # Set the value to a single space so that the field is set to a blank value\n new_value = args[key][0]\n \n if new_value in [None, '']:\n new_value = ' '\n \n ## If a value other than the 'disabled' param is changed, it \n # came from the editor, otherwise the lister. \n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key], enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value, enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'] = 'disable' if new_value else 'enable'\n log_data['signature'] = 'Suppression successfully disabled' if new_value else 'Suppression successfully enabled'\n \n conf[key] = new_value\n \n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n \n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n \n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n \n try:\n # Check config\n Suppressions.checkConf(conf, name)\n \n except InvalidConfigException as e:\n e = \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\" % (name, str(e))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'] = 'Error occurred while enabling the suppression: ' + str(e)\n else:\n log_data['signature'] = 'Error occurred while disabling the suppression: ' + str(e)\n \n 
logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n raise admin.ArgValidationException(e)\n \n # Write out an update to the eventtypes config file\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n \n # Log that the suppression was updated\n logger.info(\"Successfully updated the '%s' suppression\", name)\n \n # Reload suppressions\n self.handleReload()\n \n logger.info('%s completed successfully', actionStr)\n \n logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n \n def handleRemove(self, confInfo):\n owner = ((self.context == admin.CONTEXT_APP_AND_USER) and self.userName) or \"-\"\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id, namespace=self.appName, owner=owner, sessionKey=self.getSessionKey())\n \n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None, throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\" \n # Below is a list of the required fields. The entries in this list will be removed as they\n # are observed. 
An empty list at the end of the config check indicates that all necessary\n # fields where provided.\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n \n if stanza is not None and confInfo is not None:\n # Add each of the settings\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n \n # Key is eai; Set meta \n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n \n # Key is eai; userName/appName\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n \n # Key is not proper\n else:\n pass\n \n # Check each of the settings individually\n logger.info(\"Checking general settings for the '%s' suppression\", stanza)\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n # Check the disabled/selected value\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n \n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n except ValueError:\n raise InvalidParameterValueException(key, val, \"must be a valid boolean\")\n \n elif key in Suppressions.REQUIRED_PARAMS:\n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n elif key in Suppressions.VALID_PARAMS:\n pass\n \n # Key is eai\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n \n # Key is not proper\n else:\n if throwExceptionOnError:\n raise UnsupportedParameterException()\n \n else:\n logger.warn(\"The configuration for '%s' contains an unsupported parameter: %s\", stanza, key)\n\n # Error if some of the required fields were not provided\n if len(required_fields) > 0:\n raise 
InvalidConfigException('The following fields must be defined in the configuration but were not: ' + ', '.join(required_fields).strip())\n\n \n# initialize the handler\nadmin.init(Suppressions, admin.CONTEXT_APP_AND_USER)",
"step-ids": [
14,
16,
18,
23,
26
]
}
|
[
14,
16,
18,
23,
26
] |
import markovify
import argparse
import sqlite3
import time
# File locations shared by every command below:
modelFile = './data/model.json'   # serialized markovify model (JSON, written by make_model)
corpusFile = './data/corpus.txt'  # raw text corpus the model is trained on
dbFile = './data/tweets.sqlite3'  # SQLite database that receives generated tweets
def generate():
    """Load the persisted Markov model and insert generated tweets into SQLite.

    Attempts up to 168 sentences; ``make_short_sentence`` may return ``None``
    for a failed attempt, and those are skipped.  Each stored row carries the
    epoch timestamp at which it was generated.  Rows are committed in one
    batch at the end.
    """
    generate_count = 168
    # Rebuild the model from its JSON serialization; close the file promptly
    # instead of leaking the handle (original used open(...).read()).
    with open(modelFile, 'r') as model_fp:
        model = markovify.Text.from_json(model_fp.read())
    conn = sqlite3.connect(dbFile)
    try:
        c = conn.cursor()
        for _ in range(generate_count):
            content = model.make_short_sentence(140)
            generated_timestamp = int(time.time())
            if content:
                c.execute(
                    'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)',
                    (content, generated_timestamp))
                print(content)
                print(generated_timestamp)
                print('----------')
        conn.commit()
    finally:
        # Release the connection even if generation or an insert fails partway.
        conn.close()
def make_model():
    """Train a markovify model from the corpus and serialize it to ``modelFile``."""
    # Context managers close both files even on error: the original leaked
    # the corpus handle entirely and only closed the output explicitly.
    with open(corpusFile) as src:
        corpus = src.read()
    text_model = markovify.Text(corpus, state_size=4)
    with open(modelFile, mode='w') as dst:
        dst.write(text_model.to_json())
def full_gen():
    """Rebuild the Markov model from the corpus and generate tweets in one pass.

    Unlike generate(), this does not use the serialized model; it trains a
    fresh markovify model from ``corpusFile`` and inserts up to
    ``generate_count`` non-empty sentences into the ``tweets`` table.
    """
    generate_count = 168
    # Context manager closes the corpus file (the original leaked the handle).
    with open(corpusFile) as src:
        corpus = src.read()
    model = markovify.Text(corpus, state_size=4)

    conn = sqlite3.connect(dbFile)
    try:
        c = conn.cursor()
        for _ in range(generate_count):
            content = model.make_short_sentence(140, max_overlap_ratio=.8)
            generated_timestamp = int(time.time())
            # make_short_sentence() returns None on failure; skip those.
            if content:
                c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content, generated_timestamp))
                print(content)
                print(generated_timestamp)
                print('----------')
        conn.commit()
    finally:
        # Close the connection even if an insert raises.
        conn.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", action="store_true", default=False, help="Create Model JSON")
parser.add_argument("--gen", action="store_true", default=False, help="Generate from stored Model")
parser.add_argument("--full", action="store_true", default=False, help="Full Geneate")
args = parser.parse_args()
if args.gen:
generate()
elif args.model:
make_model()
else:
full_gen()
|
normal
|
{
"blob_id": "cc71c0cc1ec21dc465486fb5894c4d389c39bd62",
"index": 8164,
"step-1": "<mask token>\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n model = markovify.Text(corpus, state_size=4)\n generate_count = 168\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=0.8)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', action='store_true', default=False, help\n ='Create Model JSON')\n parser.add_argument('--gen', action='store_true', default=False, help=\n 'Generate from stored Model')\n parser.add_argument('--full', action='store_true', default=False, help=\n 'Full Geneate')\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-3": "<mask token>\nmodelFile = './data/model.json'\ncorpusFile = './data/corpus.txt'\ndbFile = './data/tweets.sqlite3'\n\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n model = markovify.Text(corpus, state_size=4)\n generate_count = 168\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=0.8)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', action='store_true', default=False, help\n ='Create Model JSON')\n parser.add_argument('--gen', action='store_true', default=False, help=\n 'Generate from stored Model')\n parser.add_argument('--full', action='store_true', default=False, help=\n 'Full Geneate')\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-4": "import markovify\nimport argparse\nimport sqlite3\nimport time\nmodelFile = './data/model.json'\ncorpusFile = './data/corpus.txt'\ndbFile = './data/tweets.sqlite3'\n\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n model = markovify.Text(corpus, state_size=4)\n generate_count = 168\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=0.8)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', action='store_true', default=False, help\n ='Create Model JSON')\n parser.add_argument('--gen', action='store_true', default=False, help=\n 'Generate from stored Model')\n parser.add_argument('--full', action='store_true', default=False, help=\n 'Full Geneate')\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-5": "import markovify\nimport argparse\nimport sqlite3\nimport time\n\nmodelFile = './data/model.json'\ncorpusFile = './data/corpus.txt'\ndbFile = './data/tweets.sqlite3'\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n\n if content:\n c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content,generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n\n conn.commit()\n\n conn.close()\n\ndef make_model():\n corpus = open(corpusFile).read()\n\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n\n model = markovify.Text(corpus, state_size=4)\n\n generate_count = 168\n\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=.8)\n generated_timestamp = int(time.time())\n\n if content:\n c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content,generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n\n conn.commit()\n\n conn.close()\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", action=\"store_true\", default=False, help=\"Create Model JSON\")\n parser.add_argument(\"--gen\", action=\"store_true\", default=False, help=\"Generate from stored Model\")\n parser.add_argument(\"--full\", action=\"store_true\", default=False, help=\"Full Geneate\")\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
NUM_CLASSES = 31
AUDIO_SR = 16000
AUDIO_LENGTH = 16000
LIBROSA_AUDIO_LENGTH = 22050
EPOCHS = 25
categories = {'stop': 0, 'nine': 1, 'off': 2, 'four': 3, 'right': 4,
'eight': 5, 'one': 6, 'bird': 7, 'dog': 8, 'no': 9, 'on': 10, 'seven':
11, 'cat': 12, 'left': 13, 'three': 14, 'tree': 15, 'bed': 16, 'zero':
17, 'happy': 18, 'sheila': 19, 'five': 20, 'down': 21, 'marvin': 22,
'six': 23, 'up': 24, 'wow': 25, 'house': 26, 'go': 27, 'yes': 28, 'two':
29, '_background_noise_': 30}
inv_categories = {(0): 'stop', (1): 'nine', (2): 'off', (3): 'four', (4):
'right', (5): 'eight', (6): 'one', (7): 'bird', (8): 'dog', (9): 'no',
(10): 'on', (11): 'seven', (12): 'cat', (13): 'left', (14): 'three', (
15): 'tree', (16): 'bed', (17): 'zero', (18): 'happy', (19): 'sheila',
(20): 'five', (21): 'down', (22): 'marvin', (23): 'six', (24): 'up', (
25): 'wow', (26): 'house', (27): 'go', (28): 'yes', (29): 'two', (30):
'_background_noise_'}
INPUT_SHAPE = 99, 40
TARGET_SHAPE = 99, 40, 1
PARSE_PARAMS = 0.025, 0.01, 40
filters = [16, 32, 64, 128, 256]
DROPOUT = 0.25
KERNEL_SIZE = 3, 3
POOL_SIZE = 2, 2
DENSE_1 = 512
DENSE_2 = 256
BATCH_SIZE = 128
PATIENCE = 5
LEARNING_RATE = 0.001
<|reserved_special_token_1|>
# Number of target classes: 30 spoken keywords plus background noise.
NUM_CLASSES = 31

# Audio parameters.
AUDIO_SR = 16000               # sample rate (Hz)
AUDIO_LENGTH = 16000           # samples per clip at AUDIO_SR (1 second)
LIBROSA_AUDIO_LENGTH = 22050   # samples per clip at librosa's default rate

EPOCHS = 25

# Label -> class-index mapping.
categories = {
    'stop': 0,
    'nine': 1,
    'off': 2,
    'four': 3,
    'right': 4,
    'eight': 5,
    'one': 6,
    'bird': 7,
    'dog': 8,
    'no': 9,
    'on': 10,
    'seven': 11,
    'cat': 12,
    'left': 13,
    'three': 14,
    'tree': 15,
    'bed': 16,
    'zero': 17,
    'happy': 18,
    'sheila': 19,
    'five': 20,
    'down': 21,
    'marvin': 22,
    'six': 23,
    'up': 24,
    'wow': 25,
    'house': 26,
    'go': 27,
    'yes': 28,
    'two': 29,
    '_background_noise_': 30,
}

# Class-index -> label mapping, derived from `categories` so the two tables
# can never drift out of sync (the original hand-maintained a duplicate).
inv_categories = {index: label for label, index in categories.items()}

# Marvin model
INPUT_SHAPE = (99, 40)        # presumably (time frames, features) - TODO confirm
TARGET_SHAPE = (99, 40, 1)    # INPUT_SHAPE with a trailing channel axis
PARSE_PARAMS = (0.025, 0.01, 40)  # NOTE(review): looks like (win_len_s, hop_s, n_coeffs) - confirm
filters = [16, 32, 64, 128, 256]  # conv filter counts per stage

DROPOUT = 0.25
KERNEL_SIZE = (3, 3)
POOL_SIZE = (2, 2)
DENSE_1 = 512
DENSE_2 = 256

BATCH_SIZE = 128
PATIENCE = 5
LEARNING_RATE = 0.001
|
flexible
|
{
"blob_id": "6a9e18cde94258b01a37f459eceaac58118b4976",
"index": 5813,
"step-1": "<mask token>\n",
"step-2": "NUM_CLASSES = 31\nAUDIO_SR = 16000\nAUDIO_LENGTH = 16000\nLIBROSA_AUDIO_LENGTH = 22050\nEPOCHS = 25\ncategories = {'stop': 0, 'nine': 1, 'off': 2, 'four': 3, 'right': 4,\n 'eight': 5, 'one': 6, 'bird': 7, 'dog': 8, 'no': 9, 'on': 10, 'seven': \n 11, 'cat': 12, 'left': 13, 'three': 14, 'tree': 15, 'bed': 16, 'zero': \n 17, 'happy': 18, 'sheila': 19, 'five': 20, 'down': 21, 'marvin': 22,\n 'six': 23, 'up': 24, 'wow': 25, 'house': 26, 'go': 27, 'yes': 28, 'two':\n 29, '_background_noise_': 30}\ninv_categories = {(0): 'stop', (1): 'nine', (2): 'off', (3): 'four', (4):\n 'right', (5): 'eight', (6): 'one', (7): 'bird', (8): 'dog', (9): 'no',\n (10): 'on', (11): 'seven', (12): 'cat', (13): 'left', (14): 'three', (\n 15): 'tree', (16): 'bed', (17): 'zero', (18): 'happy', (19): 'sheila',\n (20): 'five', (21): 'down', (22): 'marvin', (23): 'six', (24): 'up', (\n 25): 'wow', (26): 'house', (27): 'go', (28): 'yes', (29): 'two', (30):\n '_background_noise_'}\nINPUT_SHAPE = 99, 40\nTARGET_SHAPE = 99, 40, 1\nPARSE_PARAMS = 0.025, 0.01, 40\nfilters = [16, 32, 64, 128, 256]\nDROPOUT = 0.25\nKERNEL_SIZE = 3, 3\nPOOL_SIZE = 2, 2\nDENSE_1 = 512\nDENSE_2 = 256\nBATCH_SIZE = 128\nPATIENCE = 5\nLEARNING_RATE = 0.001\n",
"step-3": "NUM_CLASSES = 31\n\nAUDIO_SR = 16000\nAUDIO_LENGTH = 16000\nLIBROSA_AUDIO_LENGTH = 22050\n\nEPOCHS = 25\n\ncategories = {\n 'stop': 0,\n 'nine': 1,\n 'off': 2,\n 'four': 3,\n 'right': 4,\n 'eight': 5,\n 'one': 6,\n 'bird': 7,\n 'dog': 8,\n 'no': 9,\n 'on': 10,\n 'seven': 11,\n 'cat': 12,\n 'left': 13,\n 'three': 14,\n 'tree': 15,\n 'bed': 16,\n 'zero': 17,\n 'happy': 18,\n 'sheila': 19,\n 'five': 20,\n 'down': 21,\n 'marvin': 22,\n 'six': 23,\n 'up': 24,\n 'wow': 25,\n 'house': 26,\n 'go': 27,\n 'yes': 28,\n 'two': 29,\n '_background_noise_': 30,\n}\n\n\ninv_categories = {\n 0: 'stop',\n 1: 'nine',\n 2: 'off',\n 3: 'four',\n 4: 'right',\n 5: 'eight',\n 6: 'one',\n 7: 'bird',\n 8: 'dog',\n 9: 'no',\n 10: 'on',\n 11: 'seven',\n 12: 'cat',\n 13: 'left',\n 14: 'three',\n 15: 'tree',\n 16: 'bed',\n 17: 'zero',\n 18: 'happy',\n 19: 'sheila',\n 20: 'five',\n 21: 'down',\n 22: 'marvin',\n 23: 'six',\n 24: 'up',\n 25: 'wow',\n 26: 'house',\n 27: 'go',\n 28: 'yes',\n 29: 'two',\n 30: '_background_noise_'\n }\n\n# Marvin model\nINPUT_SHAPE = (99, 40)\nTARGET_SHAPE = (99, 40, 1)\nPARSE_PARAMS = (0.025, 0.01, 40)\nfilters = [16, 32, 64, 128, 256]\n\nDROPOUT = 0.25\nKERNEL_SIZE = (3, 3)\nPOOL_SIZE = (2, 2)\nDENSE_1 = 512\nDENSE_2 = 256\n\nBATCH_SIZE = 128\nPATIENCE = 5\nLEARNING_RATE = 0.001\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Date:
    """A calendar date parsed from a dotted 'DD.MM.YYYY'-style string.

    Components are stored as strings, exactly as they appear in the input.
    """

    def __init__(self, strDate):
        # Split once; index into the pieces so extra trailing segments are
        # ignored and too few raise IndexError, as before.
        parts = strDate.split('.')
        self.day = parts[0]
        self.month = parts[1]
        self.year = parts[2]
|
normal
|
{
"blob_id": "805fc9a26650f85227d14da972311ffbd9dbd555",
"index": 16,
"step-1": "<mask token>\n",
"step-2": "class Date:\n <mask token>\n",
"step-3": "class Date:\n\n def __init__(self, strDate):\n strDate = strDate.split('.')\n self.day = strDate[0]\n self.month = strDate[1]\n self.year = strDate[2]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class imageAnalyzer:
<|reserved_special_token_0|>
def getImage(self, img_number):
temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')
img = requests.get(self.url + '/image')
temp.write(img.content)
temp.close()
def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]
), numpy.array([40, 255, 255]))):
img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')
orig = numpy.copy(img)
try:
img = cv2.GaussianBlur(img, (7, 7), 8)
except:
pass
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ret = cv2.inRange(hsv, thresholds[0], thresholds[1])
return ret, orig
def findBoundingBoxes(self, img, orig=None, area_thresh=100,
aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):
con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
con = imutils.grab_contours(con)
if orig.any():
cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)
bound = []
for c in con:
bound.append(cv2.boundingRect(c))
bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and
aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *
y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))
for b in bound:
cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)
cv2.imwrite('vis{}.jpg'.format(0), orig)
return bound
def approx_distance(self, duckie_boxes, dist_half_screen=5,
camera_y_res=480):
distances = {}
print(duckie_boxes)
for box in duckie_boxes:
distances[box] = round(dist_half_screen * (1 / 2) * (
camera_y_res / box[3]))
distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /
box[3]))) for box in duckie_boxes]
return distances
def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):
self.getImage(temp_image)
ret = self.analyzeHSV(temp_image)
boxes = self.findBoundingBoxes(ret[0], ret[1])
duck_box_file = open(db_file, 'w')
dist = analyzer.approx_distance(boxes)
duck_box_file.write(str(dist))
duck_box_file.close()
return boxes, dist
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class imageAnalyzer:
def __init__(self, roverName='Rover03', url=
'http://192.168.1.10:5000/api/', temp_img_path='./temp'):
self.url = url + roverName
self.temp_img_path = temp_img_path
def getImage(self, img_number):
temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')
img = requests.get(self.url + '/image')
temp.write(img.content)
temp.close()
def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]
), numpy.array([40, 255, 255]))):
img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')
orig = numpy.copy(img)
try:
img = cv2.GaussianBlur(img, (7, 7), 8)
except:
pass
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ret = cv2.inRange(hsv, thresholds[0], thresholds[1])
return ret, orig
def findBoundingBoxes(self, img, orig=None, area_thresh=100,
aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):
con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
con = imutils.grab_contours(con)
if orig.any():
cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)
bound = []
for c in con:
bound.append(cv2.boundingRect(c))
bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and
aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *
y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))
for b in bound:
cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)
cv2.imwrite('vis{}.jpg'.format(0), orig)
return bound
def approx_distance(self, duckie_boxes, dist_half_screen=5,
camera_y_res=480):
distances = {}
print(duckie_boxes)
for box in duckie_boxes:
distances[box] = round(dist_half_screen * (1 / 2) * (
camera_y_res / box[3]))
distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /
box[3]))) for box in duckie_boxes]
return distances
def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):
self.getImage(temp_image)
ret = self.analyzeHSV(temp_image)
boxes = self.findBoundingBoxes(ret[0], ret[1])
duck_box_file = open(db_file, 'w')
dist = analyzer.approx_distance(boxes)
duck_box_file.write(str(dist))
duck_box_file.close()
return boxes, dist
<|reserved_special_token_0|>
while True:
boxes, dist = analyzer.capture()
time.sleep(0.5)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class imageAnalyzer:
def __init__(self, roverName='Rover03', url=
'http://192.168.1.10:5000/api/', temp_img_path='./temp'):
self.url = url + roverName
self.temp_img_path = temp_img_path
def getImage(self, img_number):
temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')
img = requests.get(self.url + '/image')
temp.write(img.content)
temp.close()
def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]
), numpy.array([40, 255, 255]))):
img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')
orig = numpy.copy(img)
try:
img = cv2.GaussianBlur(img, (7, 7), 8)
except:
pass
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ret = cv2.inRange(hsv, thresholds[0], thresholds[1])
return ret, orig
def findBoundingBoxes(self, img, orig=None, area_thresh=100,
aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):
con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
con = imutils.grab_contours(con)
if orig.any():
cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)
bound = []
for c in con:
bound.append(cv2.boundingRect(c))
bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and
aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *
y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))
for b in bound:
cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)
cv2.imwrite('vis{}.jpg'.format(0), orig)
return bound
def approx_distance(self, duckie_boxes, dist_half_screen=5,
camera_y_res=480):
distances = {}
print(duckie_boxes)
for box in duckie_boxes:
distances[box] = round(dist_half_screen * (1 / 2) * (
camera_y_res / box[3]))
distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /
box[3]))) for box in duckie_boxes]
return distances
def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):
self.getImage(temp_image)
ret = self.analyzeHSV(temp_image)
boxes = self.findBoundingBoxes(ret[0], ret[1])
duck_box_file = open(db_file, 'w')
dist = analyzer.approx_distance(boxes)
duck_box_file.write(str(dist))
duck_box_file.close()
return boxes, dist
analyzer = imageAnalyzer()
while True:
boxes, dist = analyzer.capture()
time.sleep(0.5)
<|reserved_special_token_1|>
import requests, cv2, numpy, time, imutils
class imageAnalyzer:
def __init__(self, roverName='Rover03', url=
'http://192.168.1.10:5000/api/', temp_img_path='./temp'):
self.url = url + roverName
self.temp_img_path = temp_img_path
def getImage(self, img_number):
temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')
img = requests.get(self.url + '/image')
temp.write(img.content)
temp.close()
def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]
), numpy.array([40, 255, 255]))):
img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')
orig = numpy.copy(img)
try:
img = cv2.GaussianBlur(img, (7, 7), 8)
except:
pass
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
ret = cv2.inRange(hsv, thresholds[0], thresholds[1])
return ret, orig
def findBoundingBoxes(self, img, orig=None, area_thresh=100,
aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):
con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
con = imutils.grab_contours(con)
if orig.any():
cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)
bound = []
for c in con:
bound.append(cv2.boundingRect(c))
bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and
aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *
y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))
for b in bound:
cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)
cv2.imwrite('vis{}.jpg'.format(0), orig)
return bound
def approx_distance(self, duckie_boxes, dist_half_screen=5,
camera_y_res=480):
distances = {}
print(duckie_boxes)
for box in duckie_boxes:
distances[box] = round(dist_half_screen * (1 / 2) * (
camera_y_res / box[3]))
distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /
box[3]))) for box in duckie_boxes]
return distances
def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):
self.getImage(temp_image)
ret = self.analyzeHSV(temp_image)
boxes = self.findBoundingBoxes(ret[0], ret[1])
duck_box_file = open(db_file, 'w')
dist = analyzer.approx_distance(boxes)
duck_box_file.write(str(dist))
duck_box_file.close()
return boxes, dist
analyzer = imageAnalyzer()
while True:
boxes, dist = analyzer.capture()
time.sleep(0.5)
<|reserved_special_token_1|>
import requests,cv2,numpy,time,imutils
class imageAnalyzer():
    """Pulls camera frames from a rover's HTTP API and locates duck-colored
    blobs via HSV thresholding, contour bounding boxes, and a size-based
    distance estimate."""

    def __init__(self,
                 roverName="Rover03",
                 url="http://192.168.1.10:5000/api/",
                 temp_img_path="./temp",
                 ):
        # Base endpoint for this rover, e.g. http://.../api/Rover03
        self.url = url + roverName
        # Filename prefix for temporary images written to disk.
        self.temp_img_path = temp_img_path

    def getImage(self, img_number):  # gets image from camera and saves it as temp(img_number).jpeg
        """Download the current camera frame and save it as temp<img_number>.jpeg."""
        img = requests.get(self.url + "/image")
        # 'with' guarantees the file is closed even if the write fails.
        with open(self.temp_img_path + str(img_number) + ".jpeg", "wb") as temp:
            temp.write(img.content)

    def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]), numpy.array([40, 255, 255]))):  # min, max
        """Build a binary mask of pixels inside the (min, max) HSV `thresholds`.

        Returns (mask, original_image).
        """
        img = cv2.imread(self.temp_img_path + str(img_number) + ".jpeg")
        orig = numpy.copy(img)
        try:
            # Blur to suppress pixel noise before thresholding.
            img = cv2.GaussianBlur(img, (7, 7), 8)
        except cv2.error:
            # Narrowed from a bare except: only swallow OpenCV errors
            # (e.g. imread failed and img is None); anything else surfaces.
            pass
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, thresholds[0], thresholds[1])
        return mask, orig

    def findBoundingBoxes(self, img, orig=None, area_thresh=100, aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):
        """Find contour bounding boxes in a binary mask.

        Boxes are vetoed by minimum area, aspect ratio (h/w) and vertical
        position within the 480px-tall frame.  When `orig` is a usable image,
        contours and surviving boxes are drawn on it and saved to vis0.jpg.
        """
        con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        con = imutils.grab_contours(con)
        # Bug fix: the original called orig.any() unconditionally, raising
        # AttributeError whenever orig was left at its default of None.
        draw = orig is not None and orig.any()
        if draw:
            cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)
        bound = [cv2.boundingRect(c) for c in con]
        # Veto on minimal bounding-box area, aspect ratio, and relative
        # vertical position (y origin is at the top of the image).
        bound = list(filter(
            lambda x: (x[2] * x[3] >= area_thresh)
            and (aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1])
            and 480 * y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1],
            bound))
        if draw:
            # Drawing/writing only makes sense when an image was supplied
            # (the original also dereferenced orig unconditionally here).
            for b in bound:
                cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)
            cv2.imwrite("vis{}.jpg".format(0), orig)
        return bound

    def approx_distance(self, duckie_boxes, dist_half_screen=5, camera_y_res=480):
        """Approximate the distance to each duck from its bounding-box height.

        Calibration: `dist_half_screen` is the camera-to-duck distance (cm)
        at which a duck fills half the image height, assuming constant duck
        size.  Returns a list of (box, distance) pairs, where box is
        (x_anchor, y_anchor, x_size, y_size); the y axis grows downward.
        """
        # The original also built (and immediately discarded) a dict of the
        # same distances and printed the raw boxes; that dead debug code is
        # removed.
        return [(box, round(dist_half_screen * (1 / 2) * (camera_y_res / box[3])))
                for box in duckie_boxes]

    def capture(self, temp_image=0, db_file="temp_duck_boxes.txt"):
        """Grab a frame, detect ducks, persist the distances, return both.

        Side effects: writes temp<temp_image>.jpeg, vis0.jpg and `db_file`.
        Returns (bounding_boxes, [(box, distance), ...]).
        """
        self.getImage(temp_image)
        mask, orig = self.analyzeHSV(temp_image)
        boxes = self.findBoundingBoxes(mask, orig)
        # Bug fix: the original invoked the module-level `analyzer` instance
        # here instead of `self`, breaking any independently created object.
        dist = self.approx_distance(boxes)
        with open(db_file, "w") as duck_box_file:
            duck_box_file.write(str(dist))
        return boxes, dist
# Module-level instance used by the polling loop below.
analyzer = imageAnalyzer()
# Poll the rover camera indefinitely, re-detecting ducks twice a second.
while True:
    boxes, dist = analyzer.capture()
    time.sleep(0.5)
|
flexible
|
{
"blob_id": "7d3264e9a90ebd72439f77983cbf4f9755048a85",
"index": 4300,
"step-1": "<mask token>\n\n\nclass imageAnalyzer:\n <mask token>\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n 
duck_box_file.close()\n return boxes, dist\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = 
self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\n<mask token>\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-3": "<mask token>\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = self.analyzeHSV(temp_image)\n boxes = 
self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\nanalyzer = imageAnalyzer()\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-4": "import requests, cv2, numpy, time, imutils\n\n\nclass imageAnalyzer:\n\n def __init__(self, roverName='Rover03', url=\n 'http://192.168.1.10:5000/api/', temp_img_path='./temp'):\n self.url = url + roverName\n self.temp_img_path = temp_img_path\n\n def getImage(self, img_number):\n temp = open(self.temp_img_path + str(img_number) + '.jpeg', 'wb')\n img = requests.get(self.url + '/image')\n temp.write(img.content)\n temp.close()\n\n def analyzeHSV(self, img_number, thresholds=(numpy.array([20, 100, 110]\n ), numpy.array([40, 255, 255]))):\n img = cv2.imread(self.temp_img_path + str(img_number) + '.jpeg')\n orig = numpy.copy(img)\n try:\n img = cv2.GaussianBlur(img, (7, 7), 8)\n except:\n pass\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n ret = cv2.inRange(hsv, thresholds[0], thresholds[1])\n return ret, orig\n\n def findBoundingBoxes(self, img, orig=None, area_thresh=100,\n aspect_thresh=[0.8, 1.0], y_threshold=[0, 0.6]):\n con = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n con = imutils.grab_contours(con)\n if orig.any():\n cv2.drawContours(orig, con, -1, (255, 255, 255), thickness=2)\n bound = []\n for c in con:\n bound.append(cv2.boundingRect(c))\n bound = list(filter(lambda x: x[2] * x[3] >= area_thresh and \n aspect_thresh[0] <= x[3] / x[2] <= aspect_thresh[1] and 480 *\n y_threshold[0] <= 480 - x[1] <= 480 * y_threshold[1], bound))\n for b in bound:\n cv2.rectangle(orig, b, color=(0, 0, 255), thickness=2)\n cv2.imwrite('vis{}.jpg'.format(0), orig)\n return bound\n\n def approx_distance(self, duckie_boxes, dist_half_screen=5,\n camera_y_res=480):\n distances = {}\n print(duckie_boxes)\n for box in duckie_boxes:\n distances[box] = round(dist_half_screen * (1 / 2) * (\n camera_y_res / box[3]))\n distances = [(box, round(dist_half_screen * (1 / 2) * (camera_y_res /\n box[3]))) for box in duckie_boxes]\n return distances\n\n def capture(self, temp_image=0, db_file='temp_duck_boxes.txt'):\n self.getImage(temp_image)\n ret = 
self.analyzeHSV(temp_image)\n boxes = self.findBoundingBoxes(ret[0], ret[1])\n duck_box_file = open(db_file, 'w')\n dist = analyzer.approx_distance(boxes)\n duck_box_file.write(str(dist))\n duck_box_file.close()\n return boxes, dist\n\n\nanalyzer = imageAnalyzer()\nwhile True:\n boxes, dist = analyzer.capture()\n time.sleep(0.5)\n",
"step-5": "import requests,cv2,numpy,time,imutils\r\n\r\nclass imageAnalyzer():\r\n\r\n def __init__(self,\r\n roverName=\"Rover03\",\r\n url=\"http://192.168.1.10:5000/api/\",\r\n temp_img_path = \"./temp\",\r\n ):\r\n\r\n self.url = url + roverName\r\n\r\n self.temp_img_path = temp_img_path\r\n\r\n def getImage(self,img_number): # gets image from camera and saves it as temp(img_number).jpeg\r\n\r\n temp = open(self.temp_img_path + str(img_number) + \".jpeg\", \"wb\")\r\n\r\n img = requests.get(self.url + \"/image\")\r\n\r\n temp.write(img.content)\r\n\r\n temp.close()\r\n\r\n def analyzeHSV(self,img_number,thresholds=(numpy.array([20,100,110]),numpy.array([40,255,255]))): # min, max, creates mask from HSV thresholds\r\n\r\n img = cv2.imread(self.temp_img_path + str(img_number) + \".jpeg\")\r\n\r\n orig = numpy.copy(img)\r\n\r\n try:\r\n\r\n img = cv2.GaussianBlur(img,(7,7),8)\r\n\r\n except:\r\n\r\n pass\r\n\r\n hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\r\n\r\n ret = cv2.inRange(hsv, thresholds[0],thresholds[1])\r\n\r\n return ret,orig\r\n\r\n def findBoundingBoxes(self,img,orig=None,area_thresh=100,aspect_thresh=[0.8,1.0],y_threshold=[0,0.6]): # finds contours from mask and determines bound boxes, vetoes by minimum box area, aspect ratio and vertical screen portion\r\n\r\n con = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n con = imutils.grab_contours(con)\r\n\r\n if orig.any():\r\n\r\n cv2.drawContours(orig, con, -1, (255, 255, 255),thickness=2)\r\n\r\n bound = []\r\n\r\n for c in con:\r\n\r\n bound.append(cv2.boundingRect(c))\r\n\r\n bound = list(filter(lambda x: (x[2]*x[3] >= area_thresh) and (aspect_thresh[0] <= x[3]/x[2] <= aspect_thresh[1]) and 480*y_threshold[0] <= 480-x[1] <= 480*y_threshold[1], bound)) # vetoing based on minimal bounding box area, relative position in image and aspect ratio\r\n\r\n for b in bound:\r\n\r\n cv2.rectangle(orig,b,color=(0,0,255),thickness=2)\r\n\r\n cv2.imwrite(\"vis{}.jpg\".format(0),orig)\r\n\r\n 
return bound\r\n\r\n def approx_distance(self,duckie_boxes,dist_half_screen=5,camera_y_res=480): # bounding boxes of ducks, calibration: distance in cm from camera to center of duck for duck to take up half of camera image height assuming duck size = const.\r\n\r\n distances = {}\r\n\r\n print(duckie_boxes)\r\n\r\n for box in duckie_boxes:\r\n\r\n distances[box] = round(dist_half_screen*(1/2)*(camera_y_res/box[3]))\r\n\r\n distances = [ (box, round(dist_half_screen*(1/2)*(camera_y_res/box[3]) ) ) for box in duckie_boxes] # NOTE: Y coordinate origin is from the top of the image, returns list of (rect=(x_anchor,y_anchor,x_size,y_size),distance) tuple-value pairs (note,y_size goes downwards!)\r\n\r\n return distances\r\n\r\n def capture(self,temp_image=0,db_file=\"temp_duck_boxes.txt\"): # gets image, returns bounding boxes and distances according to NOTE, creates temp images temp(n) and vis(n) with n = temp_image argument as well as distance text file\r\n\r\n self.getImage(temp_image)\r\n\r\n ret = self.analyzeHSV(temp_image)\r\n\r\n boxes = self.findBoundingBoxes(ret[0], ret[1])\r\n\r\n duck_box_file = open(db_file, \"w\")\r\n\r\n dist = analyzer.approx_distance(boxes)\r\n\r\n duck_box_file.write(str(dist))\r\n\r\n duck_box_file.close()\r\n\r\n return boxes, dist\r\n\r\n\r\nanalyzer = imageAnalyzer()\r\n\r\nwhile True:\r\n\r\n boxes, dist = analyzer.capture()\r\n\r\n time.sleep(0.5)\r\n\r\n\r\n\r\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
import requests
import toml
from pathlib import Path
imgs:list
config:dict
def parseTex(lines:list):
new_lines = []
for i, line in enumerate(lines):
if line == "\n":
continue
inline = False
if (line[0] == "$" and line[1] != "$"):
inline = True
line = line.replace("$", "")
line = line.replace("\n", "")
line = line.replace(" ", "&space;")
line = line.replace("+", "+")
new_lines.append((line, inline))
return new_lines
def addColor(lines:list, color:str):
colortag = "{\color[RGB]{" + color + "}"
return ["""\inline""" + colortag + line[0] + "}" if(line[1]) else colortag + line[0] + "}" for line in lines]
if Path("config.toml").exists():
with open("config.toml", "r") as loadconfig:
config = toml.load(loadconfig)
if config == {}:
config = {"colors": ["0, 0, 0"], "outputs": [""]}
else:
config = {"colors": ["0, 0, 0"], "outputs": [""]}
with open("tex.txt", "r") as tex:
imgs = tex.readlines()
imgs = parseTex(imgs) #returns a list of tuples, [0] is the parsed text, [1] is an inline boolean
for i, color in enumerate(config["colors"]):
coloredimgs = addColor(imgs, color)
output = "output" / Path(config["outputs"][i])
if (not output.exists()):
output.mkdir()
for j, tex in enumerate(coloredimgs):
link = "https://latex.codecogs.com/svg.latex?" + tex
print(link)
r = requests.get(link)
with open(output / ("latex" + str(j) + ".svg"), "wb") as svg:
svg.write(r.content)
|
normal
|
{
"blob_id": "dbd04f7b88fa43ae920a6744e3979dbf917d3fc6",
"index": 7649,
"step-1": "<mask token>\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '+')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\n<mask token>\n",
"step-2": "<mask token>\nimgs: list\nconfig: dict\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '+')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\nif Path('config.toml').exists():\n with open('config.toml', 'r') as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nelse:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nwith open('tex.txt', 'r') as tex:\n imgs = tex.readlines()\n<mask token>\nfor i, color in enumerate(config['colors']):\n coloredimgs = addColor(imgs, color)\n output = 'output' / Path(config['outputs'][i])\n if not output.exists():\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = 'https://latex.codecogs.com/svg.latex?' + tex\n print(link)\n r = requests.get(link)\n with open(output / ('latex' + str(j) + '.svg'), 'wb') as svg:\n svg.write(r.content)\n",
"step-3": "<mask token>\nimgs: list\nconfig: dict\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '+')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\nif Path('config.toml').exists():\n with open('config.toml', 'r') as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nelse:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nwith open('tex.txt', 'r') as tex:\n imgs = tex.readlines()\nimgs = parseTex(imgs)\nfor i, color in enumerate(config['colors']):\n coloredimgs = addColor(imgs, color)\n output = 'output' / Path(config['outputs'][i])\n if not output.exists():\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = 'https://latex.codecogs.com/svg.latex?' + tex\n print(link)\n r = requests.get(link)\n with open(output / ('latex' + str(j) + '.svg'), 'wb') as svg:\n svg.write(r.content)\n",
"step-4": "import requests\nimport toml\nfrom pathlib import Path\nimgs: list\nconfig: dict\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '+')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\nif Path('config.toml').exists():\n with open('config.toml', 'r') as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nelse:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nwith open('tex.txt', 'r') as tex:\n imgs = tex.readlines()\nimgs = parseTex(imgs)\nfor i, color in enumerate(config['colors']):\n coloredimgs = addColor(imgs, color)\n output = 'output' / Path(config['outputs'][i])\n if not output.exists():\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = 'https://latex.codecogs.com/svg.latex?' + tex\n print(link)\n r = requests.get(link)\n with open(output / ('latex' + str(j) + '.svg'), 'wb') as svg:\n svg.write(r.content)\n",
"step-5": "import requests\nimport toml\nfrom pathlib import Path\n\nimgs:list\nconfig:dict\n\ndef parseTex(lines:list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == \"\\n\":\n continue\n\n inline = False\n if (line[0] == \"$\" and line[1] != \"$\"):\n inline = True\n line = line.replace(\"$\", \"\")\n line = line.replace(\"\\n\", \"\")\n line = line.replace(\" \", \"&space;\")\n line = line.replace(\"+\", \"+\")\n new_lines.append((line, inline))\n return new_lines\n\ndef addColor(lines:list, color:str):\n colortag = \"{\\color[RGB]{\" + color + \"}\"\n return [\"\"\"\\inline\"\"\" + colortag + line[0] + \"}\" if(line[1]) else colortag + line[0] + \"}\" for line in lines]\n\n\n\n\nif Path(\"config.toml\").exists():\n with open(\"config.toml\", \"r\") as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {\"colors\": [\"0, 0, 0\"], \"outputs\": [\"\"]}\nelse:\n config = {\"colors\": [\"0, 0, 0\"], \"outputs\": [\"\"]}\n\nwith open(\"tex.txt\", \"r\") as tex:\n imgs = tex.readlines()\n\nimgs = parseTex(imgs) #returns a list of tuples, [0] is the parsed text, [1] is an inline boolean\nfor i, color in enumerate(config[\"colors\"]):\n coloredimgs = addColor(imgs, color)\n output = \"output\" / Path(config[\"outputs\"][i])\n if (not output.exists()):\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = \"https://latex.codecogs.com/svg.latex?\" + tex\n print(link)\n r = requests.get(link)\n with open(output / (\"latex\" + str(j) + \".svg\"), \"wb\") as svg:\n svg.write(r.content)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
exec(open('docker_zabbix_script_sender/version.py').read())
setup(name=NAME, version=version, author='Cyril Moreau', author_email=
'cyril.moreauu@gmail.com', url=GITHUB_ORG_URL + '/' + NAME,
download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,
version), description=
'Push Docker containers script results to Zabbix efficiently',
long_description=dedent(
"""
Rationale
---------
Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.
It leverages 3 interesting components:
- Zabbix maintains a tool titled ``zabbix-sender``.
It is meant to push `Zabbix trapper items`_ efficiently.
- Develop your own scripts to monitor your docker container
- Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.
It allows the client to subscribe to a live feed delivering a container statistics.
The daemon script stands in the middle of those 3 components.
It collects Docker containers statistics and transforms them in Zabbix trapper events.
Published metrics
-----------------
The daemon script does not publish any statistic yet.
You have to develop your own script
Documentation
-------------
The stable documentation is available on ReadTheDocs_
"""
), keywords='docker zabbix monitoring', packages=[
'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],
zip_safe=False, license='Apache license version 2.0', classifiers=[
'Development Status :: 4 - Beta', 'Environment :: Other Environment',
'Intended Audience :: Developers', 'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4', 'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License'], entry_points=
"""
[console_scripts]
docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run
"""
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
NAME = 'docker-zabbix-script-sender'
GITHUB_ORG_URL = 'https://github.com/troptop/'
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
exec(open('docker_zabbix_script_sender/version.py').read())
setup(name=NAME, version=version, author='Cyril Moreau', author_email=
'cyril.moreauu@gmail.com', url=GITHUB_ORG_URL + '/' + NAME,
download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,
version), description=
'Push Docker containers script results to Zabbix efficiently',
long_description=dedent(
"""
Rationale
---------
Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.
It leverages 3 interesting components:
- Zabbix maintains a tool titled ``zabbix-sender``.
It is meant to push `Zabbix trapper items`_ efficiently.
- Develop your own scripts to monitor your docker container
- Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.
It allows the client to subscribe to a live feed delivering a container statistics.
The daemon script stands in the middle of those 3 components.
It collects Docker containers statistics and transforms them in Zabbix trapper events.
Published metrics
-----------------
The daemon script does not publish any statistic yet.
You have to develop your own script
Documentation
-------------
The stable documentation is available on ReadTheDocs_
"""
), keywords='docker zabbix monitoring', packages=[
'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],
zip_safe=False, license='Apache license version 2.0', classifiers=[
'Development Status :: 4 - Beta', 'Environment :: Other Environment',
'Intended Audience :: Developers', 'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4', 'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License'], entry_points=
"""
[console_scripts]
docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run
"""
)
<|reserved_special_token_1|>
import os
import sys
from setuptools import setup
from textwrap import dedent
NAME = 'docker-zabbix-script-sender'
GITHUB_ORG_URL = 'https://github.com/troptop/'
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
exec(open('docker_zabbix_script_sender/version.py').read())
setup(name=NAME, version=version, author='Cyril Moreau', author_email=
'cyril.moreauu@gmail.com', url=GITHUB_ORG_URL + '/' + NAME,
download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,
version), description=
'Push Docker containers script results to Zabbix efficiently',
long_description=dedent(
"""
Rationale
---------
Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.
It leverages 3 interesting components:
- Zabbix maintains a tool titled ``zabbix-sender``.
It is meant to push `Zabbix trapper items`_ efficiently.
- Develop your own scripts to monitor your docker container
- Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.
It allows the client to subscribe to a live feed delivering a container statistics.
The daemon script stands in the middle of those 3 components.
It collects Docker containers statistics and transforms them in Zabbix trapper events.
Published metrics
-----------------
The daemon script does not publish any statistic yet.
You have to develop your own script
Documentation
-------------
The stable documentation is available on ReadTheDocs_
"""
), keywords='docker zabbix monitoring', packages=[
'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],
zip_safe=False, license='Apache license version 2.0', classifiers=[
'Development Status :: 4 - Beta', 'Environment :: Other Environment',
'Intended Audience :: Developers', 'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4', 'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License'], entry_points=
"""
[console_scripts]
docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run
"""
)
<|reserved_special_token_1|>
#!/usr/bin/env python
import os
import sys
from setuptools import setup
from textwrap import dedent
NAME = "docker-zabbix-script-sender"
GITHUB_ORG_URL = "https://github.com/troptop/"
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
exec(open('docker_zabbix_script_sender/version.py').read())
setup(
name=NAME,
version=version,
author="Cyril Moreau",
author_email="cyril.moreauu@gmail.com",
url= GITHUB_ORG_URL + '/' + NAME,
download_url="{0}/{1}/tarball/v{2}".format(GITHUB_ORG_URL, NAME, version),
description="Push Docker containers script results to Zabbix efficiently",
long_description=dedent("""
Rationale
---------
Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.
It leverages 3 interesting components:
- Zabbix maintains a tool titled ``zabbix-sender``.
It is meant to push `Zabbix trapper items`_ efficiently.
- Develop your own scripts to monitor your docker container
- Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.
It allows the client to subscribe to a live feed delivering a container statistics.
The daemon script stands in the middle of those 3 components.
It collects Docker containers statistics and transforms them in Zabbix trapper events.
Published metrics
-----------------
The daemon script does not publish any statistic yet.
You have to develop your own script
Documentation
-------------
The stable documentation is available on ReadTheDocs_
"""),
keywords="docker zabbix monitoring",
packages=['docker_zabbix_script_sender'],
install_requires=[
'docker-py >= 1.0.0',
],
zip_safe=False,
license="Apache license version 2.0",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
entry_points = """
[console_scripts]
docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run
"""
)
|
flexible
|
{
"blob_id": "0769003c248c099da5bcd75541d35234b01af5de",
"index": 2723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n 'cyril.moreauu@gmail.com', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache 
Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-3": "<mask token>\nNAME = 'docker-zabbix-script-sender'\nGITHUB_ORG_URL = 'https://github.com/troptop/'\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n 'cyril.moreauu@gmail.com', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python 
:: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-4": "import os\nimport sys\nfrom setuptools import setup\nfrom textwrap import dedent\nNAME = 'docker-zabbix-script-sender'\nGITHUB_ORG_URL = 'https://github.com/troptop/'\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\nexec(open('docker_zabbix_script_sender/version.py').read())\nsetup(name=NAME, version=version, author='Cyril Moreau', author_email=\n 'cyril.moreauu@gmail.com', url=GITHUB_ORG_URL + '/' + NAME,\n download_url='{0}/{1}/tarball/v{2}'.format(GITHUB_ORG_URL, NAME,\n version), description=\n 'Push Docker containers script results to Zabbix efficiently',\n long_description=dedent(\n \"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"\n ), keywords='docker zabbix monitoring', packages=[\n 'docker_zabbix_script_sender'], install_requires=['docker-py >= 1.0.0'],\n zip_safe=False, license='Apache license version 2.0', classifiers=[\n 'Development Status :: 4 - Beta', 'Environment :: Other Environment',\n 'Intended Audience :: Developers', 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 
'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4', 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License'], entry_points=\n \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n )\n",
"step-5": "#!/usr/bin/env python\nimport os\nimport sys\nfrom setuptools import setup\nfrom textwrap import dedent\n\nNAME = \"docker-zabbix-script-sender\"\nGITHUB_ORG_URL = \"https://github.com/troptop/\"\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nexec(open('docker_zabbix_script_sender/version.py').read())\n\nsetup(\n name=NAME,\n version=version,\n author=\"Cyril Moreau\",\n author_email=\"cyril.moreauu@gmail.com\",\n url= GITHUB_ORG_URL + '/' + NAME,\n download_url=\"{0}/{1}/tarball/v{2}\".format(GITHUB_ORG_URL, NAME, version),\n description=\"Push Docker containers script results to Zabbix efficiently\",\n long_description=dedent(\"\"\"\n Rationale\n ---------\n Docker Zabbix Sender delivers a daemon script that push to Zabbix statistics about Docker containers.\n\n It leverages 3 interesting components:\n\n - Zabbix maintains a tool titled ``zabbix-sender``.\n It is meant to push `Zabbix trapper items`_ efficiently.\n\n\t- Develop your own scripts to monitor your docker container\n\n - Docker 1.5.0 comes with Docker Remote API version 17, providing a new `stats endpoint`_.\n It allows the client to subscribe to a live feed delivering a container statistics.\n\n The daemon script stands in the middle of those 3 components.\n It collects Docker containers statistics and transforms them in Zabbix trapper events.\n\n Published metrics\n -----------------\n The daemon script does not publish any statistic yet.\n\tYou have to develop your own script\n\n Documentation\n -------------\n The stable documentation is available on ReadTheDocs_\n\n \"\"\"),\n keywords=\"docker zabbix monitoring\",\n packages=['docker_zabbix_script_sender'],\n install_requires=[\n 'docker-py >= 1.0.0',\n ],\n zip_safe=False,\n license=\"Apache license version 2.0\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language 
:: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n entry_points = \"\"\"\n [console_scripts]\n docker-zabbix-script-sender = docker_zabbix_script_sender.zabbix_sender:run\n \"\"\"\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from collections import defaultdict
def k_most_frequent(arr: list, k: int):
    """Return the ``k`` most frequent elements of *arr*, most frequent first.

    Ties are broken by reversing a stable ascending sort on the counts,
    so among equally frequent values the one counted later comes first.

    :param arr: sequence of hashable values to count
    :param k: how many top values to return
    :return: list of up to ``k`` values ordered by descending frequency
    """
    counts = defaultdict(int)
    # BUG FIX: the original iterated the module-level global `nums`
    # instead of the `arr` parameter, silently ignoring the argument.
    for value in arr:
        counts[value] += 1
    pairs = list(counts.items())
    ordered = list(reversed(sorted(pairs, key=lambda p: p[1])))
    return [value for value, _ in ordered[:k]]


nums = [1, 6, 2, 1, 6, 1, 4, 2, 6, 1]
k_most_frequent(nums, 3)
|
normal
|
{
"blob_id": "1298c2abae519a5365cc0d9d406196db987eb219",
"index": 5923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef k_most_frequent(arr: list, k: int):\n \"\"\" \"\"\"\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n counts = [(k, v) for k, v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\n\n<mask token>\nk_most_frequent(nums, 3)\n",
"step-3": "<mask token>\n\n\ndef k_most_frequent(arr: list, k: int):\n \"\"\" \"\"\"\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n counts = [(k, v) for k, v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\n\nnums = [1, 6, 2, 1, 6, 1, 4, 2, 6, 1]\nk_most_frequent(nums, 3)\n",
"step-4": "from collections import defaultdict\n\n\ndef k_most_frequent(arr: list, k: int):\n \"\"\" \"\"\"\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n counts = [(k, v) for k, v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\n\nnums = [1, 6, 2, 1, 6, 1, 4, 2, 6, 1]\nk_most_frequent(nums, 3)\n",
"step-5": "\nfrom collections import defaultdict\n\ndef k_most_frequent(arr:list, k:int):\n ''' '''\n counts = defaultdict(int)\n for n in nums:\n counts[n] += 1\n \n counts = [(k,v) for k,v in counts.items()]\n ordered = list(reversed(sorted(counts, key=lambda d: d[1])))\n return [o[0] for o in ordered[:k]]\n\nnums = [1,6,2,1,6,1,4,2,6,1]\nk_most_frequent(nums, 3)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Dimension constants (Spanish: ANCHO = width, ALTO = height).
# Presumably a window/screen size in pixels (600x800, portrait) --
# TODO confirm against the code that imports these (not visible here).
ANCHO = 600
ALTO = 800
|
flexible
|
{
"blob_id": "71ca67948100fb7ad388934740cead1ebe4a2b52",
"index": 8549,
"step-1": "<mask token>\n",
"step-2": "ANCHO = 600\nALTO = 800\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 24 18:50:16 2018
@author: User
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 19:05:42 2018
@author: User
"""
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import lxml
import html5lib
import csv
path = 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm'
# NOTE(review): path1 is never used below -- was a CSV export intended?
path1 = 'E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'

# Extract every table containing "Bonus" (the summary compensation table)
# from the filing HTML; pd.read_html returns a *list* of DataFrames.
dfhtml = pd.read_html(path, match="Bonus")

# Render the table list as text and blank out the "NaN" placeholders.
htmltxt = str(dfhtml)
txtnew = htmltxt.replace("NaN", "")
print(txtnew)

# Write the cleaned text out. Using `with` guarantees the handle is
# closed even if the write fails (the original paired open()/close()
# manually and wrote via writelines(str(...)), which emits the same
# characters one at a time).
with open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt', 'w') as f:
    f.write(txtnew)

# Drop columns that are entirely NaN, then rows with fewer than one
# non-NaN value.
df2 = dfhtml[0].dropna(axis=1, how='all')
df2 = df2.dropna(thresh=1)
|
normal
|
{
"blob_id": "c7768e44464703552f579a1ec68b58fd9746a381",
"index": 8743,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlen(dfhtml)\ndfhtml\ntype(dfhtml)\n<mask token>\nprint(txtnew)\n<mask token>\nf.writelines(str(txtnew))\nf.close()\n<mask token>\n",
"step-3": "<mask token>\npath = (\n 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm')\npath1 = 'E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'\ndfhtml = pd.read_html(path, match='Bonus')\nlen(dfhtml)\ndfhtml\ntype(dfhtml)\nhtmltxt = str(dfhtml)\ntxtnew = htmltxt.replace('NaN', '')\nprint(txtnew)\nf = open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt', 'w')\nf.writelines(str(txtnew))\nf.close()\ndf2 = dfhtml[0].dropna(axis=1, how='all')\ndf2 = df2.dropna(thresh=1)\n",
"step-4": "<mask token>\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport lxml\nimport html5lib\nimport csv\npath = (\n 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm')\npath1 = 'E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'\ndfhtml = pd.read_html(path, match='Bonus')\nlen(dfhtml)\ndfhtml\ntype(dfhtml)\nhtmltxt = str(dfhtml)\ntxtnew = htmltxt.replace('NaN', '')\nprint(txtnew)\nf = open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt', 'w')\nf.writelines(str(txtnew))\nf.close()\ndf2 = dfhtml[0].dropna(axis=1, how='all')\ndf2 = df2.dropna(thresh=1)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 24 18:50:16 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 19:05:42 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport numpy as np\r\nimport lxml\r\nimport html5lib\r\nimport csv\r\n\r\npath = 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm'\r\npath1='E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'\r\n\r\n#extracting the summary compensation table from html file\r\ndfhtml = pd.read_html(path,match=\"Bonus\")\r\nlen(dfhtml)\r\ndfhtml\r\ntype(dfhtml)\r\n\r\n#Converting list to string and removing the NaN\r\nhtmltxt=str(dfhtml)\r\ntxtnew=htmltxt.replace(\"NaN\",\"\")\r\nprint(txtnew)\r\n\r\n#writing the list to text file\r\nf=open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt','w')\r\nf.writelines(str(txtnew))\r\nf.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#df1=dfhtml[0].replace(np.NaN,np.nan)\r\ndf2=dfhtml[0].dropna(axis=1, how='all') \r\ndf2=df2.dropna(thresh=1)\r\n#df2.iloc[0:2,:] # Displaying the Rows with the Titles only.\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from threading import Thread, Lock
from utils import reloj
import random
class Imprimidor(Thread):
    """Money-printer worker thread (Spanish: "Imprimidor" = printer).

    All methods are unimplemented placeholders (bodies are ``pass``);
    the docstrings below describe the intended behavior.
    """

    def __init__(self, nombre, berlin, bolsa_dinero):
        # nombre: printer name; berlin: presumably the coordinating
        # entity; bolsa_dinero: presumably the shared money bag --
        # TODO confirm against the callers (not visible here).
        super().__init__()
        pass

    def run(self):
        '''
        Imprimidor behavior: prints money every 5 minutes; each
        iteration checks whether there is a problem with the money (20%).
        '''
        pass

    def imprimir_dinero(self, dinero):
        '''
        Call this method to print money.
        ***Concurrency errors must be avoided here (e.g. with a lock).***
        :param dinero: amount of money to print
        :return:
        '''
        pass

    def problema_papel(self):
        '''
        20% probability of a problem with the paper.
        '''
        pass
|
normal
|
{
"blob_id": "ab79e2f9584dbbb526c62bde882a1bc9874b56f9",
"index": 7903,
"step-1": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n <mask token>\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n\n def imprimir_dinero(self, dinero):\n \"\"\"\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n \"\"\"\n pass\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n",
"step-4": "from threading import Thread, Lock\nfrom utils import reloj\nimport random\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n \"\"\"\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n \"\"\"\n pass\n\n def imprimir_dinero(self, dinero):\n \"\"\"\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n \"\"\"\n pass\n\n def problema_papel(self):\n \"\"\"\n Probabilidad de problema con el papel de 20%\n \"\"\"\n pass\n",
"step-5": "from threading import Thread, Lock\nfrom utils import reloj\nimport random\n\n\nclass Imprimidor(Thread):\n\n def __init__(self, nombre, berlin, bolsa_dinero):\n super().__init__()\n pass\n\n def run(self):\n '''\n Funcionalidad de iMPRIMIDOR que imprime dinero cada 5 minutos, cada\n iteracion chequea si se cumple que hay problema con el dinero (20%)\n '''\n pass\n\n def imprimir_dinero(self, dinero):\n '''\n Llamar a este método para imprimir dinero.\n ***Acá debes procurarte de evitar errores de concurrencia***\n :param dinero:\n :return:\n '''\n pass\n\n def problema_papel(self):\n '''\n Probabilidad de problema con el papel de 20%\n '''\n pass\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import requests
from urllib.parse import urlparse, urlencode
from json import JSONDecodeError
from requests.exceptions import HTTPError
def validate_response(response):
    """
    Raise ``HTTPError`` if *response* is an HTTP error (4xx/5xx);
    return ``None`` otherwise.

    The re-raised error carries a dict with the status code, the
    original exception, and the response payload (decoded JSON when
    possible, raw ``content`` bytes otherwise), and is chained to the
    original error so the full traceback is preserved.
    """
    r = response
    try:
        r.raise_for_status()
    except HTTPError as e:
        message = dict(status_code=r.status_code, exception=e)
        try:
            # Avoid rebinding the `response` parameter as the original
            # did; store the decoded body directly.
            message['response'] = r.json()
        except JSONDecodeError:
            # No `as e` here: the original shadowed (and, per Python 3
            # semantics, deleted) the outer exception variable.
            message['response'] = r.content
        raise HTTPError(message) from e
class CpmsConnector:
    """The CpmsConnector object allows an application to communicate
    with CPMS (the aCommerce fulfillment platform) over its HTTP API.
    """

    # Order statuses accepted by the sales-order-status endpoint.
    ORDER_STATUS = ('NEW', 'IN_PROGRESS', 'COMPLETED', 'CANCELED', 'ERROR')

    def __init__(self, config):
        """initialize with config
        config(dict): must supply username, api_key, api_url
        """
        self.username = config['username']
        self.api_key = config['api_key']
        self.api_url = config['api_url']
        self._token = None
        # Authenticate immediately so `self.headers` is ready for use.
        self._set_token()

    @property
    def _fulfillment_url(self):
        # The fulfillment API lives on a `fulfillment.`-prefixed host of
        # the same base URL.
        netloc = f'fulfillment.{urlparse(self.api_url).netloc}'
        return urlparse(self.api_url)._replace(netloc=netloc).geturl()

    def _update_headers(self, token):
        # CPMS expects the auth token in the X-Subject-Token header.
        self.headers = {
            'X-Subject-Token': token
        }

    @property
    def token(self):
        """Current auth token (read-only)."""
        return self._token

    def _set_token(self):
        """Request an auth token from the identity endpoint and store it
        both on the instance and in the default request headers."""
        path = '/identity/token'
        payload = {
            "auth": {
                "apiKeyCredentials": {
                    "username": self.username,
                    "apiKey": self.api_key
                }
            }
        }
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.post(url, json=payload)
        validate_response(r)
        token = r.json()['token']['token_id']
        self._update_headers(token)
        self._token = token

    def get_order(self, channel_id, order_id):
        """retrieve single order of sales order

        Args:
            channel_id(str): channel_id of cpms
            order_id(str): order id to retrieve

        Returns (dict): the sales order
        """
        path = f'/channel/{channel_id}/order/{order_id}'
        url = urlparse(self._fulfillment_url)._replace(path=path).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        return r.json()

    def get_orders_status(self, channel_id=None, partner_id=None, list_id=None,
                          since=None, order_status=None):
        """Get list order status of sales order

        Args:
            channel_id(str): channel_id of cpms
            partner_id(str): merchant/partner id of cpms
            list_id(list): list of order id (max 10)
            since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z
            order_status(str): (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)
                only applied when querying by `since`

        Returns:
            tuple: (orders, next_url) where next_url is the pagination
            link or None when there is no next page
        """
        if order_status and order_status not in self.ORDER_STATUS:
            raise ValueError(
                'invalid order_status eg. '
                '(NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)'
            )
        url = urlparse(self._fulfillment_url)
        # make sure channel_id or partner_id is supplied
        if channel_id:
            path = f'/channel/{channel_id}'
        elif partner_id:
            path = f'/partner/{partner_id}'
        else:
            raise ValueError(
                'must supply either channel_id or partner_id args')
        path += '/sales-order-status'
        # make sure list_id or since is supplied
        if list_id:
            if len(list_id) > 10:
                raise ValueError('list_id can\'t be more than 10 length')
            path += '/id'
            query_string = {'id': list_id}
        elif since:
            # BUG FIX: the original built {'id': list_id} here, but
            # list_id is always None in this branch; the endpoint
            # expects the ISO-8601 timestamp under 'since'.
            query_string = {'since': since}
            if order_status in self.ORDER_STATUS:
                query_string.update({'orderStatus': order_status})
        else:
            raise ValueError('must supply either list_id or since args')

        query_string = urlencode(query_string, doseq=True)
        url = url._replace(path=path, query=query_string).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        orders = r.json()
        next_url = r.links['next']['url'] if 'next' in r.links else None
        return orders, next_url

    def create_order(self, channel_id, order_id, payload):
        """create order to acommerce (CPMS)

        Args:
            channel_id(str): channel_id of cpms
            order_id(str): order_id of merchant or partner
            payload(dict): order body

        Returns:
            dict with status code and message, or raises on error
        """
        path = f'/channel/{channel_id}/order/{order_id}'
        url = urlparse(self._fulfillment_url)._replace(path=path).geturl()
        r = requests.put(url=url, json=payload, headers=self.headers)
        validate_response(r)
        return {
            'code': r.status_code,
            'message': 'Order has been successfully created'
        }

    def get_stocks(self, channel_id, partner_id, since):
        """Get list stock of partner from specifics channel/marketplace

        Args:
            channel_id(str): channel_id cpms
            partner_id(str): partner/merchant id
            since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z

        Returns (dict): stock data, plus the request 'url' when a next
        page exists
        """
        path = f'/channel/{channel_id}/allocation/merchant/{partner_id}'
        query_string = urlencode({'since': since})
        url = urlparse(self._fulfillment_url)._replace(
            path=path, query=query_string).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        next_link = r.links['next']['url'] if 'next' in r.links else None
        # NOTE(review): this returns the *request* url, not next_link --
        # confirm the intended pagination contract with the callers.
        return {'data': r.json(), 'url': url} \
            if next_link else {'data': r.json()}

    def _get_webhook_path(self, channel_id, partner_id):
        # Resolve the base path for webhook endpoints; channel_id wins
        # when both are given.
        if not (channel_id or partner_id):
            raise ValueError('channel_id or partner_id must be fill')
        return f'/channel/{channel_id}' \
            if channel_id else f'/partner/{partner_id}'

    def create_webhook(self, payload, channel_id=None, partner_id=None):
        """Create webhook registration end point to acommerce either using
        channel_id or partner_id

        Args:
            channel_id(str): channel_id of acommerce (CPMS)
            partner_id(str): merchant or partner id acommerce (CPMS)
            payload(str): webhook data format acommerce

        Returns (dict): webhook data informations
        """
        path = self._get_webhook_path(channel_id, partner_id)
        path += '/hooks'
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.post(url=url, json=payload, headers=self.headers)
        validate_response(r)
        return r.json()

    def retrieve_webhook(self, webhook_id, channel_id=None, partner_id=None):
        """Retrieve specific webhook information using webhook_id.
        must supply either partner_id or channel_id

        Args:
            webhook_id: registered webhook id
            channel_id(str): channel_id of acommerce (CPMS)
            partner_id(str): merchant or partner id acommerce (CPMS)

        Returns (dict): webhook data informations
        """
        path = self._get_webhook_path(channel_id, partner_id)
        path += f'/hooks/{webhook_id}'
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.get(url=url, headers=self.headers)
        validate_response(r)
        return r.json()

    def get_webhook(self, channel_id=None, partner_id=None):
        """Get list registered webhook from acommerce using either partner_id
        or channel_id

        Args:
            channel_id(str): channel_id of acommerce (CPMS)
            partner_id(str): merchant or partner id acommerce (CPMS)

        Returns (list): webhook data informations
        """
        path = self._get_webhook_path(channel_id, partner_id)
        path += '/hooks'
        # (fixed a harmless `url = url =` double-assignment typo)
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        return r.json()

    def delete_webhook(self, webhook_id, channel_id=None, partner_id=None):
        """remove a registered webhook

        Args:
            webhook_id: registered webhook id
            channel_id(str): channel_id of acommerce (CPMS)
            partner_id(str): merchant or partner id acommerce (CPMS)

        Returns No Content HTTP 204
        """
        path = self._get_webhook_path(channel_id, partner_id)
        # BUG FIX: the original sent DELETE to the '/hooks' collection
        # and ignored `webhook_id`; target the specific hook (mirrors
        # retrieve_webhook).
        path += f'/hooks/{webhook_id}'
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.delete(url, headers=self.headers)
        validate_response(r)
        return {
            'code': r.status_code,
            'message': 'Web Hook has been successfully deleted'
        }
|
normal
|
{
"blob_id": "5bd2cf2ae68708d2b1dbbe0323a5f83837f7b564",
"index": 7842,
"step-1": "<mask token>\n\n\nclass CpmsConnector:\n <mask token>\n <mask token>\n\n def __init__(self, config):\n \"\"\"initialize with config\n config(dict): must supply username, api_key, api_url\n \"\"\"\n self.username = config['username']\n self.api_key = config['api_key']\n self.api_url = config['api_url']\n self._token = None\n self._set_token()\n\n @property\n def _fulfillment_url(self):\n netloc = f'fulfillment.{urlparse(self.api_url).netloc}'\n return urlparse(self.api_url)._replace(netloc=netloc).geturl()\n\n def _update_headers(self, token):\n self.headers = {'X-Subject-Token': token}\n <mask token>\n\n def _set_token(self):\n path = '/identity/token'\n payload = {'auth': {'apiKeyCredentials': {'username': self.username,\n 'apiKey': self.api_key}}}\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url, json=payload)\n validate_response(r)\n token = r.json()['token']['token_id']\n self._update_headers(token)\n self._token = token\n\n def get_order(self, channel_id, order_id):\n \"\"\"retrieve single order of sales order\n\n Args:\n url(str): url for retrieval sales order\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def get_orders_status(self, channel_id=None, partner_id=None, list_id=\n None, since=None, order_status=None):\n \"\"\"Get list order status of sales order\n\n Args:\n channel_id(str): channel_id of cpms\n partner_id(str): merchant/partner id of cpms\n list_id(list): list of order id\n since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z\n order_status(str): (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)\n\n Returns:\n list: all orders\n \"\"\"\n if order_status and order_status not in self.ORDER_STATUS:\n raise ValueError(\n 'invalid order_status eg. 
(NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)'\n )\n url = urlparse(self._fulfillment_url)\n if channel_id:\n path = f'/channel/{channel_id}'\n elif partner_id:\n path = f'/partner/{partner_id}'\n else:\n raise ValueError('must supply either channel_id or partner_id args'\n )\n path += '/sales-order-status'\n if list_id:\n if len(list_id) > 10:\n raise ValueError(\"list_id can't be more than 10 length\")\n path += '/id'\n query_string = {'id': list_id}\n elif since:\n query_string = {'id': list_id}\n if order_status in self.ORDER_STATUS:\n query_string.update({'orderStatus': order_status})\n else:\n raise ValueError('must supply either list_id or since args')\n query_string = urlencode(query_string, doseq=True)\n url = url._replace(path=path, query=query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n orders = r.json()\n next_url = r.links['next']['url'] if 'next' in r.links else None\n return orders, next_url\n\n def create_order(self, channel_id, order_id, payload):\n \"\"\"create order to acommerce (CPMS)\n\n Args:\n channel_id(str): channel_id of cpms\n order_id(str): order_id of merchant or partner\n payload(dict): order body\n\n Returns:\n response or exception\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.put(url=url, json=payload, headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Order has been successfully created'}\n\n def get_stocks(self, channel_id, partner_id, since):\n \"\"\"Get list stock of partner from specifics channel/marketplace\n\n Args:\n channel_id(str): channel_id cpms\n partner_id(str): partner/merchant id\n since(str): ISO 8601 format eg. 
2015-06-18T10:30:40Z\n\n Returns (list): list of stock\n\n \"\"\"\n path = f'/channel/{channel_id}/allocation/merchant/{partner_id}'\n query_string = urlencode({'since': since})\n url = urlparse(self._fulfillment_url)._replace(path=path, query=\n query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n next_link = r.links['next']['url'] if 'next' in r.links else None\n return {'data': r.json(), 'url': url} if next_link else {'data': r.\n json()}\n\n def _get_webhook_path(self, channel_id, partner_id):\n if not (channel_id or partner_id):\n raise ValueError('channel_id or partner_id must be fill')\n return (f'/channel/{channel_id}' if channel_id else\n f'/partner/{partner_id}')\n\n def create_webhook(self, payload, channel_id=None, partner_id=None):\n \"\"\"Create webhook registration end point to acommerce either using\n channel_id or partner_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n payload(str): webhook data format acommerce\n\n Returns (dict): webhook data informations\n\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url=url, json=payload, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def retrieve_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"Retrieve specific webhook information using webhook_id.\n must supply either partner_id or channel_id\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (dict): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += f'/hooks/{webhook_id}'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url=url, headers=self.headers)\n validate_response(r)\n return r.json()\n 
<mask token>\n\n def delete_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"remove a registered webhook\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns No Content HTTP 204\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.delete(url, headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Web Hook has been successfully deleted'}\n",
"step-2": "<mask token>\n\n\nclass CpmsConnector:\n <mask token>\n ORDER_STATUS = 'NEW', 'IN_PROGRESS', 'COMPLETED', 'CANCELED', 'ERROR'\n\n def __init__(self, config):\n \"\"\"initialize with config\n config(dict): must supply username, api_key, api_url\n \"\"\"\n self.username = config['username']\n self.api_key = config['api_key']\n self.api_url = config['api_url']\n self._token = None\n self._set_token()\n\n @property\n def _fulfillment_url(self):\n netloc = f'fulfillment.{urlparse(self.api_url).netloc}'\n return urlparse(self.api_url)._replace(netloc=netloc).geturl()\n\n def _update_headers(self, token):\n self.headers = {'X-Subject-Token': token}\n\n @property\n def token(self):\n return self._token\n\n def _set_token(self):\n path = '/identity/token'\n payload = {'auth': {'apiKeyCredentials': {'username': self.username,\n 'apiKey': self.api_key}}}\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url, json=payload)\n validate_response(r)\n token = r.json()['token']['token_id']\n self._update_headers(token)\n self._token = token\n\n def get_order(self, channel_id, order_id):\n \"\"\"retrieve single order of sales order\n\n Args:\n url(str): url for retrieval sales order\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def get_orders_status(self, channel_id=None, partner_id=None, list_id=\n None, since=None, order_status=None):\n \"\"\"Get list order status of sales order\n\n Args:\n channel_id(str): channel_id of cpms\n partner_id(str): merchant/partner id of cpms\n list_id(list): list of order id\n since(str): ISO 8601 format eg. 
2015-06-18T10:30:40Z\n order_status(str): (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)\n\n Returns:\n list: all orders\n \"\"\"\n if order_status and order_status not in self.ORDER_STATUS:\n raise ValueError(\n 'invalid order_status eg. (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)'\n )\n url = urlparse(self._fulfillment_url)\n if channel_id:\n path = f'/channel/{channel_id}'\n elif partner_id:\n path = f'/partner/{partner_id}'\n else:\n raise ValueError('must supply either channel_id or partner_id args'\n )\n path += '/sales-order-status'\n if list_id:\n if len(list_id) > 10:\n raise ValueError(\"list_id can't be more than 10 length\")\n path += '/id'\n query_string = {'id': list_id}\n elif since:\n query_string = {'id': list_id}\n if order_status in self.ORDER_STATUS:\n query_string.update({'orderStatus': order_status})\n else:\n raise ValueError('must supply either list_id or since args')\n query_string = urlencode(query_string, doseq=True)\n url = url._replace(path=path, query=query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n orders = r.json()\n next_url = r.links['next']['url'] if 'next' in r.links else None\n return orders, next_url\n\n def create_order(self, channel_id, order_id, payload):\n \"\"\"create order to acommerce (CPMS)\n\n Args:\n channel_id(str): channel_id of cpms\n order_id(str): order_id of merchant or partner\n payload(dict): order body\n\n Returns:\n response or exception\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.put(url=url, json=payload, headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Order has been successfully created'}\n\n def get_stocks(self, channel_id, partner_id, since):\n \"\"\"Get list stock of partner from specifics channel/marketplace\n\n Args:\n channel_id(str): channel_id cpms\n partner_id(str): partner/merchant id\n since(str): ISO 8601 
format eg. 2015-06-18T10:30:40Z\n\n Returns (list): list of stock\n\n \"\"\"\n path = f'/channel/{channel_id}/allocation/merchant/{partner_id}'\n query_string = urlencode({'since': since})\n url = urlparse(self._fulfillment_url)._replace(path=path, query=\n query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n next_link = r.links['next']['url'] if 'next' in r.links else None\n return {'data': r.json(), 'url': url} if next_link else {'data': r.\n json()}\n\n def _get_webhook_path(self, channel_id, partner_id):\n if not (channel_id or partner_id):\n raise ValueError('channel_id or partner_id must be fill')\n return (f'/channel/{channel_id}' if channel_id else\n f'/partner/{partner_id}')\n\n def create_webhook(self, payload, channel_id=None, partner_id=None):\n \"\"\"Create webhook registration end point to acommerce either using\n channel_id or partner_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n payload(str): webhook data format acommerce\n\n Returns (dict): webhook data informations\n\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url=url, json=payload, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def retrieve_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"Retrieve specific webhook information using webhook_id.\n must supply either partner_id or channel_id\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (dict): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += f'/hooks/{webhook_id}'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url=url, headers=self.headers)\n validate_response(r)\n return 
r.json()\n\n def get_webhook(self, channel_id=None, partner_id=None):\n \"\"\"Get list registered webhook from acommerce using either partner_id\n or channel_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (list): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def delete_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"remove a registered webhook\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns No Content HTTP 204\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.delete(url, headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Web Hook has been successfully deleted'}\n",
"step-3": "<mask token>\n\n\nclass CpmsConnector:\n \"\"\"The CpmsConnector object allow you communicate through\n cpms between application.\n \"\"\"\n ORDER_STATUS = 'NEW', 'IN_PROGRESS', 'COMPLETED', 'CANCELED', 'ERROR'\n\n def __init__(self, config):\n \"\"\"initialize with config\n config(dict): must supply username, api_key, api_url\n \"\"\"\n self.username = config['username']\n self.api_key = config['api_key']\n self.api_url = config['api_url']\n self._token = None\n self._set_token()\n\n @property\n def _fulfillment_url(self):\n netloc = f'fulfillment.{urlparse(self.api_url).netloc}'\n return urlparse(self.api_url)._replace(netloc=netloc).geturl()\n\n def _update_headers(self, token):\n self.headers = {'X-Subject-Token': token}\n\n @property\n def token(self):\n return self._token\n\n def _set_token(self):\n path = '/identity/token'\n payload = {'auth': {'apiKeyCredentials': {'username': self.username,\n 'apiKey': self.api_key}}}\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url, json=payload)\n validate_response(r)\n token = r.json()['token']['token_id']\n self._update_headers(token)\n self._token = token\n\n def get_order(self, channel_id, order_id):\n \"\"\"retrieve single order of sales order\n\n Args:\n url(str): url for retrieval sales order\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def get_orders_status(self, channel_id=None, partner_id=None, list_id=\n None, since=None, order_status=None):\n \"\"\"Get list order status of sales order\n\n Args:\n channel_id(str): channel_id of cpms\n partner_id(str): merchant/partner id of cpms\n list_id(list): list of order id\n since(str): ISO 8601 format eg. 
2015-06-18T10:30:40Z\n order_status(str): (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)\n\n Returns:\n list: all orders\n \"\"\"\n if order_status and order_status not in self.ORDER_STATUS:\n raise ValueError(\n 'invalid order_status eg. (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)'\n )\n url = urlparse(self._fulfillment_url)\n if channel_id:\n path = f'/channel/{channel_id}'\n elif partner_id:\n path = f'/partner/{partner_id}'\n else:\n raise ValueError('must supply either channel_id or partner_id args'\n )\n path += '/sales-order-status'\n if list_id:\n if len(list_id) > 10:\n raise ValueError(\"list_id can't be more than 10 length\")\n path += '/id'\n query_string = {'id': list_id}\n elif since:\n query_string = {'id': list_id}\n if order_status in self.ORDER_STATUS:\n query_string.update({'orderStatus': order_status})\n else:\n raise ValueError('must supply either list_id or since args')\n query_string = urlencode(query_string, doseq=True)\n url = url._replace(path=path, query=query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n orders = r.json()\n next_url = r.links['next']['url'] if 'next' in r.links else None\n return orders, next_url\n\n def create_order(self, channel_id, order_id, payload):\n \"\"\"create order to acommerce (CPMS)\n\n Args:\n channel_id(str): channel_id of cpms\n order_id(str): order_id of merchant or partner\n payload(dict): order body\n\n Returns:\n response or exception\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.put(url=url, json=payload, headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Order has been successfully created'}\n\n def get_stocks(self, channel_id, partner_id, since):\n \"\"\"Get list stock of partner from specifics channel/marketplace\n\n Args:\n channel_id(str): channel_id cpms\n partner_id(str): partner/merchant id\n since(str): ISO 8601 
format eg. 2015-06-18T10:30:40Z\n\n Returns (list): list of stock\n\n \"\"\"\n path = f'/channel/{channel_id}/allocation/merchant/{partner_id}'\n query_string = urlencode({'since': since})\n url = urlparse(self._fulfillment_url)._replace(path=path, query=\n query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n next_link = r.links['next']['url'] if 'next' in r.links else None\n return {'data': r.json(), 'url': url} if next_link else {'data': r.\n json()}\n\n def _get_webhook_path(self, channel_id, partner_id):\n if not (channel_id or partner_id):\n raise ValueError('channel_id or partner_id must be fill')\n return (f'/channel/{channel_id}' if channel_id else\n f'/partner/{partner_id}')\n\n def create_webhook(self, payload, channel_id=None, partner_id=None):\n \"\"\"Create webhook registration end point to acommerce either using\n channel_id or partner_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n payload(str): webhook data format acommerce\n\n Returns (dict): webhook data informations\n\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url=url, json=payload, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def retrieve_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"Retrieve specific webhook information using webhook_id.\n must supply either partner_id or channel_id\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (dict): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += f'/hooks/{webhook_id}'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url=url, headers=self.headers)\n validate_response(r)\n return 
r.json()\n\n def get_webhook(self, channel_id=None, partner_id=None):\n \"\"\"Get list registered webhook from acommerce using either partner_id\n or channel_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (list): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def delete_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"remove a registered webhook\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns No Content HTTP 204\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.delete(url, headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Web Hook has been successfully deleted'}\n",
"step-4": "import requests\nfrom urllib.parse import urlparse, urlencode\nfrom json import JSONDecodeError\nfrom requests.exceptions import HTTPError\n\n\ndef validate_response(response):\n \"\"\"\n raise exception if error response occurred\n \"\"\"\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n raise HTTPError(message)\n\n\nclass CpmsConnector:\n \"\"\"The CpmsConnector object allow you communicate through\n cpms between application.\n \"\"\"\n ORDER_STATUS = 'NEW', 'IN_PROGRESS', 'COMPLETED', 'CANCELED', 'ERROR'\n\n def __init__(self, config):\n \"\"\"initialize with config\n config(dict): must supply username, api_key, api_url\n \"\"\"\n self.username = config['username']\n self.api_key = config['api_key']\n self.api_url = config['api_url']\n self._token = None\n self._set_token()\n\n @property\n def _fulfillment_url(self):\n netloc = f'fulfillment.{urlparse(self.api_url).netloc}'\n return urlparse(self.api_url)._replace(netloc=netloc).geturl()\n\n def _update_headers(self, token):\n self.headers = {'X-Subject-Token': token}\n\n @property\n def token(self):\n return self._token\n\n def _set_token(self):\n path = '/identity/token'\n payload = {'auth': {'apiKeyCredentials': {'username': self.username,\n 'apiKey': self.api_key}}}\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url, json=payload)\n validate_response(r)\n token = r.json()['token']['token_id']\n self._update_headers(token)\n self._token = token\n\n def get_order(self, channel_id, order_id):\n \"\"\"retrieve single order of sales order\n\n Args:\n url(str): url for retrieval sales order\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n 
validate_response(r)\n return r.json()\n\n def get_orders_status(self, channel_id=None, partner_id=None, list_id=\n None, since=None, order_status=None):\n \"\"\"Get list order status of sales order\n\n Args:\n channel_id(str): channel_id of cpms\n partner_id(str): merchant/partner id of cpms\n list_id(list): list of order id\n since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z\n order_status(str): (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)\n\n Returns:\n list: all orders\n \"\"\"\n if order_status and order_status not in self.ORDER_STATUS:\n raise ValueError(\n 'invalid order_status eg. (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)'\n )\n url = urlparse(self._fulfillment_url)\n if channel_id:\n path = f'/channel/{channel_id}'\n elif partner_id:\n path = f'/partner/{partner_id}'\n else:\n raise ValueError('must supply either channel_id or partner_id args'\n )\n path += '/sales-order-status'\n if list_id:\n if len(list_id) > 10:\n raise ValueError(\"list_id can't be more than 10 length\")\n path += '/id'\n query_string = {'id': list_id}\n elif since:\n query_string = {'id': list_id}\n if order_status in self.ORDER_STATUS:\n query_string.update({'orderStatus': order_status})\n else:\n raise ValueError('must supply either list_id or since args')\n query_string = urlencode(query_string, doseq=True)\n url = url._replace(path=path, query=query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n orders = r.json()\n next_url = r.links['next']['url'] if 'next' in r.links else None\n return orders, next_url\n\n def create_order(self, channel_id, order_id, payload):\n \"\"\"create order to acommerce (CPMS)\n\n Args:\n channel_id(str): channel_id of cpms\n order_id(str): order_id of merchant or partner\n payload(dict): order body\n\n Returns:\n response or exception\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n r = requests.put(url=url, json=payload, 
headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Order has been successfully created'}\n\n def get_stocks(self, channel_id, partner_id, since):\n \"\"\"Get list stock of partner from specifics channel/marketplace\n\n Args:\n channel_id(str): channel_id cpms\n partner_id(str): partner/merchant id\n since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z\n\n Returns (list): list of stock\n\n \"\"\"\n path = f'/channel/{channel_id}/allocation/merchant/{partner_id}'\n query_string = urlencode({'since': since})\n url = urlparse(self._fulfillment_url)._replace(path=path, query=\n query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n next_link = r.links['next']['url'] if 'next' in r.links else None\n return {'data': r.json(), 'url': url} if next_link else {'data': r.\n json()}\n\n def _get_webhook_path(self, channel_id, partner_id):\n if not (channel_id or partner_id):\n raise ValueError('channel_id or partner_id must be fill')\n return (f'/channel/{channel_id}' if channel_id else\n f'/partner/{partner_id}')\n\n def create_webhook(self, payload, channel_id=None, partner_id=None):\n \"\"\"Create webhook registration end point to acommerce either using\n channel_id or partner_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n payload(str): webhook data format acommerce\n\n Returns (dict): webhook data informations\n\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url=url, json=payload, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def retrieve_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"Retrieve specific webhook information using webhook_id.\n must supply either partner_id or channel_id\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of 
acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (dict): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += f'/hooks/{webhook_id}'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url=url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def get_webhook(self, channel_id=None, partner_id=None):\n \"\"\"Get list registered webhook from acommerce using either partner_id\n or channel_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (list): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def delete_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"remove a registered webhook\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns No Content HTTP 204\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.delete(url, headers=self.headers)\n validate_response(r)\n return {'code': r.status_code, 'message':\n 'Web Hook has been successfully deleted'}\n",
"step-5": "import requests\nfrom urllib.parse import urlparse, urlencode\nfrom json import JSONDecodeError\nfrom requests.exceptions import HTTPError\n\n\ndef validate_response(response):\n \"\"\"\n raise exception if error response occurred\n \"\"\"\n\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n\n raise HTTPError(message)\n\n\nclass CpmsConnector:\n \"\"\"The CpmsConnector object allow you communicate through\n cpms between application.\n \"\"\"\n\n ORDER_STATUS = ('NEW', 'IN_PROGRESS', 'COMPLETED', 'CANCELED', 'ERROR')\n\n def __init__(self, config):\n \"\"\"initialize with config\n config(dict): must supply username, api_key, api_url\n \"\"\"\n self.username = config['username']\n self.api_key = config['api_key']\n self.api_url = config['api_url']\n self._token = None\n self._set_token()\n\n @property\n def _fulfillment_url(self):\n netloc = f'fulfillment.{urlparse(self.api_url).netloc}'\n return urlparse(self.api_url)._replace(netloc=netloc).geturl()\n\n def _update_headers(self, token):\n self.headers = {\n 'X-Subject-Token': token\n }\n\n @property\n def token(self):\n return self._token\n\n def _set_token(self):\n path = '/identity/token'\n\n payload = {\n \"auth\":\n {\n \"apiKeyCredentials\":\n {\n \"username\": self.username,\n \"apiKey\": self.api_key\n }\n }\n }\n\n url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.post(url, json=payload)\n validate_response(r)\n token = r.json()['token']['token_id']\n self._update_headers(token)\n self._token = token\n\n def get_order(self, channel_id, order_id):\n \"\"\"retrieve single order of sales order\n\n Args:\n url(str): url for retrieval sales order\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n 
r = requests.get(url, headers=self.headers)\n validate_response(r)\n return r.json()\n\n def get_orders_status(self, channel_id=None, partner_id=None, list_id=None,\n since=None, order_status=None):\n \"\"\"Get list order status of sales order\n\n Args:\n channel_id(str): channel_id of cpms\n partner_id(str): merchant/partner id of cpms\n list_id(list): list of order id\n since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z\n order_status(str): (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)\n\n Returns:\n list: all orders\n \"\"\"\n\n if order_status and order_status not in self.ORDER_STATUS:\n raise ValueError(\n 'invalid order_status eg. '\n '(NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)'\n )\n\n url = urlparse(self._fulfillment_url)\n\n # make sure channel_id or partner_id being supply\n if channel_id:\n path = f'/channel/{channel_id}'\n\n elif partner_id:\n path = f'/partner/{partner_id}'\n\n else:\n raise ValueError(\n 'must supply either channel_id or partner_id args')\n\n # append sales-order-status path\n path += '/sales-order-status'\n\n # make sure list_id or since being supply\n if list_id:\n if len(list_id) > 10:\n raise ValueError('list_id can\\'t be more than 10 length')\n path += '/id'\n query_string = {'id': list_id}\n\n elif since:\n query_string = {'id': list_id}\n if order_status in self.ORDER_STATUS:\n query_string.update({'orderStatus': order_status})\n else:\n raise ValueError('must supply either list_id or since args')\n\n query_string = urlencode(query_string, doseq=True)\n url = url._replace(path=path, query=query_string).geturl()\n\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n orders = r.json()\n next_url = r.links['next']['url'] if 'next' in r.links else None\n return orders, next_url\n\n def create_order(self, channel_id, order_id, payload):\n \"\"\"create order to acommerce (CPMS)\n\n Args:\n channel_id(str): channel_id of cpms\n order_id(str): order_id of merchant or partner\n payload(dict): order body\n\n 
Returns:\n response or exception\n \"\"\"\n path = f'/channel/{channel_id}/order/{order_id}'\n url = urlparse(self._fulfillment_url)._replace(path=path).geturl()\n\n r = requests.put(url=url, json=payload, headers=self.headers)\n validate_response(r)\n\n return {\n 'code': r.status_code,\n 'message': 'Order has been successfully created'\n }\n\n def get_stocks(self, channel_id, partner_id, since):\n \"\"\"Get list stock of partner from specifics channel/marketplace\n\n Args:\n channel_id(str): channel_id cpms\n partner_id(str): partner/merchant id\n since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z\n\n Returns (list): list of stock\n\n \"\"\"\n path = f'/channel/{channel_id}/allocation/merchant/{partner_id}'\n query_string = urlencode({'since': since})\n url = urlparse(self._fulfillment_url)._replace(\n path=path, query=query_string).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n\n next_link = r.links['next']['url'] if 'next' in r.links else None\n return {'data': r.json(), 'url': url} \\\n if next_link else {'data': r.json()}\n\n def _get_webhook_path(self, channel_id, partner_id):\n if not (channel_id or partner_id):\n raise ValueError('channel_id or partner_id must be fill')\n return f'/channel/{channel_id}' \\\n if channel_id else f'/partner/{partner_id}'\n\n def create_webhook(self, payload, channel_id=None, partner_id=None):\n \"\"\"Create webhook registration end point to acommerce either using\n channel_id or partner_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n payload(str): webhook data format acommerce\n\n Returns (dict): webhook data informations\n\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n\n url = urlparse(self.api_url)._replace(path=path).geturl()\n\n r = requests.post(url=url, json=payload, headers=self.headers)\n validate_response(r)\n\n return r.json()\n\n def retrieve_webhook(self, webhook_id, 
channel_id=None, partner_id=None):\n \"\"\"Retrieve specific webhook information using webhook_id.\n must supply either partner_id or channel_id\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (dict): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += f'/hooks/{webhook_id}'\n\n url = urlparse(self.api_url)._replace(path=path).geturl()\n\n r = requests.get(url=url, headers=self.headers)\n validate_response(r)\n\n return r.json()\n\n def get_webhook(self, channel_id=None, partner_id=None):\n \"\"\"Get list registered webhook from acommerce using either partner_id\n or channel_id\n\n Args:\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns (list): webhook data informations\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = url = urlparse(self.api_url)._replace(path=path).geturl()\n r = requests.get(url, headers=self.headers)\n validate_response(r)\n\n return r.json()\n\n def delete_webhook(self, webhook_id, channel_id=None, partner_id=None):\n \"\"\"remove a registered webhook\n\n Args:\n webhook_id: registered webhook id\n channel_id(str): channel_id of acommerce (CPMS)\n partner_id(str): merchant or partner id acommerce (CPMS)\n\n Returns No Content HTTP 204\n \"\"\"\n path = self._get_webhook_path(channel_id, partner_id)\n path += '/hooks'\n url = urlparse(self.api_url)._replace(path=path).geturl()\n\n r = requests.delete(url, headers=self.headers)\n validate_response(r)\n\n return {\n 'code': r.status_code,\n 'message': 'Web Hook has been successfully deleted'\n }\n",
"step-ids": [
13,
16,
17,
19,
20
]
}
|
[
13,
16,
17,
19,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from socket import socket
<|reserved_special_token_1|>
#!/usr/bin python3
# coding: utf-8
"""
AUTHOR: bovenson
EMAIL: szhkai@qq.com
FILE: 03.py
DATE: 17-9-25 下午7:59
DESC:
"""
from socket import socket
|
flexible
|
{
"blob_id": "74d1491280eba1ceb06ccf6f45546cdb41149687",
"index": 5642,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom socket import socket\n",
"step-3": "#!/usr/bin python3\n# coding: utf-8\n\n\"\"\"\nAUTHOR: bovenson\nEMAIL: szhkai@qq.com\nFILE: 03.py\nDATE: 17-9-25 下午7:59\nDESC:\n\"\"\"\n\nfrom socket import socket\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
alunos = list()
while True:
nome = str(input('Nome: '))
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
media = (nota1+nota2)/2
alunos.append([nome, [nota1, nota2], media])
pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]
if pergunta == 'N':
break
print('-=' *30)
print(f'{"Nº":<4}{"Nome":<10}{"Média":>8}')
print('-' *30)
for i, v in enumerate(alunos):
print(f'{i:<4}{v[0]:<10}{v[2]:>8}')
while True:
print('-' *30)
notas_aluno = int(input('Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))
if notas_aluno == 999:
print('Fim do Boletim.')
break
if notas_aluno <= len(alunos)-1:
print(f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}')
|
normal
|
{
"blob_id": "8dcd4914c58a7ecafdfdd70b698ef3b7141386a6",
"index": 2632,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1 + nota2) / 2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' * 30)\nprint(f\"{'Nº':<4}{'Nome':<10}{'Média':>8}\")\nprint('-' * 30)\nfor i, v in enumerate(alunos):\n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' * 30)\n notas_aluno = int(input(\n 'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos) - 1:\n print(\n f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'\n )\n",
"step-3": "alunos = list()\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1 + nota2) / 2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' * 30)\nprint(f\"{'Nº':<4}{'Nome':<10}{'Média':>8}\")\nprint('-' * 30)\nfor i, v in enumerate(alunos):\n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' * 30)\n notas_aluno = int(input(\n 'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos) - 1:\n print(\n f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'\n )\n",
"step-4": "alunos = list()\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1+nota2)/2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' *30)\nprint(f'{\"Nº\":<4}{\"Nome\":<10}{\"Média\":>8}')\nprint('-' *30)\nfor i, v in enumerate(alunos): \n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' *30)\n notas_aluno = int(input('Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos)-1:\n print(f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Write a class to hold player information, e.g. what room they are in
# currently.
class Player():
def __init__(self, name, location, items=[]):
self.name = name
self.location = location
self.items = items
# def try_direction(self, user_action):
# attribute = user_action + '_to'
# # see if the current room has an attribute
# # we can use 'hasattr' (has attribute)
# if hasattr(self.location, attribute):
# # can use 'getattr' to move to room
# self.location = getattr(self.location, attribute)
# else:
# print("Nothing to find here!")
def pick_up_item(self, item):
if len(self.items) <= 3:
self.items.append(item)
print(f"""\n\nNOW YOU HAVE THE {item}!
You can drop it at any time by typing 'drop {item}'\n""")
else:
print("Sorry you'll have to drop something to pick this up.")
def drop_item(self, item):
if len(self.items) > 0:
self.items.remove(item)
print(f"YOU HAVE DROPPED THE {item}.")
else:
print("You don't have any items to drop!")
# add for player to print what items they have
# def print_items
|
normal
|
{
"blob_id": "b355bd5a519d65ea35d4e8d5e6a384424d79130a",
"index": 3620,
"step-1": "<mask token>\n",
"step-2": "class Player:\n <mask token>\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(\n f\"\"\"\n\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\n\"\"\"\n )\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n <mask token>\n",
"step-3": "class Player:\n <mask token>\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(\n f\"\"\"\n\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\n\"\"\"\n )\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n\n def drop_item(self, item):\n if len(self.items) > 0:\n self.items.remove(item)\n print(f'YOU HAVE DROPPED THE {item}.')\n else:\n print(\"You don't have any items to drop!\")\n",
"step-4": "class Player:\n\n def __init__(self, name, location, items=[]):\n self.name = name\n self.location = location\n self.items = items\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(\n f\"\"\"\n\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\n\"\"\"\n )\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n\n def drop_item(self, item):\n if len(self.items) > 0:\n self.items.remove(item)\n print(f'YOU HAVE DROPPED THE {item}.')\n else:\n print(\"You don't have any items to drop!\")\n",
"step-5": "# Write a class to hold player information, e.g. what room they are in\n# currently.\n\n\nclass Player():\n def __init__(self, name, location, items=[]):\n self.name = name\n self.location = location\n self.items = items\n\n # def try_direction(self, user_action):\n # attribute = user_action + '_to'\n\n # # see if the current room has an attribute\n # # we can use 'hasattr' (has attribute)\n # if hasattr(self.location, attribute):\n # # can use 'getattr' to move to room\n # self.location = getattr(self.location, attribute)\n # else:\n # print(\"Nothing to find here!\")\n\n def pick_up_item(self, item):\n if len(self.items) <= 3:\n self.items.append(item)\n print(f\"\"\"\\n\\nNOW YOU HAVE THE {item}!\nYou can drop it at any time by typing 'drop {item}'\\n\"\"\")\n else:\n print(\"Sorry you'll have to drop something to pick this up.\")\n\n def drop_item(self, item):\n if len(self.items) > 0:\n self.items.remove(item)\n print(f\"YOU HAVE DROPPED THE {item}.\")\n else:\n print(\"You don't have any items to drop!\")\n\n# add for player to print what items they have\n# def print_items\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# The actual code begins here
# This file is intended to load everything downloaded from loaddata.py, preventing user getting banned from IMDB
# The code is written to see what are some key words of the reviews from critics and normal viewers
# And to see what are some of the differences
# The second task is to asses the people's emotion vs. actual score given
# First, we need to load back everything we dumped to folder via pickle.
import pickle
print('loading data...')
with open('movienumbers.pickle','rb') as input_file:
movienumbers = pickle.load(input_file)
with open('ratings.pickle','rb') as input_file:
ratings = pickle.load(input_file)
with open('userratings.pickle','rb') as input_file:
userratings = pickle.load(input_file)
with open('metaratings.pickle','rb') as input_file:
metaratings = pickle.load(input_file)
print('Pickled data successfully loaded.')
# then, it's time to use nltp to see the score of the critics vs. viewers on movies
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# print(movienumbers)
# print(ratings)
# print(userratings)
# print(metaratings)
# Userratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]"
# print(userratings['tt0111161'])
#
# print(metaratings['tt0111161'])
# print(ratings['tt0111161'])
userscore = {}
for movieid, reviews in userratings.items():
score = 0
for eachreviews in reviews:
score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']
average = score / len(reviews)
userscore[movieid] = average
print(userscore)
# Meta ratings is a dictionary in ways like this "ttxxxxxx : [reviews1, reviews2,...]"
criticsscore = {}
for movieid, reviews in metaratings.items():
score_1 = 0
for eachreviews in reviews:
score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']
average = score_1 / len(reviews)
criticsscore[movieid] = average
print(criticsscore)
# Question 1: Are critics always more positive than the audience?
counter = 0
for movieid, score in userscore.items():
if movieid in criticsscore and criticsscore[movieid] > score:
counter += 1
else:
counter += 0
# Displaying results to question 1
print("Critics overpraise these movies " + str(counter) + " times more than normal viewers out of "
+ str(len(criticsscore)) + " movies in total.")
if counter < (len(criticsscore) - counter):
print("Because the critics overpraise less than half of the movies sampled here, the critics are more refrained "
"than the users on IMDb.")
else:
print("Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained "
"than the users on IMDb.")
# Question 2: Is the IMDB score closer to the users' sentiment? Or the critics.
useriscloser = 0
criticiscloser = 0
for movieid, score in criticsscore.items():
if abs(userscore[movieid] - (ratings[movieid])/10) > abs(score - (ratings[movieid]/10)):
useriscloser += 1
else:
criticiscloser += 1
# Displaying results to question 2
print("Critics are more closer to the ratings for " + str(criticiscloser) +
" times, while normal viewers are closer " + str(useriscloser) + " times out of " +
str(len(criticsscore)) + " movies in total.")
if useriscloser > criticiscloser:
print("Because the more movies have users resembling closer to the rating, the critics are less accurate "
"than the users on IMDb.")
else:
print("Because the more movies have critics resembling closer to the rating, the users are less accurate "
"than the users on IMDb.")
|
normal
|
{
"blob_id": "1f69cf5f6d15048e6ead37b5da836c9e2f783f74",
"index": 803,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\n<mask token>\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\n<mask token>\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\n<mask token>\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\n<mask token>\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to the ratings for ' + str(criticiscloser) +\n ' times, while normal viewers are closer ' + 
str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n",
"step-3": "<mask token>\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\n<mask token>\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to the ratings for ' + str(criticiscloser) +\n ' 
times, while normal viewers are closer ' + str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n",
"step-4": "import pickle\nprint('loading data...')\nwith open('movienumbers.pickle', 'rb') as input_file:\n movienumbers = pickle.load(input_file)\nwith open('ratings.pickle', 'rb') as input_file:\n ratings = pickle.load(input_file)\nwith open('userratings.pickle', 'rb') as input_file:\n userratings = pickle.load(input_file)\nwith open('metaratings.pickle', 'rb') as input_file:\n metaratings = pickle.load(input_file)\nprint('Pickled data successfully loaded.')\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score / len(reviews)\n userscore[movieid] = average\nprint(userscore)\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)[\n 'compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = average\nprint(criticsscore)\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\nprint('Critics overpraise these movies ' + str(counter) +\n ' times more than normal viewers out of ' + str(len(criticsscore)) +\n ' movies in total.')\nif counter < len(criticsscore) - counter:\n print(\n 'Because the critics overpraise less than half of the movies sampled here, the critics are more refrained than the users on IMDb.'\n )\nelse:\n print(\n 'Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained than the users on IMDb.'\n )\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - ratings[movieid] / 10) > abs(score - \n ratings[movieid] / 10):\n useriscloser += 1\n else:\n criticiscloser += 1\nprint('Critics are more closer to 
the ratings for ' + str(criticiscloser) +\n ' times, while normal viewers are closer ' + str(useriscloser) +\n ' times out of ' + str(len(criticsscore)) + ' movies in total.')\nif useriscloser > criticiscloser:\n print(\n 'Because the more movies have users resembling closer to the rating, the critics are less accurate than the users on IMDb.'\n )\nelse:\n print(\n 'Because the more movies have critics resembling closer to the rating, the users are less accurate than the users on IMDb.'\n )\n",
"step-5": "# The actual code begins here\n# This file is intended to load everything downloaded from loaddata.py, preventing user getting banned from IMDB\n# The code is written to see what are some key words of the reviews from critics and normal viewers\n# And to see what are some of the differences\n# The second task is to asses the people's emotion vs. actual score given\n\n# First, we need to load back everything we dumped to folder via pickle.\n\nimport pickle\nprint('loading data...')\n\nwith open('movienumbers.pickle','rb') as input_file:\n movienumbers = pickle.load(input_file)\n\nwith open('ratings.pickle','rb') as input_file:\n ratings = pickle.load(input_file)\n\nwith open('userratings.pickle','rb') as input_file:\n userratings = pickle.load(input_file)\n\nwith open('metaratings.pickle','rb') as input_file:\n metaratings = pickle.load(input_file)\n\nprint('Pickled data successfully loaded.')\n\n# then, it's time to use nltp to see the score of the critics vs. viewers on movies\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n# print(movienumbers)\n# print(ratings)\n# print(userratings)\n# print(metaratings)\n\n# Userratings is a dictionary in ways like this \"ttxxxxxx : [reviews1, reviews2,...]\"\n\n# print(userratings['tt0111161'])\n#\n# print(metaratings['tt0111161'])\n# print(ratings['tt0111161'])\n\nuserscore = {}\nfor movieid, reviews in userratings.items():\n score = 0\n for eachreviews in reviews:\n score += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']\n average = score / len(reviews)\n userscore[movieid] = average\n\nprint(userscore)\n\n# Meta ratings is a dictionary in ways like this \"ttxxxxxx : [reviews1, reviews2,...]\"\n\n\n\ncriticsscore = {}\nfor movieid, reviews in metaratings.items():\n score_1 = 0\n for eachreviews in reviews:\n score_1 += SentimentIntensityAnalyzer().polarity_scores(eachreviews)['compound']\n average = score_1 / len(reviews)\n criticsscore[movieid] = 
average\n\nprint(criticsscore)\n\n\n# Question 1: Are critics always more positive than the audience?\n\ncounter = 0\nfor movieid, score in userscore.items():\n if movieid in criticsscore and criticsscore[movieid] > score:\n counter += 1\n else:\n counter += 0\n\n# Displaying results to question 1\nprint(\"Critics overpraise these movies \" + str(counter) + \" times more than normal viewers out of \"\n + str(len(criticsscore)) + \" movies in total.\")\nif counter < (len(criticsscore) - counter):\n print(\"Because the critics overpraise less than half of the movies sampled here, the critics are more refrained \"\n \"than the users on IMDb.\")\nelse:\n print(\"Because the critics overpraise no less than half of the movies sampled here, the critics are less refrained \"\n \"than the users on IMDb.\")\n\n# Question 2: Is the IMDB score closer to the users' sentiment? Or the critics.\n\nuseriscloser = 0\ncriticiscloser = 0\nfor movieid, score in criticsscore.items():\n if abs(userscore[movieid] - (ratings[movieid])/10) > abs(score - (ratings[movieid]/10)):\n useriscloser += 1\n else:\n criticiscloser += 1\n\n# Displaying results to question 2\nprint(\"Critics are more closer to the ratings for \" + str(criticiscloser) +\n \" times, while normal viewers are closer \" + str(useriscloser) + \" times out of \" +\n str(len(criticsscore)) + \" movies in total.\")\n\nif useriscloser > criticiscloser:\n print(\"Because the more movies have users resembling closer to the rating, the critics are less accurate \"\n \"than the users on IMDb.\")\nelse:\n print(\"Because the more movies have critics resembling closer to the rating, the users are less accurate \"\n \"than the users on IMDb.\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def foi_tab_v1():
    """Build the ipywidgets UI for FOI analysis, version 1.

    Version 1 requires direct access to a database; when no thematic
    raster is uploaded, object storage access is also needed to generate
    the base image. Returns a VBox containing the step-by-step workflow:
    connection setup, spatial data, raster, YAML classes, database
    functions, parameters and the run button.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    path_foi_func = foi_v1.path_foi_func
    progress = Output()

    def outlog(*text):
        # Route messages to the shared Output widget shown at the bottom.
        with progress:
            print(*text)

    foi_info = HTML(
        'FOI procedures version 1 (requires access to a database).\n ',
        placeholder='FOI Information')

    # Step 1: database / object storage connection.
    config_info = HTML(value="""1. Connect to database and object storage.<br>
        FOI procedures need direct access to the database. In case no
        image is provided, access to object storage will be needed as well
        to generate the base image from sentinel images.
        """, placeholder='FOI Information')
    config_conn = Button(value=False, button_style='info',
        tooltip='Configure db connection.', icon='cogs',
        layout=Layout(width='40px'))
    config_conn_box = HBox([])

    @config_conn.on_click
    def config_conn_on_click(b):
        # Toggle the connection-settings panel on repeated clicks.
        if config_conn_box.children == ():
            config_conn_box.children = [settings_ds.direct_conn()]
        else:
            config_conn_box.children = ()
    config_box = VBox([config_info, config_conn, config_conn_box])

    # Step 2: spatial data (parcels) selection.
    spatial_info = HTML(
        """2. Select the spatial data to be tested - parcels that will be
        checked for heterogeneity and cardinality.<br>
        - Select a table from the database""")
    db_tables = Dropdown(options=[], description='db Tables:')
    refresh_db_tables = Button(value=False, button_style='info',
        tooltip='Get db tables.', icon='refresh',
        layout=Layout(width='40px'))

    @refresh_db_tables.on_click
    def refresh_db_tables_on_click(b):
        db_tables.options = db.tables(config.get_value(['set', 'db_conn']))
    db_tables_box = HBox([db_tables, refresh_db_tables])
    upload_shp = Button(description='Create new table', value=False,
        button_style='info', tooltip='upload_shp.', icon='up')
    upload_box = VBox([])

    @upload_shp.on_click
    def upload_shp_on_click(b):
        # Toggle the shapefile-upload panel on repeated clicks.
        if upload_box.children == ():
            upload_box.children = [ext_func.upload_shp(path_foi, True)]
        else:
            upload_box.children = ()
    spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])

    # Step 3: thematic raster selection.
    img_info = HTML(
        """3. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,
        disabled=True, button_style='info',
        tooltips=['Upload your base image', 'Get from object storage'])

    def on_img_option_change(change):
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',
        '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # Step 4: YAML file holding the raster class definitions.
    yml_info = HTML(
        """4. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
        'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # Step 5: import the FOI SQL functions into the database.
    dbf_info = HTML("""5. Create database functions.<br>
        - Import required database functions for FOI analysis to the database""")
    dbf_insert = Button(value=False, button_style='info',
        tooltip='Create functions.', icon='fa-share-square')

    @dbf_insert.on_click
    def dbf_insert_on_click(b):
        outlog('path_foi_func :', path_foi_func)
        progress.clear_output()
        try:
            functions = glob.glob(f'{path_foi_func}*.func')
            # BUGFIX: the original rebound the name 'db' to the connection
            # key (a string) and then called db.insert_function() on that
            # string. Use a distinct name so the 'db' module stays usable.
            conn_name = config.get_value(['set', 'db_conn'])
            sche = config.get_value(['db', conn_name, 'sche'])
            user = config.get_value(['db', conn_name, 'user'])
            for f in functions:
                # Read with a context manager so the handle is closed.
                with open(f) as func_file:
                    db.insert_function(
                        func_file.read().format(schema=sche, owner=user))
                outlog(f"The '{f}' Was imported to the database.")
            finc_list = ', '.join(
                f"ipycbm_{f.split('/')[-1].split('.')[0]}"
                for f in functions)
            outlog(f"The functions: {finc_list} were added to the database")
        except Exception as err:
            outlog('Could not add functions to database.', err)
    dbf_box = VBox([dbf_info, dbf_insert])

    # Step 6: analysis parameters.
    param_info = HTML('6. Set FOI v1 Parameters')
    param_heto_info = HTML("""
    Minimum and maximum thresholds for heterogeneity checks. In the example,
    any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogeneous.
    """)
    param_min_het = IntText(value=30, description='MIN:',
        tooltip='Minimum threshold for heterogeneity checks',
        layout=Layout(width='150px'))
    param_max_het = IntText(value=70, description='MAX:',
        tooltip='Maximum threshold for heterogeneity checks',
        layout=Layout(width='150px'))
    param_area_info = HTML("""Minimum area for clusters selection -
    only clusters bigger from this threshold will be counted.
    """)
    param_area = IntText(value=2000, description='area:',
        tooltip='Minimum area for clusters selection.',
        layout=Layout(width='200px'))
    param_box = VBox([param_info, param_heto_info,
        HBox([param_min_het, param_max_het]), param_area_info, param_area])

    # Step 7: run the analysis.
    run_info = Label('7. Run the FOI analysis.')
    run_analysis = Button(description='Run FOI v1', value=False,
        button_style='info', tooltip='Run FOI analysis version 1',
        icon='play')
    run_box = VBox([run_info, run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Run inside the Output widget so foi_v1 prints appear in the UI.
        with progress:
            foi_v1.main(db_tables.value,
                f'{path_foi}raster/{img_file.children[1].children[0].value}',
                f'{path_foi}{yml_file.children[1].children[0].value}',
                param_min_het.value, param_max_het.value, param_area.value)

    wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,
                 dbf_box, param_box, run_box, progress])
    return wbox
def foi_tab_v2():
    """Build the ipywidgets UI for FOI analysis, version 2.

    Version 2 works on local files only (no database access required).
    Returns a VBox with the workflow: vector input, thematic raster,
    YAML classes, parameters and the run button.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    progress = Output()

    def outlog(*text):
        # Route messages to the shared Output widget shown at the bottom.
        with progress:
            print(*text)

    foi_info = HTML(
        'FOI procedures version 2 (does not require access to a database).\n '
        , placeholder='FOI Information')

    # Step 1: vector data (parcels) to be tested.
    shp_info = HTML(
        """1. Spatial data to be tested -
        parcels that will be checked for heterogeneity and cardinality.""")
    shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',
        'Select .shp', True, True)
    shp_box = VBox([shp_info, shp_file])

    # Step 2: thematic raster selection.
    img_info = HTML(
        """2. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,
        disabled=True, button_style='',
        tooltips=['Upload your base image', 'Get from object storage'])

    def on_img_option_change(change):
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',
        '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # Step 3: YAML file holding the raster class definitions.
    yml_info = HTML(
        """3. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
        'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # Step 4: analysis parameters.
    pre_info = Label('4. Set FOI v2 Parameters.')
    pre_heto_chec = HTML("""
    Minimum and maximum thresholds for heterogeneity checks. In the example,
    any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogeneous.
    """)
    pre_min_het = IntText(value=30, description='MIN:',
        tooltip='Minimum threshold for heterogeneity checks',
        disabled=False, layout=Layout(width='150px'))
    pre_max_het = IntText(value=70, description='MAX:',
        tooltip='Maximum threshold for heterogeneity checks',
        disabled=False, layout=Layout(width='150px'))
    pre_heto_chec_box = HBox([pre_min_het, pre_max_het])
    pre_min_cluster_size = IntText(value=20, description='pixels:',
        tooltip='Minimum area for clusters selection.', disabled=False,
        layout=Layout(width='200px'))
    pre_pixel_connectivity = IntText(value=8,
        description='connectivity type:',
        tooltip='Type of pixel connectivity in analysis. '
                'Accepted values: 4 or 8.',
        disabled=False, layout=Layout(width='200px'))
    pre_negative_buffer = IntText(value=-10, description='negative buffer:',
        tooltip='Negative buffer to be applied on the FOI',
        disabled=False, layout=Layout(width='200px'))
    pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,
        pre_pixel_connectivity, pre_negative_buffer,
        HBox([pre_min_cluster_size, HTML(
            'Minimum area for clusters selection - only clusters bigger '
            'from this threshold will be counted.')])])

    # Step 5: run the analysis.
    run_info = Label('5. Run the FOI analysis.')
    run_analysis = Button(description='Run FOI v2', value=False,
        disabled=False, button_style='info',
        tooltip='Run FOI analysis version 2', icon='play')
    run_box = HBox([run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Run inside the Output widget so foi_v2 prints appear in the UI.
        with progress:
            foi_v2.main(
                f'{path_foi}vector/{shp_file.children[1].children[0].value}',
                f'{path_foi}raster/{img_file.children[1].children[0].value}',
                f'{path_foi}{yml_file.children[1].children[0].value}',
                pre_negative_buffer.value, pre_min_het.value,
                pre_max_het.value, pre_pixel_connectivity.value,
                pre_min_cluster_size.value)

    wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,
                    run_box, progress])
    return wbox_v2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from cbm.foi import foi_v2
except Exception as err:
print(err)
def foi_tab_v1():
    """Build the ipywidgets UI for FOI analysis, version 1.

    Version 1 requires direct access to a database; when no thematic
    raster is uploaded, object storage access is also needed to generate
    the base image. Returns a VBox containing the step-by-step workflow:
    connection setup, spatial data, raster, YAML classes, database
    functions, parameters and the run button.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    path_foi_func = foi_v1.path_foi_func
    progress = Output()

    def outlog(*text):
        # Route messages to the shared Output widget shown at the bottom.
        with progress:
            print(*text)

    foi_info = HTML(
        'FOI procedures version 1 (requires access to a database).\n ',
        placeholder='FOI Information')

    # Step 1: database / object storage connection.
    config_info = HTML(value="""1. Connect to database and object storage.<br>
        FOI procedures need direct access to the database. In case no
        image is provided, access to object storage will be needed as well
        to generate the base image from sentinel images.
        """, placeholder='FOI Information')
    config_conn = Button(value=False, button_style='info',
        tooltip='Configure db connection.', icon='cogs',
        layout=Layout(width='40px'))
    config_conn_box = HBox([])

    @config_conn.on_click
    def config_conn_on_click(b):
        # Toggle the connection-settings panel on repeated clicks.
        if config_conn_box.children == ():
            config_conn_box.children = [settings_ds.direct_conn()]
        else:
            config_conn_box.children = ()
    config_box = VBox([config_info, config_conn, config_conn_box])

    # Step 2: spatial data (parcels) selection.
    spatial_info = HTML(
        """2. Select the spatial data to be tested - parcels that will be
        checked for heterogeneity and cardinality.<br>
        - Select a table from the database""")
    db_tables = Dropdown(options=[], description='db Tables:')
    refresh_db_tables = Button(value=False, button_style='info',
        tooltip='Get db tables.', icon='refresh',
        layout=Layout(width='40px'))

    @refresh_db_tables.on_click
    def refresh_db_tables_on_click(b):
        db_tables.options = db.tables(config.get_value(['set', 'db_conn']))
    db_tables_box = HBox([db_tables, refresh_db_tables])
    upload_shp = Button(description='Create new table', value=False,
        button_style='info', tooltip='upload_shp.', icon='up')
    upload_box = VBox([])

    @upload_shp.on_click
    def upload_shp_on_click(b):
        # Toggle the shapefile-upload panel on repeated clicks.
        if upload_box.children == ():
            upload_box.children = [ext_func.upload_shp(path_foi, True)]
        else:
            upload_box.children = ()
    spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])

    # Step 3: thematic raster selection.
    img_info = HTML(
        """3. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,
        disabled=True, button_style='info',
        tooltips=['Upload your base image', 'Get from object storage'])

    def on_img_option_change(change):
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',
        '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # Step 4: YAML file holding the raster class definitions.
    yml_info = HTML(
        """4. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
        'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # Step 5: import the FOI SQL functions into the database.
    dbf_info = HTML("""5. Create database functions.<br>
        - Import required database functions for FOI analysis to the database""")
    dbf_insert = Button(value=False, button_style='info',
        tooltip='Create functions.', icon='fa-share-square')

    @dbf_insert.on_click
    def dbf_insert_on_click(b):
        outlog('path_foi_func :', path_foi_func)
        progress.clear_output()
        try:
            functions = glob.glob(f'{path_foi_func}*.func')
            # BUGFIX: the original rebound the name 'db' to the connection
            # key (a string) and then called db.insert_function() on that
            # string. Use a distinct name so the 'db' module stays usable.
            conn_name = config.get_value(['set', 'db_conn'])
            sche = config.get_value(['db', conn_name, 'sche'])
            user = config.get_value(['db', conn_name, 'user'])
            for f in functions:
                # Read with a context manager so the handle is closed.
                with open(f) as func_file:
                    db.insert_function(
                        func_file.read().format(schema=sche, owner=user))
                outlog(f"The '{f}' Was imported to the database.")
            finc_list = ', '.join(
                f"ipycbm_{f.split('/')[-1].split('.')[0]}"
                for f in functions)
            outlog(f"The functions: {finc_list} were added to the database")
        except Exception as err:
            outlog('Could not add functions to database.', err)
    dbf_box = VBox([dbf_info, dbf_insert])

    # Step 6: analysis parameters.
    param_info = HTML('6. Set FOI v1 Parameters')
    param_heto_info = HTML("""
    Minimum and maximum thresholds for heterogeneity checks. In the example,
    any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogeneous.
    """)
    param_min_het = IntText(value=30, description='MIN:',
        tooltip='Minimum threshold for heterogeneity checks',
        layout=Layout(width='150px'))
    param_max_het = IntText(value=70, description='MAX:',
        tooltip='Maximum threshold for heterogeneity checks',
        layout=Layout(width='150px'))
    param_area_info = HTML("""Minimum area for clusters selection -
    only clusters bigger from this threshold will be counted.
    """)
    param_area = IntText(value=2000, description='area:',
        tooltip='Minimum area for clusters selection.',
        layout=Layout(width='200px'))
    param_box = VBox([param_info, param_heto_info,
        HBox([param_min_het, param_max_het]), param_area_info, param_area])

    # Step 7: run the analysis.
    run_info = Label('7. Run the FOI analysis.')
    run_analysis = Button(description='Run FOI v1', value=False,
        button_style='info', tooltip='Run FOI analysis version 1',
        icon='play')
    run_box = VBox([run_info, run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Run inside the Output widget so foi_v1 prints appear in the UI.
        with progress:
            foi_v1.main(db_tables.value,
                f'{path_foi}raster/{img_file.children[1].children[0].value}',
                f'{path_foi}{yml_file.children[1].children[0].value}',
                param_min_het.value, param_max_het.value, param_area.value)

    wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,
                 dbf_box, param_box, run_box, progress])
    return wbox
def foi_tab_v2():
    """Build the ipywidgets UI for FOI analysis, version 2.

    Version 2 works on local files only (no database access required).
    Returns a VBox with the workflow: vector input, thematic raster,
    YAML classes, parameters and the run button.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    progress = Output()

    def outlog(*text):
        # Route messages to the shared Output widget shown at the bottom.
        with progress:
            print(*text)

    foi_info = HTML(
        'FOI procedures version 2 (does not require access to a database).\n '
        , placeholder='FOI Information')

    # Step 1: vector data (parcels) to be tested.
    shp_info = HTML(
        """1. Spatial data to be tested -
        parcels that will be checked for heterogeneity and cardinality.""")
    shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',
        'Select .shp', True, True)
    shp_box = VBox([shp_info, shp_file])

    # Step 2: thematic raster selection.
    img_info = HTML(
        """2. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,
        disabled=True, button_style='',
        tooltips=['Upload your base image', 'Get from object storage'])

    def on_img_option_change(change):
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',
        '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # Step 3: YAML file holding the raster class definitions.
    yml_info = HTML(
        """3. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
        'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # Step 4: analysis parameters.
    pre_info = Label('4. Set FOI v2 Parameters.')
    pre_heto_chec = HTML("""
    Minimum and maximum thresholds for heterogeneity checks. In the example,
    any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogeneous.
    """)
    pre_min_het = IntText(value=30, description='MIN:',
        tooltip='Minimum threshold for heterogeneity checks',
        disabled=False, layout=Layout(width='150px'))
    pre_max_het = IntText(value=70, description='MAX:',
        tooltip='Maximum threshold for heterogeneity checks',
        disabled=False, layout=Layout(width='150px'))
    pre_heto_chec_box = HBox([pre_min_het, pre_max_het])
    pre_min_cluster_size = IntText(value=20, description='pixels:',
        tooltip='Minimum area for clusters selection.', disabled=False,
        layout=Layout(width='200px'))
    pre_pixel_connectivity = IntText(value=8,
        description='connectivity type:',
        tooltip='Type of pixel connectivity in analysis. '
                'Accepted values: 4 or 8.',
        disabled=False, layout=Layout(width='200px'))
    pre_negative_buffer = IntText(value=-10, description='negative buffer:',
        tooltip='Negative buffer to be applied on the FOI',
        disabled=False, layout=Layout(width='200px'))
    pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,
        pre_pixel_connectivity, pre_negative_buffer,
        HBox([pre_min_cluster_size, HTML(
            'Minimum area for clusters selection - only clusters bigger '
            'from this threshold will be counted.')])])

    # Step 5: run the analysis.
    run_info = Label('5. Run the FOI analysis.')
    run_analysis = Button(description='Run FOI v2', value=False,
        disabled=False, button_style='info',
        tooltip='Run FOI analysis version 2', icon='play')
    run_box = HBox([run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Run inside the Output widget so foi_v2 prints appear in the UI.
        with progress:
            foi_v2.main(
                f'{path_foi}vector/{shp_file.children[1].children[0].value}',
                f'{path_foi}raster/{img_file.children[1].children[0].value}',
                f'{path_foi}{yml_file.children[1].children[0].value}',
                pre_negative_buffer.value, pre_min_het.value,
                pre_max_het.value, pre_pixel_connectivity.value,
                pre_min_cluster_size.value)

    wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,
                    run_box, progress])
    return wbox_v2
<|reserved_special_token_1|>
import os
import glob
from ipywidgets import Text, Label, HBox, VBox, Layout, Dropdown, ToggleButtons, Output, HTML, Button, FileUpload, IntText, RadioButtons
from cbm.utils import config
from cbm.ipycbm.utils import settings_ds, cbm_widgets
from cbm.ipycbm.ipy_ext import ext_func
from cbm.foi import foi_v1
from cbm.datas import db
try:
from cbm.foi import foi_v2
except Exception as err:
print(err)
def foi_tab_v1():
    """Build the ipywidgets UI for FOI analysis, version 1.

    Version 1 requires direct access to a database; when no thematic
    raster is uploaded, object storage access is also needed to generate
    the base image. Returns a VBox containing the step-by-step workflow:
    connection setup, spatial data, raster, YAML classes, database
    functions, parameters and the run button.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    path_foi_func = foi_v1.path_foi_func
    progress = Output()

    def outlog(*text):
        # Route messages to the shared Output widget shown at the bottom.
        with progress:
            print(*text)

    foi_info = HTML(
        'FOI procedures version 1 (requires access to a database).\n ',
        placeholder='FOI Information')

    # Step 1: database / object storage connection.
    config_info = HTML(value="""1. Connect to database and object storage.<br>
        FOI procedures need direct access to the database. In case no
        image is provided, access to object storage will be needed as well
        to generate the base image from sentinel images.
        """, placeholder='FOI Information')
    config_conn = Button(value=False, button_style='info',
        tooltip='Configure db connection.', icon='cogs',
        layout=Layout(width='40px'))
    config_conn_box = HBox([])

    @config_conn.on_click
    def config_conn_on_click(b):
        # Toggle the connection-settings panel on repeated clicks.
        if config_conn_box.children == ():
            config_conn_box.children = [settings_ds.direct_conn()]
        else:
            config_conn_box.children = ()
    config_box = VBox([config_info, config_conn, config_conn_box])

    # Step 2: spatial data (parcels) selection.
    spatial_info = HTML(
        """2. Select the spatial data to be tested - parcels that will be
        checked for heterogeneity and cardinality.<br>
        - Select a table from the database""")
    db_tables = Dropdown(options=[], description='db Tables:')
    refresh_db_tables = Button(value=False, button_style='info',
        tooltip='Get db tables.', icon='refresh',
        layout=Layout(width='40px'))

    @refresh_db_tables.on_click
    def refresh_db_tables_on_click(b):
        db_tables.options = db.tables(config.get_value(['set', 'db_conn']))
    db_tables_box = HBox([db_tables, refresh_db_tables])
    upload_shp = Button(description='Create new table', value=False,
        button_style='info', tooltip='upload_shp.', icon='up')
    upload_box = VBox([])

    @upload_shp.on_click
    def upload_shp_on_click(b):
        # Toggle the shapefile-upload panel on repeated clicks.
        if upload_box.children == ():
            upload_box.children = [ext_func.upload_shp(path_foi, True)]
        else:
            upload_box.children = ()
    spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])

    # Step 3: thematic raster selection.
    img_info = HTML(
        """3. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,
        disabled=True, button_style='info',
        tooltips=['Upload your base image', 'Get from object storage'])

    def on_img_option_change(change):
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',
        '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # Step 4: YAML file holding the raster class definitions.
    yml_info = HTML(
        """4. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")
    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
        'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # Step 5: import the FOI SQL functions into the database.
    dbf_info = HTML("""5. Create database functions.<br>
        - Import required database functions for FOI analysis to the database""")
    dbf_insert = Button(value=False, button_style='info',
        tooltip='Create functions.', icon='fa-share-square')

    @dbf_insert.on_click
    def dbf_insert_on_click(b):
        outlog('path_foi_func :', path_foi_func)
        progress.clear_output()
        try:
            functions = glob.glob(f'{path_foi_func}*.func')
            # BUGFIX: the original rebound the name 'db' to the connection
            # key (a string) and then called db.insert_function() on that
            # string. Use a distinct name so the 'db' module stays usable.
            conn_name = config.get_value(['set', 'db_conn'])
            sche = config.get_value(['db', conn_name, 'sche'])
            user = config.get_value(['db', conn_name, 'user'])
            for f in functions:
                # Read with a context manager so the handle is closed.
                with open(f) as func_file:
                    db.insert_function(
                        func_file.read().format(schema=sche, owner=user))
                outlog(f"The '{f}' Was imported to the database.")
            finc_list = ', '.join(
                f"ipycbm_{f.split('/')[-1].split('.')[0]}"
                for f in functions)
            outlog(f"The functions: {finc_list} were added to the database")
        except Exception as err:
            outlog('Could not add functions to database.', err)
    dbf_box = VBox([dbf_info, dbf_insert])

    # Step 6: analysis parameters.
    param_info = HTML('6. Set FOI v1 Parameters')
    param_heto_info = HTML("""
    Minimum and maximum thresholds for heterogeneity checks. In the example,
    any parcel with percentage of pixels for one class between 30 and 70 from
    the total, will be considered heterogeneous.
    """)
    param_min_het = IntText(value=30, description='MIN:',
        tooltip='Minimum threshold for heterogeneity checks',
        layout=Layout(width='150px'))
    param_max_het = IntText(value=70, description='MAX:',
        tooltip='Maximum threshold for heterogeneity checks',
        layout=Layout(width='150px'))
    param_area_info = HTML("""Minimum area for clusters selection -
    only clusters bigger from this threshold will be counted.
    """)
    param_area = IntText(value=2000, description='area:',
        tooltip='Minimum area for clusters selection.',
        layout=Layout(width='200px'))
    param_box = VBox([param_info, param_heto_info,
        HBox([param_min_het, param_max_het]), param_area_info, param_area])

    # Step 7: run the analysis.
    run_info = Label('7. Run the FOI analysis.')
    run_analysis = Button(description='Run FOI v1', value=False,
        button_style='info', tooltip='Run FOI analysis version 1',
        icon='play')
    run_box = VBox([run_info, run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Run inside the Output widget so foi_v1 prints appear in the UI.
        with progress:
            foi_v1.main(db_tables.value,
                f'{path_foi}raster/{img_file.children[1].children[0].value}',
                f'{path_foi}{yml_file.children[1].children[0].value}',
                param_min_het.value, param_max_het.value, param_area.value)

    wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,
                 dbf_box, param_box, run_box, progress])
    return wbox
def foi_tab_v2():
    """Build the ipywidgets UI for FOI analysis version 2.

    Version 2 works entirely on local files (no database access): the user
    selects a vector file, a thematic raster and a YAML class map, sets the
    heterogeneity/cardinality parameters and runs ``foi_v2.main``.

    Returns:
        VBox: the assembled widget box, ready for display in a notebook.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"

    progress = Output()  # shared log area for all button callbacks

    def outlog(*text):
        # Print into the progress Output widget instead of stdout.
        with progress:
            print(*text)

    foi_info = HTML("""FOI procedures version 2 (does not require access to a database).
        """, placeholder='FOI Information')

    # 1. Vector file selection.
    shp_info = HTML(
        """1. Spatial data to be tested -
        parcels that will be checked for heterogeneity and cardinality.""")
    shp_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}vector', '', 'Select .shp', True, True)
    shp_box = VBox([shp_info, shp_file])

    # 2. Thematic raster selection (only 'Upload' is implemented).
    img_info = HTML(
        """2. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(
        options=['Upload', 'Generate'],
        value=None,
        disabled=True,
        button_style='',  # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upload your base image', 'Get from object storage']
    )

    def on_img_option_change(change):
        # Show the file selector only for the 'Upload' option.
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # 3. YAML class map upload.
    yml_info = HTML(
        """3. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")

    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
                                              'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # 4. FOI v2 parameters.
    pre_info = Label("4. Set FOI v2 Parameters.")

    # heterogeneity_threshold
    pre_heto_chec = HTML("""
        Minimum and maximum thresholds for heterogeneity checks. In the example,
        any parcel with percentage of pixels for one class between 30 and 70 from
        the total, will be considered heterogenous.
        """)
    pre_min_het = IntText(
        value=30,
        description='MIN:',
        tooltip="Minimum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_max_het = IntText(
        value=70,
        description='MAX:',
        tooltip="Maximum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_heto_chec_box = HBox([pre_min_het, pre_max_het])
    pre_min_cluster_size = IntText(
        value=20,
        description='pixels:',
        tooltip="Minimum area for clusters selection.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_pixel_connectivity = IntText(
        value=8,
        description='connectivity type:',
        tooltip="Type of pixel connectivity in analysis. Accepted values: 4 or 8.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_negative_buffer = IntText(
        value=-10,
        description='negative buffer:',
        tooltip="Negative buffer to be applied on the FOI",
        disabled=False,
        layout=Layout(width='200px')
    )

    pre_box = VBox([
        pre_info, pre_heto_chec, pre_heto_chec_box,
        pre_pixel_connectivity, pre_negative_buffer,
        HBox([pre_min_cluster_size,
              HTML("Minimum area for clusters selection - only clusters bigger from this threshold will be counted.")])
    ])

    # 5. Run the analysis.
    run_info = Label("5. Run the FOI analysis.")
    run_analysis = Button(
        description='Run FOI v2',
        value=False,
        disabled=False,
        button_style='info',
        tooltip='Run FOI analysis version 2',
        icon='play',
    )
    run_box = HBox([run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Pass the selected file paths and parameter values to foi_v2.
        with progress:
            foi_v2.main(
                f"{path_foi}vector/{shp_file.children[1].children[0].value}",
                f"{path_foi}raster/{img_file.children[1].children[0].value}",
                f"{path_foi}{yml_file.children[1].children[0].value}",
                pre_negative_buffer.value,
                pre_min_het.value,
                pre_max_het.value,
                pre_pixel_connectivity.value,
                pre_min_cluster_size.value)

    wbox_v2 = VBox([foi_info,
                    shp_box,
                    img_box,
                    yml_box,
                    pre_box,
                    run_info,
                    run_box,
                    progress])

    return wbox_v2
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import os
import glob
from ipywidgets import (Text, Label, HBox, VBox, Layout, Dropdown,
ToggleButtons, Output, HTML, Button,
FileUpload, IntText, RadioButtons)
from cbm.utils import config
from cbm.ipycbm.utils import settings_ds, cbm_widgets
from cbm.ipycbm.ipy_ext import ext_func
from cbm.foi import foi_v1
from cbm.datas import db
try:
from cbm.foi import foi_v2
except Exception as err:
print(err)
def foi_tab_v1():
    """Build the ipywidgets UI for FOI analysis version 1.

    Version 1 requires direct database access: the user configures the
    database/object-storage connection, selects the parcel table, provides
    a thematic raster and a YAML class map, installs the helper SQL
    functions and then runs ``foi_v1.main``.

    Returns:
        VBox: the assembled widget box, ready for display in a notebook.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"
    path_foi_func = foi_v1.path_foi_func

    progress = Output()  # shared log area for all button callbacks

    def outlog(*text):
        # Print into the progress Output widget instead of stdout.
        with progress:
            print(*text)

    foi_info = HTML("""FOI procedures version 1 (requires access to a database).
        """, placeholder='FOI Information')

    # 1. Database / object storage connection.
    config_info = HTML(value="""1. Connect to database and object storage.<br>
        FOI procedures need direct access to the database. In case no
        image is provided, access to object storage will be needed as well
        to generate the base image from sentinel images.
        """, placeholder='FOI Information')
    config_conn = Button(
        value=False,
        button_style='info',
        tooltip='Configure db connection.',
        icon='cogs',
        layout=Layout(width='40px')
    )

    config_conn_box = HBox([])

    @config_conn.on_click
    def config_conn_on_click(b):
        # Toggle the connection-settings panel open/closed.
        if config_conn_box.children == ():
            config_conn_box.children = [settings_ds.direct_conn()]
        else:
            config_conn_box.children = ()

    config_box = VBox([config_info, config_conn,
                       config_conn_box])

    # 2. Spatial data (parcel table) to be tested.
    spatial_info = HTML(
        """2. Select the spatial data to be tested - parcels that will be
        checked for heterogeneity and cardinality.<br>
        - Select a table from the database""")

    db_tables = Dropdown(
        options=[],
        description='db Tables:'
    )
    refresh_db_tables = Button(
        value=False,
        button_style='info',
        tooltip='Get db tables.',
        icon='refresh',
        layout=Layout(width='40px')
    )

    @refresh_db_tables.on_click
    def refresh_db_tables_on_click(b):
        # Re-query the table list for the currently configured connection.
        db_tables.options = db.tables(config.get_value(['set', 'db_conn']))

    db_tables_box = HBox([db_tables, refresh_db_tables])

    upload_shp = Button(
        description='Create new table',
        value=False,
        button_style='info',
        tooltip='upload_shp.',
        icon='up'
    )

    upload_box = VBox([])

    @upload_shp.on_click
    def upload_shp_on_click(b):
        # Toggle the shapefile-upload panel open/closed.
        if upload_box.children == ():
            upload_box.children = [ext_func.upload_shp(path_foi, True)]
        else:
            upload_box.children = ()

    spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])

    # 3. Thematic raster (only 'Upload' is implemented).
    img_info = HTML(
        """3. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(
        options=['Upload', 'Generate'],
        value=None,
        disabled=True,
        button_style='info',  # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upload your base image', 'Get from object storage']
    )

    def on_img_option_change(change):
        # Show the file selector only for the 'Upload' option.
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # 4. YAML class map upload.
    yml_info = HTML(
        """4. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")

    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
                                              'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # 5. Database functions installation.
    dbf_info = HTML("""5. Create database functions.<br>
        - Import required database functions for FOI analysis to the database""")

    dbf_insert = Button(
        value=False,
        button_style='info',
        tooltip='Create functions.',
        icon='fa-share-square'
    )

    @dbf_insert.on_click
    def dbf_insert_on_click(b):
        outlog('path_foi_func :', path_foi_func)
        progress.clear_output()
        try:
            functions = glob.glob(f"{path_foi_func}*.func")
            # Use a distinct name for the connection key so it does not
            # shadow the imported `db` module, which provides
            # insert_function() used below.
            conn = config.get_value(['set', 'db_conn'])
            sche = config.get_value(['db', conn, 'sche'])
            user = config.get_value(['db', conn, 'user'])
            for f in functions:
                db.insert_function(open(f).read().format(
                    schema=sche, owner=user))
                outlog(f"The '{f}' was imported to the database.")
            finc_list = [
                f"ipycbm_{f.split('/')[-1].split('.')[0]}, " for f in functions]
            outlog(
                f"The functions: {('').join(finc_list)} were added to the database")
        except Exception as err:
            outlog("Could not add functions to database.", err)

    dbf_box = VBox(
        [dbf_info, dbf_insert])

    # 6. FOI v1 parameters.
    param_info = HTML(
        """6. Set FOI v1 Parameters""")

    # heterogeneity_threshold
    param_heto_info = HTML("""
        Minimum and maximum thresholds for heterogeneity checks. In the example,
        any parcel with percentage of pixels for one class between 30 and 70 from
        the total, will be considered heterogenous.
        """)
    param_min_het = IntText(
        value=30,
        description='MIN:',
        tooltip="Minimum threshold for heterogeneity checks",
        layout=Layout(width='150px')
    )
    param_max_het = IntText(
        value=70,
        description='MAX:',
        tooltip="Maximum threshold for heterogeneity checks",
        layout=Layout(width='150px')
    )
    param_area_info = HTML("""Minimum area for clusters selection -
        only clusters bigger from this threshold will be counted.
        """)
    param_area = IntText(
        value=2000,
        description='area:',
        tooltip="Minimum area for clusters selection.",
        layout=Layout(width='200px')
    )

    param_box = VBox([param_info,
                      param_heto_info, HBox([param_min_het, param_max_het]),
                      param_area_info, param_area
                      ])

    # 7. Run the analysis.
    run_info = Label("7. Run the FOI analysis.")
    run_analysis = Button(
        description='Run FOI v1',
        value=False,
        button_style='info',
        tooltip='Run FOI analysis version 1',
        icon='play',
    )
    run_box = VBox([run_info, run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Pass the selected table, file paths and parameter values to foi_v1.
        with progress:
            foi_v1.main(
                db_tables.value,
                f"{path_foi}raster/{img_file.children[1].children[0].value}",
                f"{path_foi}{yml_file.children[1].children[0].value}",
                param_min_het.value, param_max_het.value, param_area.value)

    wbox = VBox([foi_info,
                 config_box,
                 spatial_box,
                 img_box,
                 yml_box,
                 dbf_box,
                 param_box,
                 run_box,
                 progress])

    return wbox
def foi_tab_v2():
    """Build the ipywidgets UI for FOI analysis version 2.

    Version 2 works entirely on local files (no database access): the user
    selects a vector file, a thematic raster and a YAML class map, sets the
    heterogeneity/cardinality parameters and runs ``foi_v2.main``.

    Returns:
        VBox: the assembled widget box, ready for display in a notebook.
    """
    path_foi = f"{config.get_value(['paths', 'temp'])}/foi/"

    progress = Output()  # shared log area for all button callbacks

    def outlog(*text):
        # Print into the progress Output widget instead of stdout.
        with progress:
            print(*text)

    foi_info = HTML("""FOI procedures version 2 (does not require access to a database).
        """, placeholder='FOI Information')

    # 1. Vector file selection.
    shp_info = HTML(
        """1. Spatial data to be tested -
        parcels that will be checked for heterogeneity and cardinality.""")
    shp_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}vector', '', 'Select .shp', True, True)
    shp_box = VBox([shp_info, shp_file])

    # 2. Thematic raster selection (only 'Upload' is implemented).
    img_info = HTML(
        """2. Thematic raster - classification raster, or raster from other
        source that will be used for testing heterogeneity and cardinality.<br>
        - Upload or generate raster base image.
        (Only upload is currently available)""")
    img_option = ToggleButtons(
        options=['Upload', 'Generate'],
        value=None,
        disabled=True,
        button_style='',  # 'success', 'info', 'warning', 'danger' or ''
        tooltips=['Upload your base image', 'Get from object storage']
    )

    def on_img_option_change(change):
        # Show the file selector only for the 'Upload' option.
        if img_option.value == 'Upload':
            img_box.children = [HBox([img_info, img_option, img_file])]
        else:
            img_box.children = ()
    img_option.observe(on_img_option_change, 'value')
    img_file = cbm_widgets.get_files_dropdown(
        f'{path_foi}raster', '.tif, .tiff', 'Select Raster')
    img_box = VBox([img_info, img_option, img_file])

    # 3. YAML class map upload.
    yml_info = HTML(
        """3. YAML file that holds the classes from the thematic raster.<br>
        - This can be also a simple list of values in the notebook
        correspondence between pixel values and names for the classes""")

    yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',
                                              'Select YML')
    yml_box = VBox([yml_info, yml_file])

    # 4. FOI v2 parameters.
    pre_info = Label("4. Set FOI v2 Parameters.")

    # heterogeneity_threshold
    pre_heto_chec = HTML("""
        Minimum and maximum thresholds for heterogeneity checks. In the example,
        any parcel with percentage of pixels for one class between 30 and 70 from
        the total, will be considered heterogenous.
        """)
    pre_min_het = IntText(
        value=30,
        description='MIN:',
        tooltip="Minimum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_max_het = IntText(
        value=70,
        description='MAX:',
        tooltip="Maximum threshold for heterogeneity checks",
        disabled=False,
        layout=Layout(width='150px')
    )
    pre_heto_chec_box = HBox([pre_min_het, pre_max_het])
    pre_min_cluster_size = IntText(
        value=20,
        description='pixels:',
        tooltip="Minimum area for clusters selection.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_pixel_connectivity = IntText(
        value=8,
        description='connectivity type:',
        tooltip="Type of pixel connectivity in analysis. Accepted values: 4 or 8.",
        disabled=False,
        layout=Layout(width='200px')
    )
    pre_negative_buffer = IntText(
        value=-10,
        description='negative buffer:',
        tooltip="Negative buffer to be applied on the FOI",
        disabled=False,
        layout=Layout(width='200px')
    )

    pre_box = VBox([
        pre_info, pre_heto_chec, pre_heto_chec_box,
        pre_pixel_connectivity, pre_negative_buffer,
        HBox([pre_min_cluster_size,
              HTML("Minimum area for clusters selection - only clusters bigger from this threshold will be counted.")])
    ])

    # 5. Run the analysis.
    run_info = Label("5. Run the FOI analysis.")
    run_analysis = Button(
        description='Run FOI v2',
        value=False,
        disabled=False,
        button_style='info',
        tooltip='Run FOI analysis version 2',
        icon='play',
    )
    run_box = HBox([run_analysis])

    @run_analysis.on_click
    def run_analysis_on_click(b):
        # Pass the selected file paths and parameter values to foi_v2.
        with progress:
            foi_v2.main(
                f"{path_foi}vector/{shp_file.children[1].children[0].value}",
                f"{path_foi}raster/{img_file.children[1].children[0].value}",
                f"{path_foi}{yml_file.children[1].children[0].value}",
                pre_negative_buffer.value,
                pre_min_het.value,
                pre_max_het.value,
                pre_pixel_connectivity.value,
                pre_min_cluster_size.value)

    wbox_v2 = VBox([foi_info,
                    shp_box,
                    img_box,
                    yml_box,
                    pre_box,
                    run_info,
                    run_box,
                    progress])

    return wbox_v2
|
flexible
|
{
"blob_id": "2f9a081845685a4748c8b028ae4ee3a056a10284",
"index": 9779,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. 
Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-3": "<mask token>\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. 
Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-4": "import os\nimport glob\nfrom ipywidgets import Text, Label, HBox, VBox, Layout, Dropdown, ToggleButtons, Output, HTML, Button, FileUpload, IntText, RadioButtons\nfrom cbm.utils import config\nfrom cbm.ipycbm.utils import settings_ds, cbm_widgets\nfrom cbm.ipycbm.ipy_ext import ext_func\nfrom cbm.foi import foi_v1\nfrom cbm.datas import db\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 1 (requires access to a database).\\n ',\n placeholder='FOI Information')\n config_info = HTML(value=\n \"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\"\n , placeholder='FOI Information')\n config_conn = Button(value=False, button_style='info', tooltip=\n 'Configure db connection.', icon='cogs', layout=Layout(width='40px'))\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n config_box = VBox([config_info, config_conn, config_conn_box])\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\"\n )\n db_tables = Dropdown(options=[], description='db Tables:')\n refresh_db_tables = Button(value=False, button_style='info', tooltip=\n 'Get db tables.', icon='refresh', layout=Layout(width='40px'))\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n db_tables_box = HBox([db_tables, refresh_db_tables])\n upload_shp = Button(description='Create new table', value=False,\n button_style='info', tooltip='upload_shp.', icon='up')\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='info', tooltips=[\n 'Upnload your base image', 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n dbf_info = HTML(\n \"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\"\n )\n dbf_insert = Button(value=False, button_style='info', tooltip=\n 'Create functions.', icon='fa-share-square')\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f'{path_foi_func}*.func')\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n for f in functions:\n db.insert_function(open(f).read().format(schema=sche, owner\n =user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in\n functions]\n outlog(\n f\"The functions: {''.join(finc_list)} where added to the database\"\n )\n except Exception as err:\n outlog('Could not add functions to dattabase.', err)\n dbf_box = VBox([dbf_info, dbf_insert])\n param_info = HTML('6. Set FOI v1 Parameters')\n param_heto_info = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n param_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', layout=Layout(width=\n '150px'))\n param_area_info = HTML(\n \"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\"\n )\n param_area = IntText(value=2000, description='area:', tooltip=\n 'Minimum area for clusters selection.', layout=Layout(width='200px'))\n param_box = VBox([param_info, param_heto_info, HBox([param_min_het,\n param_max_het]), param_area_info, param_area])\n run_info = Label('7. Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v1', value=False,\n button_style='info', tooltip='Run FOI analysis version 1', icon='play')\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(db_tables.value,\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n param_min_het.value, param_max_het.value, param_area.value)\n wbox = VBox([foi_info, config_box, spatial_box, img_box, yml_box,\n dbf_box, param_box, run_box, progress])\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n foi_info = HTML(\n 'FOI procedures version 2 (does not require access to a database).\\n '\n , placeholder='FOI Information')\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\"\n )\n shp_file = cbm_widgets.get_files_dropdown(f'{path_foi}vector', '',\n 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\"\n )\n img_option = ToggleButtons(options=['Upload', 'Generate'], value=None,\n disabled=True, button_style='', tooltips=['Upnload your base image',\n 'Get from object storage'])\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(f'{path_foi}raster',\n '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\"\n )\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n pre_info = Label('4. Set FOI v2 Parameters.')\n pre_heto_chec = HTML(\n \"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\"\n )\n pre_min_het = IntText(value=30, description='MIN:', tooltip=\n 'Minimum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_max_het = IntText(value=70, description='MAX:', tooltip=\n 'Maximum threshold for heterogeneity checks', disabled=False,\n layout=Layout(width='150px'))\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(value=20, description='pixels:', tooltip\n ='Minimum area for clusters selection.', disabled=False, layout=\n Layout(width='200px'))\n pre_pixel_connectivity = IntText(value=8, description=\n 'connectivity type:', tooltip=\n 'Type of pixel connectivity in analysis. Accepted values: 4 or 8.',\n disabled=False, layout=Layout(width='200px'))\n pre_negative_buffer = IntText(value=-10, description='negative buffer:',\n tooltip='Negative buffer to be applied on the FOI', disabled=False,\n layout=Layout(width='200px'))\n pre_box = VBox([pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer, HBox([\n pre_min_cluster_size, HTML(\n 'Minimum area for clusters selection - only clusters bigger from this threshold will be counted.'\n )])])\n run_info = Label('5. 
Run the FOI analysis.')\n run_analysis = Button(description='Run FOI v2', value=False, disabled=\n False, button_style='info', tooltip='Run FOI analysis version 2',\n icon='play')\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f'{path_foi}vector/{shp_file.children[1].children[0].value}',\n f'{path_foi}raster/{img_file.children[1].children[0].value}',\n f'{path_foi}{yml_file.children[1].children[0].value}',\n pre_negative_buffer.value, pre_min_het.value, pre_max_het.\n value, pre_pixel_connectivity.value, pre_min_cluster_size.value\n )\n wbox_v2 = VBox([foi_info, shp_box, img_box, yml_box, pre_box, run_info,\n run_box, progress])\n return wbox_v2\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This file is part of CbM (https://github.com/ec-jrc/cbm).\n# Author : Konstantinos Anastasakis\n# Credits : GTCAP Team\n# Copyright : 2021 European Commission, Joint Research Centre\n# License : 3-Clause BSD\n\n\nimport os\nimport glob\nfrom ipywidgets import (Text, Label, HBox, VBox, Layout, Dropdown,\n ToggleButtons, Output, HTML, Button,\n FileUpload, IntText, RadioButtons)\n\nfrom cbm.utils import config\nfrom cbm.ipycbm.utils import settings_ds, cbm_widgets\nfrom cbm.ipycbm.ipy_ext import ext_func\nfrom cbm.foi import foi_v1\nfrom cbm.datas import db\ntry:\n from cbm.foi import foi_v2\nexcept Exception as err:\n print(err)\n\n\ndef foi_tab_v1():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n path_foi_func = foi_v1.path_foi_func\n\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n foi_info = HTML(\"\"\"FOI procedures version 1 (requires access to a database).\n \"\"\", placeholder='FOI Information')\n\n # Connect to database\n\n config_info = HTML(value=\"\"\"1. Connect to database and object storage.<br>\n FOI procedures need direct access to the database. In case there no\n image is provided, access to object storage will be needed as well\n to generate the base image from sentinel images.\n \"\"\", placeholder='FOI Information')\n config_conn = Button(\n value=False,\n button_style='info',\n tooltip='Configure db connection.',\n icon='cogs',\n layout=Layout(width='40px')\n )\n\n config_conn_box = HBox([])\n\n @config_conn.on_click\n def config_conn_on_click(b):\n if config_conn_box.children == ():\n config_conn_box.children = [settings_ds.direct_conn()]\n else:\n config_conn_box.children = ()\n\n config_box = VBox([config_info, config_conn,\n config_conn_box])\n\n # Spatial data to be tested\n spatial_info = HTML(\n \"\"\"2. 
Select the spatial data to be tested - parcels that will be\n checked for heterogeneity and cardinality.<br>\n - Select a table from the database\"\"\")\n\n db_tables = Dropdown(\n options=[],\n description='db Tables:'\n )\n refresh_db_tables = Button(\n value=False,\n button_style='info',\n tooltip='Get db tables.',\n icon='refresh',\n layout=Layout(width='40px')\n )\n\n @refresh_db_tables.on_click\n def refresh_db_tables_on_click(b):\n db_tables.options = db.tables(config.get_value(['set', 'db_conn']))\n\n db_tables_box = HBox([db_tables, refresh_db_tables])\n\n upload_shp = Button(\n description='Create new table',\n value=False,\n button_style='info',\n tooltip='upload_shp.',\n icon='up'\n )\n\n upload_box = VBox([])\n\n @upload_shp.on_click\n def upload_shp_on_click(b):\n if upload_box.children == ():\n upload_box.children = [ext_func.upload_shp(path_foi, True)]\n else:\n upload_box.children = ()\n spatial_box = VBox([spatial_info, upload_shp, upload_box, db_tables_box])\n\n # Thematic raster.\n img_info = HTML(\n \"\"\"3. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\")\n img_option = ToggleButtons(\n options=['Upload', 'Generate'],\n value=None,\n disabled=True,\n button_style='info', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Upnload your base image', 'Get from object storage']\n )\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n\n img_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}raster', '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n\n # YAML File upload\n yml_info = HTML(\n \"\"\"4. 
YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\")\n\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n\n # Database functions\n dbf_info = HTML(\"\"\"5. Create database functions.<br>\n - Import required database functions for FOI analysis to the database\"\"\")\n\n dbf_insert = Button(\n value=False,\n button_style='info',\n tooltip='Create functions.',\n icon='fa-share-square'\n )\n\n @dbf_insert.on_click\n def dbf_insert_on_click(b):\n outlog('path_foi_func :', path_foi_func)\n progress.clear_output()\n try:\n functions = glob.glob(f\"{path_foi_func}*.func\")\n db = config.get_value(['set', 'db_conn'])\n sche = config.get_value(['db', db, 'sche'])\n user = config.get_value(['db', db, 'user'])\n\n for f in functions:\n db.insert_function(open(f).read().format(\n schema=sche, owner=user))\n outlog(f\"The '{f}' Was imported to the database.\")\n finc_list = [\n f\"ipycbm_{f.split('/')[-1].split('.')[0]}, \" for f in functions]\n outlog(\n f\"The functions: {('').join(finc_list)} where added to the database\")\n except Exception as err:\n outlog(\"Could not add functions to dattabase.\", err)\n\n dbf_box = VBox(\n [dbf_info, dbf_insert])\n\n # FOI Parameters\n param_info = HTML(\n \"\"\"6. Set FOI v1 Parameters\"\"\")\n\n # heterogeneity_threshold\n param_heto_info = HTML(\"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\")\n param_min_het = IntText(\n value=30,\n description='MIN:',\n tooltip=\"Minimum threshold for heterogeneity checks\",\n layout=Layout(width='150px')\n )\n param_max_het = IntText(\n value=70,\n description='MAX:',\n tooltip=\"Maximum threshold for heterogeneity checks\",\n layout=Layout(width='150px')\n )\n\n param_area_info = HTML(\"\"\"Minimum area for clusters selection -\n only clusters bigger from this threshold will be counted.\n \"\"\")\n param_area = IntText(\n value=2000,\n description='area:',\n tooltip=\"Minimum area for clusters selection.\",\n layout=Layout(width='200px')\n )\n\n param_box = VBox([param_info,\n param_heto_info, HBox([param_min_het, param_max_het]),\n param_area_info, param_area\n ])\n\n # Run FOI analysis\n run_info = Label(\"7. Run the FOI analysis.\")\n run_analysis = Button(\n description='Run FOI v1',\n value=False,\n button_style='info',\n tooltip='Run FOI analysis version 1',\n icon='play',\n )\n run_box = VBox([run_info, run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v1.main(\n db_tables.value,\n f\"{path_foi}raster/{img_file.children[1].children[0].value}\",\n f\"{path_foi}{yml_file.children[1].children[0].value}\",\n param_min_het.value, param_max_het.value, param_area.value)\n\n wbox = VBox([foi_info,\n config_box,\n spatial_box,\n img_box,\n yml_box,\n dbf_box,\n param_box,\n run_box,\n progress])\n\n return wbox\n\n\ndef foi_tab_v2():\n path_foi = f\"{config.get_value(['paths', 'temp'])}/foi/\"\n progress = Output()\n\n def outlog(*text):\n with progress:\n print(*text)\n\n foi_info = HTML(\"\"\"FOI procedures version 2 (does not require access to a database).\n \"\"\", placeholder='FOI Information')\n\n # Vector file\n shp_info = HTML(\n \"\"\"1. 
Spatial data to be tested -\n parcels that will be checked for heterogeneity and cardinality.\"\"\")\n shp_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}vector', '', 'Select .shp', True, True)\n shp_box = VBox([shp_info, shp_file])\n\n # Thematic raster.\n img_info = HTML(\n \"\"\"2. Thematic raster - classification raster, or raster from other\n source that will be used for testing heterogeneity and cardinality.<br>\n - Upload or generate raster base image.\n (Only upload is currently available)\"\"\")\n img_option = ToggleButtons(\n options=['Upload', 'Generate'],\n value=None,\n disabled=True,\n button_style='', # 'success', 'info', 'warning', 'danger' or ''\n tooltips=['Upnload your base image', 'Get from object storage']\n )\n\n def on_img_option_change(change):\n if img_option.value == 'Upload':\n img_box.children = [HBox([img_info, img_option, img_file])]\n else:\n img_box.children = ()\n img_option.observe(on_img_option_change, 'value')\n img_file = cbm_widgets.get_files_dropdown(\n f'{path_foi}raster', '.tif, .tiff', 'Select Raster')\n img_box = VBox([img_info, img_option, img_file])\n\n # YAML File upload\n yml_info = HTML(\n \"\"\"3. YAML file that holds the classes form the thematic raster.<br>\n - This can be also a simple list of values in the notebook\n corespondence between pixel values and names for the classes\"\"\")\n yml_file = cbm_widgets.get_files_dropdown(path_foi, '.yml, .yaml',\n 'Select YML')\n yml_box = VBox([yml_info, yml_file])\n\n # FOI Prerequisites\n pre_info = Label(\"4. Set FOI v2 Parameters.\")\n\n # heterogeneity_threshold\n pre_heto_chec = HTML(\"\"\"\n Minimum and maximum thresholds for heterogeneity checks. 
In the example,\n any parcel with percentage of pixels for one class between 30 and 70 from\n the total, will be considered heterogenous.\n \"\"\")\n pre_min_het = IntText(\n value=30,\n description='MIN:',\n tooltip=\"Minimum threshold for heterogeneity checks\",\n disabled=False,\n layout=Layout(width='150px')\n )\n pre_max_het = IntText(\n value=70,\n description='MAX:',\n tooltip=\"Maximum threshold for heterogeneity checks\",\n disabled=False,\n layout=Layout(width='150px')\n )\n pre_heto_chec_box = HBox([pre_min_het, pre_max_het])\n pre_min_cluster_size = IntText(\n value=20,\n description='pixels:',\n tooltip=\"Minimum area for clusters selection.\",\n disabled=False,\n layout=Layout(width='200px')\n )\n pre_pixel_connectivity = IntText(\n value=8,\n description='connectivity type:',\n tooltip=\"Type of pixel connectivity in analysis. Accepted values: 4 or 8.\",\n disabled=False,\n layout=Layout(width='200px')\n )\n pre_negative_buffer = IntText(\n value=-10,\n description='negative buffer:',\n tooltip=\"Negative buffer to be applied on the FOI\",\n disabled=False,\n layout=Layout(width='200px')\n )\n\n pre_box = VBox([\n pre_info, pre_heto_chec, pre_heto_chec_box,\n pre_pixel_connectivity, pre_negative_buffer,\n HBox([pre_min_cluster_size,\n HTML(\"Minimum area for clusters selection - only clusters bigger from this threshold will be counted.\")])\n ])\n\n # Run FOI analysis\n run_info = Label(\"5. 
Run the FOI analysis.\")\n run_analysis = Button(\n description='Run FOI v2',\n value=False,\n disabled=False,\n button_style='info',\n tooltip='Run FOI analysis version 2',\n icon='play',\n )\n run_box = HBox([run_analysis])\n\n @run_analysis.on_click\n def run_analysis_on_click(b):\n with progress:\n foi_v2.main(\n f\"{path_foi}vector/{shp_file.children[1].children[0].value}\",\n f\"{path_foi}raster/{img_file.children[1].children[0].value}\",\n f\"{path_foi}{yml_file.children[1].children[0].value}\",\n pre_negative_buffer.value,\n pre_min_het.value,\n pre_max_het.value,\n pre_pixel_connectivity.value,\n pre_min_cluster_size.value)\n\n wbox_v2 = VBox([foi_info,\n shp_box,\n img_box,\n yml_box,\n pre_box,\n run_info,\n run_box,\n progress])\n\n return wbox_v2\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import numpy as np
import imutils
import cv2

def display(title, img):
    """Show *img* in a titled window and block until any key is pressed."""
    cv2.imshow(title, img)
    cv2.waitKey(0)

# Load the sample image and show it unmodified first.
image = cv2.imread("D:\\Github\\python-opencv\\images\\trex.png")
display("Original", image)

# Rotations pivot around the image center.
height, width = image.shape[:2]
pivot = (width / 2, height / 2)

# 45-degree rotation via an explicit affine transform.
rotation_45 = cv2.getRotationMatrix2D(pivot, 45, 1.0)
display("Rotated by 45 degrees", cv2.warpAffine(image, rotation_45, (width, height)))

# Negative angles rotate clockwise.
rotation_neg90 = cv2.getRotationMatrix2D(pivot, -90, 1.0)
display("Rotated by -90 degrees", cv2.warpAffine(image, rotation_neg90, (width, height)))

# imutils wraps the same matrix/warp pair in one call.
display("Rotated by 180", imutils.rotate(image, 180))
|
normal
|
{
"blob_id": "4462fec6e0edc25530c93ffeeae2372c86fef2cc",
"index": 528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('Original', image)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\n<mask token>\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"step-4": "import numpy as np\nimport imutils\nimport cv2\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"step-5": "import numpy as np\nimport imutils\nimport cv2\n\nimage = cv2.imread(\"D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png\")\ncv2.imshow(\"Original\", image)\ncv2.waitKey(0)\n\n(h, w) = image.shape[:2] # get height and width of the image\ncenter = (w/2, h/2) # which point to rotate around\n\nM = cv2.getRotationMatrix2D(center, 45, 1.0) # rotation matrix\nrotated = cv2.warpAffine(image, M, (w, h)) # apply the rotation\ncv2. imshow(\"Rotated by 45 degrees\", rotated)\ncv2.waitKey(0)\n\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow(\"Rotated by -90 degrees\", rotated)\ncv2.waitKey(0)\n\nrotated = imutils.rotate(image, 180)\ncv2.imshow(\"Rotated by 180\", rotated)\ncv2.waitKey(0)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask, render_template, request, jsonify, make_response

app = Flask(__name__)


@app.route("/")
def hello():
    """Serve the chat page.

    Renders the ``chat.html`` template and wraps it in an explicit
    200 response so the status code is stated rather than implied.
    """
    return make_response(render_template('chat.html'), 200)


if __name__ == "__main__":
    # Debug mode enables the auto-reloader and in-browser tracebacks;
    # development use only — never run with debug=True in production.
    app.run(debug=True)
|
normal
|
{
"blob_id": "98841630964dd9513e51c3f13bfdb0719600712d",
"index": 6941,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return make_response(render_template('chat.html'), 200)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return make_response(render_template('chat.html'), 200)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template, request, jsonify, make_response\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return make_response(render_template('chat.html'), 200)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template, request, jsonify, make_response\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello():\n # return render_template('chat.html')\n return make_response(render_template('chat.html'),200)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def disemvowel(s):
return s.translate(None, 'aeiouAEIOU')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def disemvowel(string):
returnString = ''
vowels = ['a', 'e', 'i', 'o', 'u']
upperVowels = ['A', 'E', 'I', 'O', 'U']
vowless = [i for i in string if i not in vowels and i not in upperVowels]
for letters in vowless:
returnString += letters
return returnString
<|reserved_special_token_0|>
def disemvowel(s):
return s.translate(None, 'aeiouAEIOU')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def disemvowel(string):
returnString = ''
vowels = ['a', 'e', 'i', 'o', 'u']
upperVowels = ['A', 'E', 'I', 'O', 'U']
vowless = [i for i in string if i not in vowels and i not in upperVowels]
for letters in vowless:
returnString += letters
return returnString
<|reserved_special_token_0|>
dis
def disemvowel(s):
return s.translate(None, 'aeiouAEIOU')
<|reserved_special_token_0|>
i
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def disemvowel(string):
returnString = ''
vowels = ['a', 'e', 'i', 'o', 'u']
upperVowels = ['A', 'E', 'I', 'O', 'U']
vowless = [i for i in string if i not in vowels and i not in upperVowels]
for letters in vowless:
returnString += letters
return returnString
string = 'hEllo'
dis = disemvowel(string)
dis
def disemvowel(s):
return s.translate(None, 'aeiouAEIOU')
e = 'Hello'
i = disemvowel(e)
i
<|reserved_special_token_1|>
'''
Trolls are attacking your comment section!
A common way to deal with this situation is to remove all of the vowels from the trolls' comments, neutralizing the threat.
Your task is to write a function that takes a string and return a new string with all vowels removed.
For example, the string "This website is for losers LOL!" would become "Ths wbst s fr lsrs LL!".
Note: for this kata y isn't considered a vowel.
'''
#%%
def disemvowel(string):
returnString =""
vowels = ["a","e", "i", "o", "u"]
upperVowels = ["A", "E", "I", "O", "U"]
vowless = [i for i in string if i not in vowels and i not in upperVowels]
for letters in vowless:
returnString += letters
return returnString
string = "hEllo"
dis = disemvowel(string)
dis
#%%
def disemvowel(s):
    """Return *s* with all vowels (aeiou, either case) removed.

    Bug fix: ``s.translate(None, "aeiouAEIOU")`` was the Python 2 API and
    raises ``TypeError`` on Python 3, where ``str.translate`` expects a
    mapping.  Build the deletion table with ``str.maketrans`` instead.
    """
    return s.translate(str.maketrans('', '', 'aeiouAEIOU'))
e = "Hello"
i = disemvowel(e)
i
# %%
|
flexible
|
{
"blob_id": "4dea0967a0ee3e9eb3b46145739dfeb233f3a120",
"index": 5307,
"step-1": "<mask token>\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef disemvowel(string):\n returnString = ''\n vowels = ['a', 'e', 'i', 'o', 'u']\n upperVowels = ['A', 'E', 'I', 'O', 'U']\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n return returnString\n\n\n<mask token>\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef disemvowel(string):\n returnString = ''\n vowels = ['a', 'e', 'i', 'o', 'u']\n upperVowels = ['A', 'E', 'I', 'O', 'U']\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n return returnString\n\n\n<mask token>\ndis\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\n<mask token>\ni\n",
"step-4": "<mask token>\n\n\ndef disemvowel(string):\n returnString = ''\n vowels = ['a', 'e', 'i', 'o', 'u']\n upperVowels = ['A', 'E', 'I', 'O', 'U']\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n return returnString\n\n\nstring = 'hEllo'\ndis = disemvowel(string)\ndis\n\n\ndef disemvowel(s):\n return s.translate(None, 'aeiouAEIOU')\n\n\ne = 'Hello'\ni = disemvowel(e)\ni\n",
"step-5": "'''\nTrolls are attacking your comment section!\n\nA common way to deal with this situation is to remove all of the vowels from the trolls' comments, neutralizing the threat.\n\nYour task is to write a function that takes a string and return a new string with all vowels removed.\n\nFor example, the string \"This website is for losers LOL!\" would become \"Ths wbst s fr lsrs LL!\".\n\nNote: for this kata y isn't considered a vowel.\n\n\n'''\n#%%\ndef disemvowel(string):\n returnString =\"\"\n vowels = [\"a\",\"e\", \"i\", \"o\", \"u\"]\n upperVowels = [\"A\", \"E\", \"I\", \"O\", \"U\"]\n vowless = [i for i in string if i not in vowels and i not in upperVowels]\n for letters in vowless:\n returnString += letters\n\n return returnString\n\nstring = \"hEllo\"\ndis = disemvowel(string)\ndis\n\n\n\n#%%\ndef disemvowel(s):\n return s.translate(None, \"aeiouAEIOU\")\n\ne = \"Hello\"\ni = disemvowel(e)\ni\n# %%\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df.to_csv('Tweets.csv', index=None, header=None)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df1 = pd.read_csv('Tweets1.csv', names=['tweet'])
df2 = pd.read_csv('Tweets2.csv', names=['tweet'])
df3 = pd.read_csv('Tweets3.csv', names=['tweet'])
df = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False, copy=True)
df.to_csv('Tweets.csv', index=None, header=None)
<|reserved_special_token_1|>
import pandas as pd

# Load the three tweet dumps; each file is a headerless single-column CSV.
frames = [pd.read_csv(f'Tweets{i}.csv', names=['tweet']) for i in (1, 2, 3)]

# Stack the frames vertically.  Every keyword is spelled out at its default:
# plain row-wise outer concatenation with the original indices preserved.
combined = pd.concat(frames, axis=0, join='outer', ignore_index=False,
                     keys=None, levels=None, names=None,
                     verify_integrity=False, copy=True)

# Write the merged dump back out with no index column and no header row.
combined.to_csv('Tweets.csv', index=None, header=None)
|
flexible
|
{
"blob_id": "7d6196268b85861e76efaa53e14976f2eae09405",
"index": 3226,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndf.to_csv('Tweets.csv', index=None, header=None)\n",
"step-3": "<mask token>\ndf1 = pd.read_csv('Tweets1.csv', names=['tweet'])\ndf2 = pd.read_csv('Tweets2.csv', names=['tweet'])\ndf3 = pd.read_csv('Tweets3.csv', names=['tweet'])\ndf = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,\n keys=None, levels=None, names=None, verify_integrity=False, copy=True)\ndf.to_csv('Tweets.csv', index=None, header=None)\n",
"step-4": "import pandas as pd\ndf1 = pd.read_csv('Tweets1.csv', names=['tweet'])\ndf2 = pd.read_csv('Tweets2.csv', names=['tweet'])\ndf3 = pd.read_csv('Tweets3.csv', names=['tweet'])\ndf = pd.concat([df1, df2, df3], axis=0, join='outer', ignore_index=False,\n keys=None, levels=None, names=None, verify_integrity=False, copy=True)\ndf.to_csv('Tweets.csv', index=None, header=None)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.2.3 on 2021-07-02 08:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a boolean "xulykho" column to the
    # existing "phieunhaphang" model in the "khovan" app.
    dependencies = [
        # Must be applied after khovan's 0003_nhapkho migration.
        ('khovan', '0003_nhapkho'),
    ]
    operations = [
        migrations.AddField(
            model_name='phieunhaphang',
            name='xulykho',
            # NOTE(review): the verbose_name "Xu Ly Kho" presumably means
            # "warehouse processing" (Vietnamese) — confirm with the app owners.
            field=models.BooleanField(default=False, verbose_name='Xu Ly Kho'),
            # default=False is used only to backfill existing rows while this
            # migration runs; it is not retained as a model-level default.
            preserve_default=False,
        ),
    ]
|
normal
|
{
"blob_id": "016255d74ccf4ac547e4b212d33bb9a39295c830",
"index": 2715,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('khovan', '0003_nhapkho')]\n operations = [migrations.AddField(model_name='phieunhaphang', name=\n 'xulykho', field=models.BooleanField(default=False, verbose_name=\n 'Xu Ly Kho'), preserve_default=False)]\n",
"step-4": "from django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('khovan', '0003_nhapkho')]\n operations = [migrations.AddField(model_name='phieunhaphang', name=\n 'xulykho', field=models.BooleanField(default=False, verbose_name=\n 'Xu Ly Kho'), preserve_default=False)]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-07-02 08:18\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('khovan', '0003_nhapkho'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='phieunhaphang',\n name='xulykho',\n field=models.BooleanField(default=False, verbose_name='Xu Ly Kho'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')
plt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,
color='#F78F1E')
plt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')
plt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=
'#FF3300')
ax.set_ylabel('Average MB/s', fontweight='bold')
ax.set_title('Average MBs of Random Numbers Generated in a Second',
fontweight='bold')
ax.set_xticks([(p + 1.5 * width) for p in pos])
ax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',
'Ubuntu 18.04'])
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, 10000])
plt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'
], loc='upper left')
plt.grid()
plt.savefig('barchart_compare.png')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
r_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')
em_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')
aishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')
agni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')
df = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),
agni_data_df.mean()], axis=1).T
pos = list(range(len(df['Mersenne Twister'])))
width = 0.2
fig, ax = plt.subplots(figsize=(10, 5))
plt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')
plt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,
color='#F78F1E')
plt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')
plt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=
'#FF3300')
ax.set_ylabel('Average MB/s', fontweight='bold')
ax.set_title('Average MBs of Random Numbers Generated in a Second',
fontweight='bold')
ax.set_xticks([(p + 1.5 * width) for p in pos])
ax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',
'Ubuntu 18.04'])
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, 10000])
plt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'
], loc='upper left')
plt.grid()
plt.savefig('barchart_compare.png')
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
r_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')
em_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')
aishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')
agni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')
df = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),
agni_data_df.mean()], axis=1).T
pos = list(range(len(df['Mersenne Twister'])))
width = 0.2
fig, ax = plt.subplots(figsize=(10, 5))
plt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')
plt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,
color='#F78F1E')
plt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')
plt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=
'#FF3300')
ax.set_ylabel('Average MB/s', fontweight='bold')
ax.set_title('Average MBs of Random Numbers Generated in a Second',
fontweight='bold')
ax.set_xticks([(p + 1.5 * width) for p in pos])
ax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',
'Ubuntu 18.04'])
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, 10000])
plt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'
], loc='upper left')
plt.grid()
plt.savefig('barchart_compare.png')
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# Per-machine benchmark CSVs (one column per PRNG, one row per timed run).
r_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')
em_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')
aishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')
agni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')

# One row per machine, one column per generator (mean MB/s per run set).
df = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),
                agni_data_df.mean()], axis=1).T

# Grouped-bar layout: one group per machine, one bar per generator.
pos = list(range(len(df['Mersenne Twister'])))
width = 0.2

fig, ax = plt.subplots(figsize=(10, 5))

# (column, alpha, color) for each bar series, in draw/legend order.
# alpha=None means "use the default" — same as not passing alpha at all.
series = [
    ('Mersenne Twister', 0.5, '#EE3224'),
    ('Xorshift 128+', 0.5, '#F78F1E'),
    ('SPCG64', None, '#FFC222'),
    ('Xoroshiro 128+', None, '#FF3300'),
]
for offset, (column, alpha, color) in enumerate(series):
    plt.bar([p + offset * width for p in pos], df[column], width,
            alpha=alpha, color=color)

ax.set_ylabel('Average MB/s', fontweight='bold')
ax.set_title('Average MBs of Random Numbers Generated in a Second',
             fontweight='bold')

# Center the machine labels under each group of four bars.
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',
                    'Ubuntu 18.04'])

plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, 10000])

# Legend entries follow the series draw order above.
plt.legend([column for column, _, _ in series], loc='upper left')
plt.grid()
plt.savefig('barchart_compare.png')
|
flexible
|
{
"blob_id": "467b919f6953737eedd3f99596df244bd1177575",
"index": 5411,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n",
"step-3": "<mask token>\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\ndf = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),\n agni_data_df.mean()], axis=1).T\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\nfig, ax = plt.subplots(figsize=(10, 5))\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\ndf = pd.concat([aishah_data_df.mean(), em_data_df.mean(), r_data_df.mean(),\n agni_data_df.mean()], axis=1).T\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\nfig, ax = plt.subplots(figsize=(10, 5))\nplt.bar(pos, df['Mersenne Twister'], width, alpha=0.5, color='#EE3224')\nplt.bar([(p + width) for p in pos], df['Xorshift 128+'], width, alpha=0.5,\n color='#F78F1E')\nplt.bar([(p + width * 2) for p in pos], df['SPCG64'], width, color='#FFC222')\nplt.bar([(p + width * 3) for p in pos], df['Xoroshiro 128+'], width, color=\n '#FF3300')\nax.set_ylabel('Average MB/s', fontweight='bold')\nax.set_title('Average MBs of Random Numbers Generated in a Second',\n fontweight='bold')\nax.set_xticks([(p + 1.5 * width) for p in pos])\nax.set_xticklabels(['MacBook 2017', 'MacBook 2015', 'MacBook 2011',\n 'Ubuntu 18.04'])\nplt.xlim(min(pos) - width, max(pos) + width * 4)\nplt.ylim([0, 10000])\nplt.legend(['Mersenne Twister', 'Xorshift 128+', 'SPCG64', 'Xoroshiro 128+'\n ], loc='upper left')\nplt.grid()\nplt.savefig('barchart_compare.png')\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nr_data_df = pd.read_csv('./Shootout Data/Shootout_Mac2017.csv')\nem_data_df = pd.read_csv('./Shootout Data/Shootout_Emily.csv')\naishah_data_df = pd.read_csv('./Shootout Data/Shootout_Aishah_Mac2011.csv')\nagni_data_df = pd.read_csv('./Shootout Data/Shootout_Agni.csv')\n\ndf = pd.concat([aishah_data_df.mean(),em_data_df.mean(),r_data_df.mean(),agni_data_df.mean()],axis=1).T\n\n\n# Setting the positions and width for the bars\npos = list(range(len(df['Mersenne Twister'])))\nwidth = 0.2\n\n# Plotting the bars\nfig, ax = plt.subplots(figsize=(10,5))\n\n# Create a bar with pre_score data,\n# in position pos,\nplt.bar(pos,\n #using df['pre_score'] data,\n df['Mersenne Twister'],\n # of width\n width,\n # with alpha 0.5\n alpha=0.5,\n # with color\n color='#EE3224')\n # with label the first value in first_name\n #label=df['first_name'][0])\n\n# Create a bar with mid_score data,\n# in position pos + some width buffer,\nplt.bar([p + width for p in pos],\n #using df['mid_score'] data,\n df['Xorshift 128+'],\n # of width\n width,\n # with alpha 0.5\n alpha=0.5,\n # with color\n color='#F78F1E')\n # with label the second value in first_name\n #label=df['first_name'][1])\n\n# Create a bar with post_score data,\n# in position pos + some width buffer,\nplt.bar([p + width*2 for p in pos],\n #using df['post_score'] data,\n df['SPCG64'],\n # of width\n width,\n # with alpha 0.5\n #alpha=0.5,\n # with color\n color='#FFC222')\n # with label the third value in first_name\n #label=df['first_name'][2])\n \n# Create a bar with post_score data,\n# in position pos + some width buffer,\nplt.bar([p + width*3 for p in pos],\n #using df['post_score'] data,\n df['Xoroshiro 128+'],\n # of width\n width,\n # with alpha 0.5\n #alpha=0.5,\n # with color\n color='#FF3300')\n # with label the third value in first_name\n #label=df['first_name'][2])\n\n# Set the y axis label\nax.set_ylabel('Average 
MB/s',fontweight='bold')\n\n# Set the chart's title\nax.set_title('Average MBs of Random Numbers Generated in a Second',fontweight='bold')\n\n# Set the position of the x ticks\nax.set_xticks([p + 1.5 * width for p in pos])\n\n# Set the labels for the x ticks\nax.set_xticklabels(['MacBook 2017','MacBook 2015','MacBook 2011','Ubuntu 18.04'])\n\n# Setting the x-axis and y-axis limits\nplt.xlim(min(pos)-width, max(pos)+width*4)\nplt.ylim([0, 10000] )\n\n# Adding the legend and showing the plot\nplt.legend(['Mersenne Twister','Xorshift 128+', 'SPCG64','Xoroshiro 128+'], loc='upper left')\nplt.grid()\n#plt.show()\nplt.savefig('barchart_compare.png')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Line(MapBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return ("<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>".
format(self.id, self.len, self.p0, self.p1, self.map_id))
class Point(MapBase):
__tablename__ = 'point'
id = Column(Integer, primary_key=True)
map_id = Column(Integer, ForeignKey('map.id'))
x = Column(Integer)
y = Column(Integer)
posts = relationship('Post', backref='point', lazy='dynamic')
def __repr__(self):
return "<Point(id='{}', map_id='{}', x='{}', y='{}')>".format(self.
id, self.map_id, self.x, self.y)
class Post(MapBase):
__tablename__ = 'post'
id = Column(Integer, primary_key=True)
name = Column(String)
type = Column(Integer)
population = Column(Integer)
armor = Column(Integer)
product = Column(Integer)
replenishment = Column(Integer)
map_id = Column(Integer, ForeignKey('map.id'))
point_id = Column(Integer, ForeignKey('point.id'))
def __repr__(self):
return (
"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>"
.format(self.id, self.name, self.type, self.population, self.
armor, self.product, self.replenishment, self.map_id, self.
point_id))
class Game(ReplayBase):
__tablename__ = 'game'
id = Column(Integer, primary_key=True)
name = Column(String)
date = Column(DateTime)
map_name = Column(String)
actions = relationship('Action', backref='game', lazy='dynamic')
num_players = Column(Integer)
def __repr__(self):
return (
"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>"
.format(self.id, self.name, self.date, self.map_name, self.
num_players))
class Action(ReplayBase):
__tablename__ = 'action'
id = Column(Integer, primary_key=True)
game_id = Column(Integer, ForeignKey('game.id'))
code = Column(Integer)
message = Column(String)
date = Column(DateTime)
def __repr__(self):
return (
"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>"
.format(self.id, self.game_id, self.code, self.message, self.date))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Map(MapBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Line(MapBase):
__tablename__ = 'line'
id = Column(Integer, primary_key=True)
len = Column(Integer)
p0 = Column(Integer)
p1 = Column(Integer)
map_id = Column(Integer, ForeignKey('map.id'))
def __repr__(self):
return ("<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>".
format(self.id, self.len, self.p0, self.p1, self.map_id))
class Point(MapBase):
__tablename__ = 'point'
id = Column(Integer, primary_key=True)
map_id = Column(Integer, ForeignKey('map.id'))
x = Column(Integer)
y = Column(Integer)
posts = relationship('Post', backref='point', lazy='dynamic')
def __repr__(self):
return "<Point(id='{}', map_id='{}', x='{}', y='{}')>".format(self.
id, self.map_id, self.x, self.y)
class Post(MapBase):
__tablename__ = 'post'
id = Column(Integer, primary_key=True)
name = Column(String)
type = Column(Integer)
population = Column(Integer)
armor = Column(Integer)
product = Column(Integer)
replenishment = Column(Integer)
map_id = Column(Integer, ForeignKey('map.id'))
point_id = Column(Integer, ForeignKey('point.id'))
def __repr__(self):
return (
"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>"
.format(self.id, self.name, self.type, self.population, self.
armor, self.product, self.replenishment, self.map_id, self.
point_id))
class Game(ReplayBase):
__tablename__ = 'game'
id = Column(Integer, primary_key=True)
name = Column(String)
date = Column(DateTime)
map_name = Column(String)
actions = relationship('Action', backref='game', lazy='dynamic')
num_players = Column(Integer)
def __repr__(self):
return (
"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>"
.format(self.id, self.name, self.date, self.map_name, self.
num_players))
class Action(ReplayBase):
__tablename__ = 'action'
id = Column(Integer, primary_key=True)
game_id = Column(Integer, ForeignKey('game.id'))
code = Column(Integer)
message = Column(String)
date = Column(DateTime)
def __repr__(self):
return (
"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>"
.format(self.id, self.game_id, self.code, self.message, self.date))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Map(MapBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return "<Map(id='{}', name='{}', size_x='{}', size_y='{}')>".format(
self.id, self.name, self.size_x, self.size_y)
class Line(MapBase):
__tablename__ = 'line'
id = Column(Integer, primary_key=True)
len = Column(Integer)
p0 = Column(Integer)
p1 = Column(Integer)
map_id = Column(Integer, ForeignKey('map.id'))
def __repr__(self):
return ("<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>".
format(self.id, self.len, self.p0, self.p1, self.map_id))
class Point(MapBase):
__tablename__ = 'point'
id = Column(Integer, primary_key=True)
map_id = Column(Integer, ForeignKey('map.id'))
x = Column(Integer)
y = Column(Integer)
posts = relationship('Post', backref='point', lazy='dynamic')
def __repr__(self):
return "<Point(id='{}', map_id='{}', x='{}', y='{}')>".format(self.
id, self.map_id, self.x, self.y)
class Post(MapBase):
__tablename__ = 'post'
id = Column(Integer, primary_key=True)
name = Column(String)
type = Column(Integer)
population = Column(Integer)
armor = Column(Integer)
product = Column(Integer)
replenishment = Column(Integer)
map_id = Column(Integer, ForeignKey('map.id'))
point_id = Column(Integer, ForeignKey('point.id'))
def __repr__(self):
return (
"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>"
.format(self.id, self.name, self.type, self.population, self.
armor, self.product, self.replenishment, self.map_id, self.
point_id))
class Game(ReplayBase):
__tablename__ = 'game'
id = Column(Integer, primary_key=True)
name = Column(String)
date = Column(DateTime)
map_name = Column(String)
actions = relationship('Action', backref='game', lazy='dynamic')
num_players = Column(Integer)
def __repr__(self):
return (
"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>"
.format(self.id, self.name, self.date, self.map_name, self.
num_players))
class Action(ReplayBase):
__tablename__ = 'action'
id = Column(Integer, primary_key=True)
game_id = Column(Integer, ForeignKey('game.id'))
code = Column(Integer)
message = Column(String)
date = Column(DateTime)
def __repr__(self):
return (
"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>"
.format(self.id, self.game_id, self.code, self.message, self.date))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Map(MapBase):
__tablename__ = 'map'
id = Column(Integer, primary_key=True)
name = Column(String)
size_x = Column(Integer)
size_y = Column(Integer)
lines = relationship('Line', backref='map', lazy='dynamic')
points = relationship('Point', backref='map', lazy='dynamic')
posts = relationship('Post', backref='map', lazy='dynamic')
def __repr__(self):
return "<Map(id='{}', name='{}', size_x='{}', size_y='{}')>".format(
self.id, self.name, self.size_x, self.size_y)
class Line(MapBase):
__tablename__ = 'line'
id = Column(Integer, primary_key=True)
len = Column(Integer)
p0 = Column(Integer)
p1 = Column(Integer)
map_id = Column(Integer, ForeignKey('map.id'))
def __repr__(self):
return ("<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>".
format(self.id, self.len, self.p0, self.p1, self.map_id))
class Point(MapBase):
__tablename__ = 'point'
id = Column(Integer, primary_key=True)
map_id = Column(Integer, ForeignKey('map.id'))
x = Column(Integer)
y = Column(Integer)
posts = relationship('Post', backref='point', lazy='dynamic')
def __repr__(self):
return "<Point(id='{}', map_id='{}', x='{}', y='{}')>".format(self.
id, self.map_id, self.x, self.y)
class Post(MapBase):
__tablename__ = 'post'
id = Column(Integer, primary_key=True)
name = Column(String)
type = Column(Integer)
population = Column(Integer)
armor = Column(Integer)
product = Column(Integer)
replenishment = Column(Integer)
map_id = Column(Integer, ForeignKey('map.id'))
point_id = Column(Integer, ForeignKey('point.id'))
def __repr__(self):
return (
"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>"
.format(self.id, self.name, self.type, self.population, self.
armor, self.product, self.replenishment, self.map_id, self.
point_id))
class Game(ReplayBase):
__tablename__ = 'game'
id = Column(Integer, primary_key=True)
name = Column(String)
date = Column(DateTime)
map_name = Column(String)
actions = relationship('Action', backref='game', lazy='dynamic')
num_players = Column(Integer)
def __repr__(self):
return (
"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>"
.format(self.id, self.name, self.date, self.map_name, self.
num_players))
class Action(ReplayBase):
__tablename__ = 'action'
id = Column(Integer, primary_key=True)
game_id = Column(Integer, ForeignKey('game.id'))
code = Column(Integer)
message = Column(String)
date = Column(DateTime)
def __repr__(self):
return (
"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>"
.format(self.id, self.game_id, self.code, self.message, self.date))
<|reserved_special_token_1|>
""" DB models.
"""
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from db.session import map_engine, replay_engine
MapBase = declarative_base(bind=map_engine)
ReplayBase = declarative_base(bind=replay_engine)
class Map(MapBase):
    """Map record: name, dimensions, and the lines/points/posts that belong to it."""

    __tablename__ = 'map'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    size_x = Column(Integer)
    size_y = Column(Integer)
    lines = relationship('Line', backref='map', lazy='dynamic')
    points = relationship('Point', backref='map', lazy='dynamic')
    posts = relationship('Post', backref='map', lazy='dynamic')

    def __repr__(self):
        return (f"<Map(id='{self.id}', name='{self.name}', "
                f"size_x='{self.size_x}', size_y='{self.size_y}')>")
class Line(MapBase):
    """Line record: an integer length plus two endpoints and the owning map."""

    __tablename__ = 'line'

    id = Column(Integer, primary_key=True)
    # NOTE: "len" mirrors the DB column name; it shadows the builtin only
    # inside this class namespace.
    len = Column(Integer)
    # p0/p1 presumably reference Point ids — confirm against map data loader.
    p0 = Column(Integer)
    p1 = Column(Integer)
    map_id = Column(Integer, ForeignKey('map.id'))

    def __repr__(self):
        return (f"<Line(id='{self.id}', len='{self.len}', "
                f"p0='{self.p0}', p1='{self.p1}', map_id='{self.map_id}')>")
class Point(MapBase):
    """Point record: (x, y) coordinates on a map, with its attached posts."""

    __tablename__ = 'point'

    id = Column(Integer, primary_key=True)
    map_id = Column(Integer, ForeignKey('map.id'))
    x = Column(Integer)
    y = Column(Integer)
    posts = relationship('Post', backref='point', lazy='dynamic')

    def __repr__(self):
        return (f"<Point(id='{self.id}', map_id='{self.map_id}', "
                f"x='{self.x}', y='{self.y}')>")
class Post(MapBase):
    """Post record: a named station located at a point on a map, with its stats."""

    __tablename__ = 'post'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    # NOTE: "type" mirrors the DB column name; it shadows the builtin only
    # inside this class namespace.
    type = Column(Integer)
    population = Column(Integer)
    armor = Column(Integer)
    product = Column(Integer)
    replenishment = Column(Integer)
    map_id = Column(Integer, ForeignKey('map.id'))
    point_id = Column(Integer, ForeignKey('point.id'))

    def __repr__(self):
        return (f"<Post(id='{self.id}', name='{self.name}', "
                f"type='{self.type}', population='{self.population}', "
                f"armor='{self.armor}', product='{self.product}', "
                f"replenishment='{self.replenishment}', "
                f"map_id='{self.map_id}', point_id='{self.point_id}')>")
class Game(ReplayBase):
    """Game record in the replay DB: metadata plus its logged actions."""

    __tablename__ = 'game'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    date = Column(DateTime)
    map_name = Column(String)
    actions = relationship('Action', backref='game', lazy='dynamic')
    num_players = Column(Integer)

    def __repr__(self):
        return (f"<Game(id='{self.id}', name='{self.name}', "
                f"date='{self.date}', map_name='{self.map_name}', "
                f"num_players='{self.num_players}')>")
class Action(ReplayBase):
    """Single recorded game action: numeric code, message text, and timestamp."""

    __tablename__ = 'action'

    id = Column(Integer, primary_key=True)
    game_id = Column(Integer, ForeignKey('game.id'))
    code = Column(Integer)
    message = Column(String)
    date = Column(DateTime)

    def __repr__(self):
        return (f"<Action(id='{self.id}', game_id='{self.game_id}', "
                f"code='{self.code}', message='{self.message}', "
                f"date='{self.date}')>")
|
flexible
|
{
"blob_id": "6b3cb7a42c8bc665e35206b135f6aefea3439758",
"index": 7381,
"step-1": "<mask token>\n\n\nclass Line(MapBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n .format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = 
Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-2": "<mask token>\n\n\nclass Map(MapBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Line(MapBase):\n __tablename__ = 'line'\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n 
.format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-3": "<mask token>\n\n\nclass Map(MapBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return \"<Map(id='{}', name='{}', size_x='{}', size_y='{}')>\".format(\n self.id, self.name, self.size_x, self.size_y)\n\n\nclass Line(MapBase):\n __tablename__ = 'line'\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = 
Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n .format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-4": "<mask token>\n\n\nclass Map(MapBase):\n __tablename__ = 'map'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n size_x = Column(Integer)\n size_y = Column(Integer)\n lines = relationship('Line', backref='map', lazy='dynamic')\n points = relationship('Point', backref='map', lazy='dynamic')\n posts = relationship('Post', backref='map', lazy='dynamic')\n\n def __repr__(self):\n return \"<Map(id='{}', name='{}', size_x='{}', size_y='{}')>\".format(\n self.id, self.name, self.size_x, self.size_y)\n\n\nclass Line(MapBase):\n __tablename__ = 'line'\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return (\"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".\n format(self.id, self.len, self.p0, self.p1, self.map_id))\n\n\nclass Point(MapBase):\n __tablename__ = 'point'\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(self.\n id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n __tablename__ = 'post'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def __repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', product='{}', replenishment='{}', map_id='{}', point_id='{}')>\"\n .format(self.id, self.name, self.type, self.population, self.\n armor, self.product, self.replenishment, self.map_id, self.\n point_id))\n\n\nclass Game(ReplayBase):\n __tablename__ = 
'game'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return (\n \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\"\n .format(self.id, self.name, self.date, self.map_name, self.\n num_players))\n\n\nclass Action(ReplayBase):\n __tablename__ = 'action'\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return (\n \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\"\n .format(self.id, self.game_id, self.code, self.message, self.date))\n",
"step-5": "\"\"\" DB models.\n\"\"\"\nfrom sqlalchemy import Column, Integer, String, ForeignKey, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nfrom db.session import map_engine, replay_engine\n\nMapBase = declarative_base(bind=map_engine)\nReplayBase = declarative_base(bind=replay_engine)\n\n\nclass Map(MapBase):\n\n __tablename__ = 'map'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n size_x = Column(Integer)\n size_y = Column(Integer)\n lines = relationship('Line', backref='map', lazy='dynamic')\n points = relationship('Point', backref='map', lazy='dynamic')\n posts = relationship('Post', backref='map', lazy='dynamic')\n\n def __repr__(self):\n return \"<Map(id='{}', name='{}', size_x='{}', size_y='{}')>\".format(\n self.id, self.name, self.size_x, self.size_y)\n\n\nclass Line(MapBase):\n\n __tablename__ = 'line'\n\n id = Column(Integer, primary_key=True)\n len = Column(Integer)\n p0 = Column(Integer)\n p1 = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n\n def __repr__(self):\n return \"<Line(id='{}', len='{}', p0='{}', p1='{}', map_id='{}')>\".format(\n self.id, self.len, self.p0, self.p1, self.map_id)\n\n\nclass Point(MapBase):\n\n __tablename__ = 'point'\n\n id = Column(Integer, primary_key=True)\n map_id = Column(Integer, ForeignKey('map.id'))\n x = Column(Integer)\n y = Column(Integer)\n posts = relationship('Post', backref='point', lazy='dynamic')\n\n def __repr__(self):\n return \"<Point(id='{}', map_id='{}', x='{}', y='{}')>\".format(\n self.id, self.map_id, self.x, self.y)\n\n\nclass Post(MapBase):\n\n __tablename__ = 'post'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n type = Column(Integer)\n population = Column(Integer)\n armor = Column(Integer)\n product = Column(Integer)\n replenishment = Column(Integer)\n map_id = Column(Integer, ForeignKey('map.id'))\n point_id = Column(Integer, ForeignKey('point.id'))\n\n def 
__repr__(self):\n return (\n \"<Post(id='{}', name='{}', type='{}', population='{}', armor='{}', \"\n \"product='{}', replenishment='{}', map_id='{}', point_id='{}')>\".format(\n self.id, self.name, self.type, self.population, self.armor,\n self.product, self.replenishment, self.map_id, self.point_id\n )\n )\n\n\nclass Game(ReplayBase):\n\n __tablename__ = 'game'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n date = Column(DateTime)\n map_name = Column(String)\n actions = relationship('Action', backref='game', lazy='dynamic')\n num_players = Column(Integer)\n\n def __repr__(self):\n return \"<Game(id='{}', name='{}', date='{}', map_name='{}', num_players='{}')>\".format(\n self.id, self.name, self.date, self.map_name, self.num_players)\n\n\nclass Action(ReplayBase):\n\n __tablename__ = 'action'\n\n id = Column(Integer, primary_key=True)\n game_id = Column(Integer, ForeignKey('game.id'))\n code = Column(Integer)\n message = Column(String)\n date = Column(DateTime)\n\n def __repr__(self):\n return \"<Action(id='{}', game_id='{}', code='{}', message='{}', date='{}')>\".format(\n self.id, self.game_id, self.code, self.message, self.date)\n",
"step-ids": [
14,
16,
17,
18,
21
]
}
|
[
14,
16,
17,
18,
21
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('--batch_size', help='batch_size', required=False,
default=32)
parser.add_argument('--data_size', help='data_size', required=False,
default=1700)
parser.add_argument('--num_intra_threads', help='num_intra_threads',
required=False, default=19)
parser.add_argument('--num_batches', help='num_batches', required=False,
default=5000000)
parser.add_argument('--device', help='device', required=False, default='gpu')
<|reserved_special_token_0|>
with tf.device('/' + device + ':0'):
X = tf.placeholder('float')
Y = tf.placeholder('float')
W = tf.Variable(rng.randn(), name='weight')
b = tf.Variable(rng.randn(), name='bias')
pred = tf.add(tf.multiply(X, W), b)
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
<|reserved_special_token_0|>
with tf.Session(config=newConfig) as sess:
sess.run(init)
for epoch in range(training_epochs):
for x, y in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rng = numpy.random
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', help='batch_size', required=False,
default=32)
parser.add_argument('--data_size', help='data_size', required=False,
default=1700)
parser.add_argument('--num_intra_threads', help='num_intra_threads',
required=False, default=19)
parser.add_argument('--num_batches', help='num_batches', required=False,
default=5000000)
parser.add_argument('--device', help='device', required=False, default='gpu')
args = vars(parser.parse_args())
batch_size = int(args['batch_size'])
data_size = int(args['data_size'])
num_intra_threads = int(args['num_intra_threads'])
num_batches = int(args['num_batches'])
device = args['device']
learning_rate = 0.01
training_epochs = num_batches
display_step = 50
n_samples = data_size
train_X = rng.rand(1, n_samples)
train_Y = rng.rand(1, n_samples)
with tf.device('/' + device + ':0'):
X = tf.placeholder('float')
Y = tf.placeholder('float')
W = tf.Variable(rng.randn(), name='weight')
b = tf.Variable(rng.randn(), name='bias')
pred = tf.add(tf.multiply(X, W), b)
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
newConfig = tf.ConfigProto()
newConfig.intra_op_parallelism_threads = num_intra_threads
with tf.Session(config=newConfig) as sess:
sess.run(init)
for epoch in range(training_epochs):
for x, y in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import print_function
import tensorflow as tf
import argparse
import numpy
rng = numpy.random
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', help='batch_size', required=False,
default=32)
parser.add_argument('--data_size', help='data_size', required=False,
default=1700)
parser.add_argument('--num_intra_threads', help='num_intra_threads',
required=False, default=19)
parser.add_argument('--num_batches', help='num_batches', required=False,
default=5000000)
parser.add_argument('--device', help='device', required=False, default='gpu')
args = vars(parser.parse_args())
batch_size = int(args['batch_size'])
data_size = int(args['data_size'])
num_intra_threads = int(args['num_intra_threads'])
num_batches = int(args['num_batches'])
device = args['device']
learning_rate = 0.01
training_epochs = num_batches
display_step = 50
n_samples = data_size
train_X = rng.rand(1, n_samples)
train_Y = rng.rand(1, n_samples)
with tf.device('/' + device + ':0'):
X = tf.placeholder('float')
Y = tf.placeholder('float')
W = tf.Variable(rng.randn(), name='weight')
b = tf.Variable(rng.randn(), name='bias')
pred = tf.add(tf.multiply(X, W), b)
cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
newConfig = tf.ConfigProto()
newConfig.intra_op_parallelism_threads = num_intra_threads
with tf.Session(config=newConfig) as sess:
sess.run(init)
for epoch in range(training_epochs):
for x, y in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
<|reserved_special_token_1|>
'''
A linear regression learning algorithm example using TensorFlow library,
adapted as a configurable CPU/GPU micro-benchmark (random data, CLI flags
for data size, thread count, iteration count and device).

Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
import argparse
import numpy
rng = numpy.random
# Example invocation of the related tf_cnn_benchmarks tool this script mirrors:
#"python tf_cnn_benchmarks.py --device=cpu --data_format=NHWC --num_warmup_batches=0 --model=lenet --batch_size=32 --num_intra_threads=19 --num_batches=3750"
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', help='batch_size', required=False, default=32)
parser.add_argument('--data_size', help='data_size', required=False, default=1700)
parser.add_argument('--num_intra_threads', help='num_intra_threads', required=False, default=19)
parser.add_argument('--num_batches', help='num_batches', required=False, default=5000000)
parser.add_argument('--device', help='device', required=False, default='gpu')
args = vars(parser.parse_args())
# NOTE(review): batch_size is parsed but never used below -- confirm whether
# mini-batching was intended.
batch_size = int(args['batch_size'])
data_size = int(args['data_size'])
num_intra_threads =int(args['num_intra_threads'])
num_batches =int(args['num_batches'])
device =args['device']
# Parameters
learning_rate = 0.01
training_epochs = num_batches
# NOTE(review): display_step is unused (no progress printing below).
display_step = 50
# Training Data
#train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 7.042,10.791,5.313,7.997,5.654,9.27,3.1])
#train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 2.827,3.465,1.65,2.904,2.42,2.94,1.3])
#n_samples = train_X.shape[0]
n_samples=data_size
# NOTE(review): rng.rand(1, n_samples) makes a (1, n_samples) array, so the
# zip(...) in the training loop yields exactly ONE (row, row) pair per epoch
# and each sess.run is effectively a full-batch step -- confirm this is the
# intended benchmark workload.
train_X=rng.rand(1,n_samples)
train_Y=rng.rand(1,n_samples)
# Pin the whole graph to the requested device ('/cpu:0' or '/gpu:0').
with tf.device('/'+device+':0'):
    # tf Graph Input
    X = tf.placeholder("float")
    Y = tf.placeholder("float")
    # Set model weights
    W = tf.Variable(rng.randn(), name="weight")
    b = tf.Variable(rng.randn(), name="bias")
    # Construct a linear model: pred = W*X + b
    pred = tf.add(tf.multiply(X, W), b)
    # Mean squared error
    cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
    # Gradient descent
    # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    # Initializing the variables
    init = tf.global_variables_initializer()
    # gpu share
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
# Launch the graph, capping intra-op parallelism at the requested thread count.
newConfig = tf.ConfigProto()
newConfig.intra_op_parallelism_threads = num_intra_threads
with tf.Session(config=newConfig) as sess:
# with tf.Session() as sess:
    sess.run(init)
    # Fit all training data (no loss printing -- pure throughput benchmark)
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
|
flexible
|
{
"blob_id": "2e8d39d6d72672de8e4eac8295b90d68b1dff938",
"index": 9007,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--batch_size', help='batch_size', required=False,\n default=32)\nparser.add_argument('--data_size', help='data_size', required=False,\n default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads',\n required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False,\n default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\n<mask token>\nwith tf.device('/' + device + ':0'):\n X = tf.placeholder('float')\n Y = tf.placeholder('float')\n W = tf.Variable(rng.randn(), name='weight')\n b = tf.Variable(rng.randn(), name='bias')\n pred = tf.add(tf.multiply(X, W), b)\n cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n init = tf.global_variables_initializer()\n<mask token>\nwith tf.Session(config=newConfig) as sess:\n sess.run(init)\n for epoch in range(training_epochs):\n for x, y in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n",
"step-3": "<mask token>\nrng = numpy.random\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', help='batch_size', required=False,\n default=32)\nparser.add_argument('--data_size', help='data_size', required=False,\n default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads',\n required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False,\n default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\nargs = vars(parser.parse_args())\nbatch_size = int(args['batch_size'])\ndata_size = int(args['data_size'])\nnum_intra_threads = int(args['num_intra_threads'])\nnum_batches = int(args['num_batches'])\ndevice = args['device']\nlearning_rate = 0.01\ntraining_epochs = num_batches\ndisplay_step = 50\nn_samples = data_size\ntrain_X = rng.rand(1, n_samples)\ntrain_Y = rng.rand(1, n_samples)\nwith tf.device('/' + device + ':0'):\n X = tf.placeholder('float')\n Y = tf.placeholder('float')\n W = tf.Variable(rng.randn(), name='weight')\n b = tf.Variable(rng.randn(), name='bias')\n pred = tf.add(tf.multiply(X, W), b)\n cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n init = tf.global_variables_initializer()\nnewConfig = tf.ConfigProto()\nnewConfig.intra_op_parallelism_threads = num_intra_threads\nwith tf.Session(config=newConfig) as sess:\n sess.run(init)\n for epoch in range(training_epochs):\n for x, y in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport tensorflow as tf\nimport argparse\nimport numpy\nrng = numpy.random\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', help='batch_size', required=False,\n default=32)\nparser.add_argument('--data_size', help='data_size', required=False,\n default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads',\n required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False,\n default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\nargs = vars(parser.parse_args())\nbatch_size = int(args['batch_size'])\ndata_size = int(args['data_size'])\nnum_intra_threads = int(args['num_intra_threads'])\nnum_batches = int(args['num_batches'])\ndevice = args['device']\nlearning_rate = 0.01\ntraining_epochs = num_batches\ndisplay_step = 50\nn_samples = data_size\ntrain_X = rng.rand(1, n_samples)\ntrain_Y = rng.rand(1, n_samples)\nwith tf.device('/' + device + ':0'):\n X = tf.placeholder('float')\n Y = tf.placeholder('float')\n W = tf.Variable(rng.randn(), name='weight')\n b = tf.Variable(rng.randn(), name='bias')\n pred = tf.add(tf.multiply(X, W), b)\n cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n init = tf.global_variables_initializer()\nnewConfig = tf.ConfigProto()\nnewConfig.intra_op_parallelism_threads = num_intra_threads\nwith tf.Session(config=newConfig) as sess:\n sess.run(init)\n for epoch in range(training_epochs):\n for x, y in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n",
"step-5": "'''\nA linear regression learning algorithm example using TensorFlow library.\n\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport argparse\n\nimport numpy\nrng = numpy.random\n\n#\"python tf_cnn_benchmarks.py --device=cpu --data_format=NHWC --num_warmup_batches=0 --model=lenet --batch_size=32 --num_intra_threads=19 --num_batches=3750\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', help='batch_size', required=False, default=32)\nparser.add_argument('--data_size', help='data_size', required=False, default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads', required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False, default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\n\nargs = vars(parser.parse_args())\n\nbatch_size = int(args['batch_size'])\ndata_size = int(args['data_size'])\nnum_intra_threads =int(args['num_intra_threads'])\nnum_batches =int(args['num_batches'])\ndevice =args['device']\n\n# Parameters\nlearning_rate = 0.01\ntraining_epochs = num_batches\ndisplay_step = 50\n\n# Training Data\n#train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 7.042,10.791,5.313,7.997,5.654,9.27,3.1]) \n#train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 2.827,3.465,1.65,2.904,2.42,2.94,1.3])\n#n_samples = train_X.shape[0]\n\nn_samples=data_size\ntrain_X=rng.rand(1,n_samples)\ntrain_Y=rng.rand(1,n_samples)\n\n\nwith tf.device('/'+device+':0'):\n # tf Graph Input\n X = tf.placeholder(\"float\")\n Y = tf.placeholder(\"float\")\n\n # Set model weights\n W = tf.Variable(rng.randn(), name=\"weight\")\n b = tf.Variable(rng.randn(), name=\"bias\")\n\n # Construct a linear model\n pred = tf.add(tf.multiply(X, W), b)\n\n # Mean squared error\n cost = 
tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)\n # Gradient descent\n # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n # Initializing the variables\n init = tf.global_variables_initializer()\n\n # gpu share\n#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\n\n# Launch the graph\nnewConfig = tf.ConfigProto()\nnewConfig.intra_op_parallelism_threads = num_intra_threads\nwith tf.Session(config=newConfig) as sess:\n# with tf.Session() as sess:\n sess.run(init)\n # Fit all training data\n for epoch in range(training_epochs):\n for (x, y) in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.