index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
743
|
radrumond/hidra
|
refs/heads/master
|
/archs/fcn.py
|
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import os
import numpy as np
import tensorflow as tf
from archs.maml import MAML
class Model(MAML):
    """Plain-MAML learner: four conv blocks followed by one dense head.

    Variable shapes depend on ``isMIN``: miniImageNet (84x84 RGB) uses 32
    filters and an 800-unit flattened feature, while Omniglot (28x28,
    single channel) uses 64 filters and a 64-unit flattened feature.
    """

    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        super().__init__(train_lr, meta_lr, image_shape, isMIN, label_size)

    def dense_weights(self):
        """Create (or fetch, via AUTO_REUSE) every meta-learned variable.

        Returns:
            (weights, cells): ``weights`` maps name -> tf.Variable for the
            conv filters/biases and the dense head; ``cells`` stays empty
            for plain MAML (kept for interface parity with the HIDRA model).
        """
        weights = {}
        cells = {}
        initializer = tf.contrib.layers.xavier_initializer()
        print("Creating/loading Weights")
        # Omniglot defaults: 1 input channel, 64 filters, 64-dim flat feature.
        inic = 1
        filters = 64
        finals = 64
        if self.isMIN:
            # miniImageNet: RGB input, fewer filters, larger flat feature.
            inic = 3
            finals = 800
            filters = 32
        with tf.variable_scope('MAML', reuse=tf.AUTO_REUSE):
            weights['c_1'] = tf.get_variable('c_1', shape=(3, 3, inic, filters), initializer=initializer)
            weights['c_2'] = tf.get_variable('c_2', shape=(3, 3, filters, filters), initializer=initializer)
            weights['c_3'] = tf.get_variable('c_3', shape=(3, 3, filters, filters), initializer=initializer)
            weights['c_4'] = tf.get_variable('c_4', shape=(3, 3, filters, filters), initializer=initializer)
            weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant)
            weights['d_1'] = tf.get_variable('d_1w', [finals, self.label_size], initializer=initializer)
            weights['b_1'] = tf.get_variable('d_1b', [self.label_size], initializer=tf.initializers.constant)
        print("Done Creating/loading Weights")
        return weights, cells

    def forward(self, x, weights, training):
        """Forward pass: 4x (conv -> batch-norm -> relu -> 2x2 max-pool),
        then flatten and a dense layer sized by ``self.label_size``.

        The conv/dense parameters come from the explicit ``weights`` dict so
        inner-loop "fast weights" can be substituted by the MAML machinery.
        NOTE(review): the batch-norm layers create their own variables via
        tf.layers (names bn1..bn4) and are NOT part of ``weights``, so they
        are not adapted by the inner updates — confirm this is intended.
        """
        conv1 = self.conv_layer(x, weights["c_1"], weights["cb_1"], "conv1")
        conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE)
        conv1 = tf.nn.relu(conv1)
        conv1 = tf.layers.MaxPooling2D(2, 2)(conv1)
        conv2 = self.conv_layer(conv1, weights["c_2"], weights["cb_2"], "conv2")
        conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE)
        conv2 = tf.nn.relu(conv2)
        conv2 = tf.layers.MaxPooling2D(2, 2)(conv2)
        conv3 = self.conv_layer(conv2, weights["c_3"], weights["cb_3"], "conv3")
        conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE)
        conv3 = tf.nn.relu(conv3)
        conv3 = tf.layers.MaxPooling2D(2, 2)(conv3)
        conv4 = self.conv_layer(conv3, weights["c_4"], weights["cb_4"], "conv4")
        conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE)
        conv4 = tf.nn.relu(conv4)
        conv4 = tf.layers.MaxPooling2D(2, 2)(conv4)
        bn = tf.layers.Flatten()(conv4)
        fc1 = self.fc_layer(bn, "dense1", weights["d_1"], weights["b_1"])
        return fc1
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
744
|
radrumond/hidra
|
refs/heads/master
|
/train.py
|
import numpy as np
import tensorflow as tf
from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
import time
def train(m, mt,           # m: model built in 'train' mode; mt: model built in 'test' mode (or None)
          data_sampler,    # dataset object exposing sample_Task(...) generators
          min_classes,     # minimum number of classes per task
          max_classes,     # maximum number of classes per task (inclusive; +1 applied below)
          train_shots,     # samples per class in the support (train) split
          test_shots,      # samples per class in the query (val) split
          meta_batch,      # number of tasks per meta batch
          meta_iters,      # number of meta-training iterations
          test_iters,      # NOTE(review): unused inside this function — confirm
          train_step,      # checkpoint step to try to resume from
          name):           # experiment name, used for checkpoint paths
    """Run the meta-training loop on model `m`.

    Prints progress every 100 iterations, evaluates with `mt` (when given)
    every 1000 iterations, and checkpoints every 5000 iterations plus once
    at the end.
    """
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    #---------Performance tracking lists---------------------------------------
    losses = []      # per-iteration validation losses (all inner steps)
    temp_yp = []     # overall query accuracy since last print
    temp_ypn = []    # per-label query accuracy per iteration
    nls = []         # NOTE(review): appended nowhere — appears unused
    aps = []         # NOTE(review): appended nowhere — appears unused
    buffer = []      # training losses since last print
    lossesB = []     # validation losses since last print
    #--------------------------------------------------------------------------
    #---------Load train and test data generators------------------------------
    train_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"train")
    if mt is not None:
        test_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"test" )
    # Resume from a stored checkpoint when its marker file exists.
    m.loadWeights(sess, name, step=str(int(train_step)), model_name=name+".ckpt")
    #--------------------------------------------------------------------------
    #TRAIN LOOP
    print("Starting meta training:")
    start = time.time()
    for i in range(meta_iters):
        xb1,yb1,xb2,yb2 = next(train_gen)
        # Number of distinct labels actually present in this meta batch.
        num_l = [len(np.unique(np.argmax(yb1,axis=-1)))]
        if m.maml_n == 2:
            # HIDRA master node: re-assign the output heads from the master.
            # NOTE(review): hard-coded [5] here while num_l is used below —
            # confirm this is intended.
            sess.run(m.init_assign, feed_dict={m.label_n:[5]})
        l,_,vals,ps=sess.run([m.train_loss,m.meta_op,m.val_losses,m.val_predictions],feed_dict={m.train_xb: xb1,
                                                                                               m.train_yb: yb1,
                                                                                               m.val_xb:xb2,
                                                                                               m.val_yb:yb2,
                                                                                               m.label_n:num_l})
        if m.maml_n == 2:
            # HIDRA: fold the adapted heads back into the master node.
            sess.run(m.final_assign,feed_dict={m.label_n:num_l})
        losses.append(vals)
        lossesB.append(vals)
        buffer.append(l)
        # Calculate accuracies (overall and per ground-truth label) on the
        # query predictions after the LAST inner step (ps[-1]).
        aux = []
        tmp_pred = np.argmax(np.reshape(ps[-1],[-1,num_l[0]]),axis=-1)
        tmp_true = np.argmax(np.reshape(yb2,[-1,num_l[0]]),axis=-1)
        for ccci in range(num_l[0]):
            tmp_idx = np.where(tmp_true==ccci)[0]
            aux.append(np.mean(tmp_pred[tmp_idx]==tmp_true[tmp_idx]))
        temp_yp.append(np.mean(tmp_pred==tmp_true))
        temp_ypn.append(aux)
        #EVALUATE and PRINT
        if i%100==0:
            testString = ""
            # If a test model was given, evaluate every 1000 iterations
            # using the shared (already trained) variables.
            if mt is not None and i%1000==0:
                lossestest = []
                buffertest = []
                lossesBtest = []
                temp_yptest = []
                for z in range(100):
                    if m.maml_n == 2:
                        sess.run(mt.init_assign, feed_dict={mt.label_n:[5]})
                    xb1,yb1,xb2,yb2 = next(test_gen)
                    num_l = [len(np.unique(np.argmax(yb1,axis=-1)))]
                    l,vals,ps=sess.run([mt.test_train_loss,mt.test_val_losses,mt.val_predictions],feed_dict={mt.train_xb: xb1,
                                                                                                            mt.train_yb: yb1,
                                                                                                            mt.val_xb:xb2,
                                                                                                            mt.val_yb:yb2,
                                                                                                            mt.label_n:num_l})
                    lossestest.append(vals)
                    lossesBtest.append(vals)
                    buffertest.append(l)
                    temp_yptest.append(np.mean(np.argmax(ps[-1],axis=-1)==np.argmax(yb2,axis=-1)))
                testString = f"\n TEST: TLoss {np.mean(buffertest):.3f} VLoss {np.mean(lossesBtest,axis=0)[-1]:.3f}, ACCURACY {np.mean(temp_yptest):.4f}"
            print(f"Epoch {i}: TLoss {np.mean(buffer):.4f}, VLoss {np.mean(lossesB,axis=0)[-1]:.4f},",
                  f"Accuracy {np.mean(temp_yp):.4}", f", Per label acc: {[float('%.4f' % elem) for elem in aux]}", f"Finished in {time.time()-start}s",testString)
            # Reset the rolling windows used for printing.
            buffer = []
            lossesB = []
            temp_yp = []
            start = time.time()
        if i%5000==0:
            print("Saving...")
            m.saveWeights(sess, name, i, model_name=name+".ckpt")
    # Final checkpoint after the loop finishes (presumed placement — the
    # dump's indentation is ambiguous for this line).
    m.saveWeights(sess, name, i, model_name=name+".ckpt")
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
745
|
radrumond/hidra
|
refs/heads/master
|
/archs/maml.py
|
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import os
import numpy as np
import tensorflow as tf
class MAML:
    """Base class for MAML-style meta learning (TF1 graph mode).

    Subclasses provide ``dense_weights()`` (create the variable dicts) and
    ``forward(x, weights, training)`` (run the network with an explicit
    weight dict so inner-loop "fast weights" can be substituted).
    """

    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        self.train_lr = train_lr      # inner-loop (task adaptation) learning rate
        self.meta_lr = meta_lr        # outer-loop (meta) learning rate
        self.image_shape = image_shape
        self.isMIN = isMIN            # True -> miniImageNet shapes, False -> Omniglot
        self.saver = None             # created lazily on first save/load
        self.label_size = label_size
        self.finals = 64              # flattened feature size after the conv trunk
        self.maml_n = 1               # 1 = plain MAML (the HIDRA variant uses 2)
        if isMIN:
            self.finals = 800

    def build(self, K, meta_batchsz, mode='train'):
        """Build the meta-train ('train') or meta-test graph with K inner steps."""
        # Meta batch of tasks: [task, sample, H, W, C] images, one-hot labels.
        self.train_xb = tf.placeholder(tf.float32, [None, None, None, None, self.image_shape[-1]])
        self.train_yb = tf.placeholder(tf.float32, [None, None, None])
        self.val_xb = tf.placeholder(tf.float32, [None, None, None, None, self.image_shape[-1]])
        self.val_yb = tf.placeholder(tf.float32, [None, None, None])
        self.label_n = tf.placeholder(tf.int32, 1, name="num_labs")
        # Initialize the variables shared across all tasks.
        self.weights, self.cells = self.dense_weights()
        # BUG FIX: was `mode is 'train'` — identity comparison on string
        # literals relies on interning; use equality.
        training = mode == 'train'

        def meta_task(inputs):
            """Adapt on one task's support set; evaluate every step on the query set."""
            train_x, train_y, val_x, val_y = inputs
            val_preds, val_losses = [], []
            # First inner gradient step from the shared initialization.
            train_pred = self.forward(train_x, self.weights, training)
            train_loss = tf.losses.softmax_cross_entropy(train_y, train_pred)
            grads = tf.gradients(train_loss, list(self.weights.values()))
            gvs = dict(zip(self.weights.keys(), grads))
            stepped = [self.weights[key] - self.train_lr * gvs[key] for key in self.weights.keys()]
            fast_weights = dict(zip(self.weights.keys(), stepped))
            # Record T0 prediction/loss (used at meta-test time too).
            val_pred = self.forward(val_x, fast_weights, training)
            val_loss = tf.losses.softmax_cross_entropy(val_y, val_pred)
            val_preds.append(val_pred)
            val_losses.append(val_loss)
            # Continue building the T1..T(K-1) inner-step graph.
            for _ in range(1, K):
                loss = tf.losses.softmax_cross_entropy(train_y, self.forward(train_x, fast_weights, training))
                grads = tf.gradients(loss, list(fast_weights.values()))
                gvs = dict(zip(fast_weights.keys(), grads))
                fast_weights = dict(zip(fast_weights.keys(),
                                        [fast_weights[key] - self.train_lr * gvs[key] for key in fast_weights.keys()]))
                # Evaluate the query set after this inner step.
                val_pred = self.forward(val_x, fast_weights, training)
                val_loss = tf.losses.softmax_cross_entropy(val_y, val_pred)
                val_preds.append(val_pred)
                val_losses.append(val_loss)
            return [train_pred, train_loss, val_preds, val_losses]

        out_dtype = [tf.float32, tf.float32, [tf.float32] * K, [tf.float32] * K]
        result = tf.map_fn(meta_task, elems=(self.train_xb, self.train_yb, self.val_xb, self.val_yb),
                           dtype=out_dtype, parallel_iterations=meta_batchsz, name='map_fn')
        train_pred_tasks, train_loss_tasks, val_preds_tasks, val_losses_tasks = result
        if mode == 'train':  # BUG FIX: was `mode is 'train'`
            self.train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
            self.val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)]
            self.val_predictions = val_preds_tasks
            # Meta update: optimize the final-step validation loss with
            # gradient-norm clipping at 10.
            optimizer = tf.train.AdamOptimizer(self.meta_lr, name='meta_optim')
            gvs = optimizer.compute_gradients(self.val_losses[-1])
            gvs = [(tf.clip_by_norm(grad, 10), var) for grad, var in gvs]
            self.meta_op = optimizer.apply_gradients(gvs)
        else:
            self.test_train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
            self.test_val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)]
            self.val_predictions = val_preds_tasks
        self.saving_weights = tf.trainable_variables()

    def conv_layer(self, x, W, b, name, strides=1):
        """3x3 'SAME' convolution with bias.

        NOTE(review): the `strides` argument is accepted but the op is
        hard-wired to stride 1 — confirm before relying on it.
        """
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            x = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
            x = tf.nn.bias_add(x, b)
            return x

    def fc_layer(self, x, name, weights=None, biases=None):
        """Dense layer `x @ weights + biases` with explicitly passed variables."""
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            fc = tf.matmul(x, weights)
            fc = tf.nn.bias_add(fc, biases)
            return fc

    def loadWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', model_name='model.ckpt'):
        """Restore `<modeldir>/<name>/<model_name>-<step>` if its marker file exists.

        `step` is concatenated into the path, so callers pass it as a string.
        """
        if self.saver is None:  # BUG FIX: was `== None`
            self.saver = tf.train.Saver(var_list=self.saving_weights, max_to_keep=12)
        saver = self.saver
        checkpoint_path = modeldir + f"{name}/" + model_name + "-" + step
        if os.path.isfile(checkpoint_path + ".marker"):
            saver.restore(sess, checkpoint_path)
            print('The checkpoint has been loaded.')
        else:
            print(checkpoint_path + ".marker not found. Starting from scratch.")

    def saveWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', model_name='model.ckpt'):
        """Save variables and touch a `.marker` file that loadWeights tests for."""
        if self.saver is None:  # BUG FIX: was `== None`
            self.saver = tf.train.Saver(var_list=self.saving_weights, max_to_keep=12)
        saver = self.saver
        checkpoint_path = modeldir + f"{name}/" + model_name
        # BUG FIX: checkpoints live in `<modeldir>/<name>/`, but only
        # `<modeldir>` used to be created — saving a fresh experiment failed.
        os.makedirs(modeldir + f"{name}/", exist_ok=True)
        saver.save(sess, checkpoint_path, global_step=step)
        print('The checkpoint has been created.')
        open(checkpoint_path + "-" + str(int(step)) + ".marker", 'a').close()

    def dense_weights(self):
        """Subclass hook: return the (weights, cells) variable dicts."""
        return

    def forward(self, x, weights, training):
        """Subclass hook: forward pass using the supplied weight dict."""
        return
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
746
|
radrumond/hidra
|
refs/heads/master
|
/archs/hydra.py
|
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import numpy as np
import tensorflow as tf
from archs.maml2 import MAML
def getBin(l=10):
    """Return the binary encodings of 1..l as lists of bits.

    The bit-width is the smallest n with 2**n >= l (minimum 1); values that
    need more than n bits (e.g. l itself when l is a power of two) keep
    their naturally longer representation.
    """
    width, capacity = 1, 2
    while capacity < l:
        capacity *= 2
        width += 1
    return [[int(bit) for bit in format(value, 'b').zfill(width)]
            for value in range(1, l + 1)]
class Model(MAML):
    """HIDRA learner: shared conv trunk plus one dense "head" per label.

    Inherits the meta-training graph from archs.maml2.MAML (not visible in
    this file). A MASTER cell holds a single prototype head; according to
    the usage in train.py (m.init_assign / m.final_assign) it is copied
    into/out of the per-label heads — presumably wired up in maml2; confirm.
    """
    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        super().__init__(train_lr, meta_lr, image_shape, isMIN, label_size)
        # Flattened feature size after the 4 conv/pool blocks.
        self.finals = 64
        if isMIN:
            self.finals = 800

    def getBin(self, l=10):
        """Binary encodings of 1..l as bit lists.

        NOTE(review): exact duplicate of the module-level getBin helper
        above; kept as-is.
        """
        x_ = 2
        n = 1
        while x_ < l:
            x_ = x_ * 2
            n += 1
        numbers = []
        for i in range(l):
            num = []
            for j in list('{0:0b}'.format(i + 1).zfill(n)):
                num.append(int(j))
            numbers.append(num)
        return numbers

    def dense_weights(self):
        """Create the shared conv trunk, the per-label heads, and the MASTER cell.

        Returns:
            (weights, cells): ``weights`` holds the conv variables plus
            one (finals x 1) head per possible label; ``cells`` holds the
            single MASTER prototype head.
        NOTE(review): ``self.max_labels`` is not set in this file — it is
        presumably defined by the maml2 parent; confirm.
        """
        weights = {}
        cells = {}
        initializer = tf.contrib.layers.xavier_initializer()
        # Omniglot defaults: 1 input channel, 64 filters, 64-dim feature.
        divider = 1  # NOTE(review): computed but never used
        inic = 1
        filters = 64
        self.finals = 64
        if self.isMIN:
            print("\n\n\n\n\n\n\n\n\nIS MIN\n\n\n\n\n\n\n\n\n\n\n")
            divider = 2
            inic = 3
            self.finals = 800
            filters = 32
        with tf.variable_scope('MASTER', reuse=tf.AUTO_REUSE):
            # Prototype head copied into the per-label heads (see train.py).
            cells['d_1'] = tf.get_variable('MASTER_d_1w', [self.finals, 1], initializer=initializer)
            cells['b_1'] = tf.get_variable('MASTER_d_1b', [1], initializer=tf.initializers.constant)
        with tf.variable_scope('MAML', reuse=tf.AUTO_REUSE):
            weights['c_1'] = tf.get_variable('c_1', shape=(3, 3, inic, filters), initializer=initializer)
            weights['c_2'] = tf.get_variable('c_2', shape=(3, 3, filters, filters), initializer=initializer)
            weights['c_3'] = tf.get_variable('c_3', shape=(3, 3, filters, filters), initializer=initializer)
            weights['c_4'] = tf.get_variable('c_4', shape=(3, 3, filters, filters), initializer=initializer)
            weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant)
            # One single-output head per potential label.
            for i in range(self.max_labels):
                weights['d_1w' + str(i)] = tf.get_variable('d_1w' + str(i), [self.finals, 1], initializer=initializer)
                weights['b_1w' + str(i)] = tf.get_variable('d_1b' + str(i), [1], initializer=tf.initializers.constant)
        return weights, cells

    def forward(self, x, weights, training):
        """Forward pass: conv trunk, then concatenate all per-label heads
        and keep only the first ``label_n`` logits (the active classes).

        NOTE(review): as in fcn.py, the batch-norm variables are created by
        tf.layers and are not part of ``weights`` — confirm intended.
        """
        conv1 = self.conv_layer(x, weights["c_1"], weights["cb_1"], "conv1")
        conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE)
        conv1 = tf.nn.relu(conv1)
        conv1 = tf.layers.MaxPooling2D(2, 2)(conv1)
        conv2 = self.conv_layer(conv1, weights["c_2"], weights["cb_2"], "conv2")
        conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE)
        conv2 = tf.nn.relu(conv2)
        conv2 = tf.layers.MaxPooling2D(2, 2)(conv2)
        conv3 = self.conv_layer(conv2, weights["c_3"], weights["cb_3"], "conv3")
        conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE)
        conv3 = tf.nn.relu(conv3)
        conv3 = tf.layers.MaxPooling2D(2, 2)(conv3)
        conv4 = self.conv_layer(conv3, weights["c_4"], weights["cb_4"], "conv4")
        conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE)
        conv4 = tf.nn.relu(conv4)
        conv4 = tf.layers.MaxPooling2D(2, 2)(conv4)
        bn = tf.layers.Flatten()(conv4)
        # Run every head, then slice down to the active number of labels.
        agg = [self.fc_layer(bn, "dense" + str(i), weights["d_1w" + str(i)], weights["b_1w" + str(i)]) for i in range(self.max_labels)]
        fc1 = tf.concat(agg, axis=-1)[:, :self.label_n[0]]
        return fc1
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
747
|
radrumond/hidra
|
refs/heads/master
|
/data_gen/omni_gen.py
|
import numpy as np
import os
import cv2
import pickle
class MiniImgNet_Gen:
    """miniImageNet few-shot task generator.

    Lists class directories under `<path>/{train,test,val}/` at construction
    and lazily loads + caches the 84x84 RGB images per split on first use.
    """
    def __init__(self, path="/tmp/data/miniimagenet", data_path=None):
        if data_path is None:
            self.path = path
            self.train_paths = ["train/" + x for x in os.listdir(path + "/train")]
            self.test_paths = ["test/" + x for x in os.listdir(path + "/test")]
            self.val_paths = ["val/" + x for x in os.listdir(path + "/val")]
        # NOTE(review): when data_path is not None the *_paths attributes are
        # never set and data_path itself is not used below — confirm.
        self.data_path = data_path
        # Per-split caches of loaded images (filled lazily in sample_Task).
        self.meta_train = None
        self.meta_test = None
        self.meta_val = None

    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Infinite generator of meta batches of few-shot tasks.

        Each yield is (train_x, train_y, test_x, test_y): lists of length
        mb_size with images reshaped to [-1, 84, 84, 3] and one-hot labels.
        All tasks in one batch share the same class count, drawn uniformly
        from [min_class, max_class). Raises ValueError for an unknown split
        or min_class < 2.
        """
        print('Loading MiniImagenet data...')
        # Load (or fetch from cache) the images of the requested split.
        if training == "train":
            if self.meta_train is None:
                meta_data = []
                for idx, im_class in enumerate(self.train_paths):
                    meta_data.append(np.array(loadImgDir(self.path + "/" + im_class, [84, 84], rgb=True)))
                self.meta_train = meta_data
            else:
                meta_data = self.meta_train
        elif training == "val":
            if self.meta_val is None:
                meta_data = []
                for idx, im_class in enumerate(self.val_paths):
                    meta_data.append(np.array(loadImgDir(self.path + "/" + im_class, [84, 84], rgb=True)))
                self.meta_val = meta_data
            else:
                meta_data = self.meta_val
        elif training == "test":
            if self.meta_test is None:
                meta_data = []
                for idx, im_class in enumerate(self.test_paths):
                    meta_data.append(np.array(loadImgDir(self.path + "/" + im_class, [84, 84], rgb=True)))
                self.meta_test = meta_data
            else:
                meta_data = self.meta_test
        else:
            raise ValueError("Training needs to be train, val or test")
        print(f'Finished loading MiniImagenet data: {np.array(meta_data).shape}')
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        while True:
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # Sample a fixed number of classes for the whole meta batch.
            nr_classes = np.random.randint(min_class, max_class)
            for mb in range(mb_size):
                # Select which classes appear in this task.
                classes = np.random.choice(range(len(meta_data)), nr_classes, replace=False)
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                for label_nr, cl in enumerate(classes):
                    # Draw disjoint support/query images for this class.
                    images = np.random.choice(len(meta_data[cl]), train_size + test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(meta_data[cl][train_imgs])
                    test_x.append(meta_data[cl][test_imgs])
                    train_y.append(np.ones(train_size) * label_nr)
                    test_y.append(np.ones(test_size) * label_nr)
                train_x = np.array(train_x)
                # One-hot encode the integer labels.
                train_y = np.eye(len(classes))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(classes))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 84, 84, 3])
                test_x = np.reshape(test_x, [-1, 84, 84, 3])
                if shuffle:
                    # Shuffle samples while keeping image/label alignment.
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
# Initiates the Omniglot dataset and splits into meta train and meta task
class OmniChar_Gen:
    """Omniglot few-shot task generator.

    Loads every character of every alphabet (background + evaluation sets)
    into memory at construction, then splits characters into 1200 meta-train
    indices and the remainder for meta-test.
    """
    def __init__(self, path="/tmp/data/omniglot", data_path=None, test_idx=None):
        self.path = path
        self.tasks = ["/images_background/" + x for x in os.listdir(path + "/images_background")] + ["/images_evaluation/" + x for x in os.listdir(path + "/images_evaluation")]
        # Number of characters per alphabet (currently informational).
        self.lens = {}
        for task in self.tasks:
            self.lens[task] = len(os.listdir(self.path + task))
        self.meta_data = []
        print("Loading Omniglot data")
        for idx, task in enumerate(range(len(self.tasks))):
            if idx % 10 == 0:
                print(f"Loading tasks {idx}/{len(self.tasks)}")
            data = []
            for char in os.listdir(self.path + self.tasks[task]):
                c = []
                for img in os.listdir(self.path + self.tasks[task] + "/" + char):
                    c.append(readImg(self.path + self.tasks[task] + "/" + char + "/" + img))
                data.append(c)
            self.meta_data.append(data)
        # Flatten alphabets into one array of characters: [char, 20, 28, 28, 1].
        self.meta_data = np.concatenate(self.meta_data)
        print("Finished loading data")
        if test_idx is None:  # BUG FIX: was `test_idx==None`
            # Random 1200/rest split of the characters.
            self.train_idx = list(range(len(self.meta_data)))
            np.random.shuffle(self.train_idx)
            self.test_idx = self.train_idx[1200:]
            self.train_idx = self.train_idx[:1200]
            print("Test_idx:", self.test_idx)
        else:
            # Reuse a caller-supplied test split; train on everything else.
            self.test_idx = test_idx
            self.train_idx = list(set(list(range(len(self.meta_data)))) - set(self.test_idx))

    # Builds a generator that samples meta batches from meta training/test data
    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Infinite generator of meta batches of few-shot Omniglot tasks.

        Each yield is (train_x, train_y, test_x, test_y): lists of length
        mb_size with images reshaped to [-1, 28, 28, 1] and one-hot labels.
        All tasks in one batch share the same class count, drawn uniformly
        from [min_class, max_class). Raises ValueError for an unknown split
        or min_class < 2.
        """
        if training == "train":
            idx = self.train_idx
        elif training == "test":
            idx = self.test_idx
        else:
            raise ValueError("Omniglot only supports train and test for training param")
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        while True:
            image_idx = idx.copy()
            np.random.shuffle(image_idx)
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # Roll the number of classes shared by all tasks in this batch.
            nr_classes = np.random.randint(min_class, max_class)
            for task in range(mb_size):
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                # Sample the characters (classes) for the task.
                chars = np.random.choice(image_idx, nr_classes, False)
                # Sample disjoint support/query shots for each character
                # (each Omniglot character has 20 drawings).
                for label_nr, char in enumerate(chars):
                    images = np.random.choice(range(20), train_size + test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(self.meta_data[char][train_imgs])
                    test_x.append(self.meta_data[char][test_imgs])
                    train_y.append(np.ones(train_size) * label_nr)
                    test_y.append(np.ones(test_size) * label_nr)
                train_x = np.array(train_x)
                # One-hot encode the integer labels.
                train_y = np.eye(len(chars))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(chars))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 28, 28, 1])
                test_x = np.reshape(test_x, [-1, 28, 28, 1])
                if shuffle:
                    # Shuffle samples while keeping image/label alignment.
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
def getOrder(minClass, maxClass, mb_size, number_chars=1200):
    """Draw meta-batch class counts until ~number_chars characters are used.

    Returns a list of integers in [minClass, maxClass) where each drawn
    value is repeated mb_size times (one entry per task in the batch); the
    loop stops once fewer than minClass*mb_size characters remain.
    """
    drawn = []
    used = 0
    while used <= number_chars - minClass * mb_size:
        # Cap the draw so a batch never needs more characters than remain.
        remaining_cap = int((number_chars - used) / mb_size) + 1
        pick = np.random.randint(minClass, min(remaining_cap, maxClass))
        drawn.extend([pick] * mb_size)
        used += pick * mb_size
    return drawn
def readImg(path, size=[28, 28], rgb=False):
    """Load an image from `path`, resize to `size`, and scale to [0, 1] floats.

    Returns shape (H, W, 1) when rgb is False, else (H, W, 3).
    NOTE(review): cv2.imread returns None for unreadable files, which would
    crash in cv2.resize — callers assume valid image paths.
    """
    img = cv2.imread(path)
    img = cv2.resize(img, (size[0], size[1])).astype(float)
    if np.max(img) > 1.0:
        img /= 255.
    if not rgb:
        # Keep a single channel (cv2 loads 3 identical channels for
        # grayscale sources).
        return img[:, :, :1]
    if len(img.shape) == 3:
        if img.shape[-1] != 3:
            print('ASFASFASFAS')
            print(img.shape)
            print(path)
        return img
    # 2-D single-channel image: replicate the channel along a new last axis.
    # BUG FIX: the original np.reshape([img, img, img], [H, W, 3]) produced
    # the right shape but scrambled the pixel layout (it reshaped a
    # (3, H, W) stack); np.stack keeps each pixel's channels together.
    return np.stack([img, img, img], axis=-1)
def unison_shuffled_copies(a, b):
    """Apply one shared random permutation to two equal-length arrays.

    Rows of `a` and `b` stay aligned after shuffling (same index order).
    """
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]
def loadImgDir(path, size, rgb):
    """Read every image file in directory `path` via readImg (listdir order)."""
    return [readImg(path + "/" + entry, size, rgb) for entry in os.listdir(path)]
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
748
|
radrumond/hidra
|
refs/heads/master
|
/args.py
|
"""
Command-line argument parsing.
"""
import argparse
#from functools import partial
import time
import tensorflow as tf
import json
import os
def boolean_string(s):
    """Strictly parse the strings 'True'/'False'; anything else raises ValueError."""
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
def argument_parser():
    """
    Build the argument parser for a training script.

    Side effects: parses sys.argv once to resolve the config path. When no
    --config is given, creates `<checkpoint>/<name>/` and dumps the parsed
    arguments to `<checkpoint>/<name>/<name>.json`. When a config file is
    given, its stored values are installed as parser defaults so the
    returned parser reproduces that run (explicit command-line flags still
    override them).

    Returns:
        argparse.ArgumentParser: callers invoke .parse_args() on it.
    """
    file_time = int(time.time())
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--arch', help='name architecture', default="fcn", type=str)
    parser.add_argument('--seed', help='random seed', default=0, type=int)
    parser.add_argument('--name', help='name add-on', type=str, default='Model_config-' + str(file_time))
    parser.add_argument('--dataset', help='data set to evaluate on', type=str, default='Omniglot')
    parser.add_argument('--data_path', help='path to data folder', type=str, default='/home/')
    parser.add_argument('--config', help='json config file', type=str, default=None)
    parser.add_argument('--checkpoint', help='checkpoint directory', default='model_checkpoint')
    parser.add_argument('--test', help='Testing or Not', action='store_true')
    parser.add_argument('--testintrain', help='Testing during train or Not', action='store_true')
    parser.add_argument('--min_classes', help='minimum number of classes for n-way', default=2, type=int)
    parser.add_argument('--max_classes', help='maximum (excluded) number of classes for n-way', default=2, type=int)
    parser.add_argument('--ttrain_shots', help='number of examples per class in meta train', default=5, type=int)
    parser.add_argument('--ttest_shots', help='number of examples per class in meta test', default=15, type=int)
    parser.add_argument('--etrain_shots', help='number of examples per class in meta train', default=5, type=int)
    parser.add_argument('--etest_shots', help='number of examples per class in meta test', default=15, type=int)
    parser.add_argument('--train_inner_K', help='number of inner gradient steps during meta training', default=5, type=int)
    parser.add_argument('--test_inner_K', help='number of inner gradient steps during meta testing', default=5, type=int)
    parser.add_argument('--learning_rate', help='Adam step size for inner training', default=0.4, type=float)
    parser.add_argument('--meta_step', help='meta-training step size', default=0.01, type=float)
    parser.add_argument('--meta_batch', help='meta-training batch size', default=1, type=int)
    parser.add_argument('--meta_iters', help='meta-training iterations', default=70001, type=int)
    parser.add_argument('--eval_iters', help='meta-training iterations', default=2000, type=int)
    parser.add_argument('--step', help='Checkpoint step to load', default=59999, type=float)
    args = vars(parser.parse_args())
    if args['config'] is None:
        # Fresh run: persist the arguments next to the checkpoints.
        args['config'] = f"{args['checkpoint']}/{args['name']}/{args['name']}.json"
        print(args['config'])
        # Replaces `os.system("mkdir -p ...")`: no shell involved, and safe
        # for names containing spaces/special characters.
        os.makedirs(f"{args['checkpoint']}/{args['name']}", exist_ok=True)
        with open(args['config'], 'w') as write_file:
            print("Json Dumping...")
            json.dump(args, write_file)
    else:
        with open(args['config'], 'r') as open_file:
            args = json.load(open_file)
        # BUG FIX: the loaded configuration used to be discarded (this
        # function returns the parser, not `args`). Install the stored
        # values as parser defaults so the caller's parse_args() sees them.
        parser.set_defaults(**args)
    return parser
def train_kwargs(parsed_args):
    """
    Map the parsed command-line arguments onto the keyword
    arguments expected by the train() function.
    """
    source = parsed_args
    return dict(
        min_classes=source.min_classes,
        max_classes=source.max_classes,
        train_shots=source.ttrain_shots,
        test_shots=source.ttest_shots,
        meta_batch=source.meta_batch,
        meta_iters=source.meta_iters,
        test_iters=source.eval_iters,
        train_step=source.step,
        name=source.name,
    )
def test_kwargs(parsed_args):
"""
Build kwargs for the train() function from the parsed
command-line arguments.
"""
return {
'eval_step' : parsed_args.step,
'min_classes': parsed_args.min_classes,
'max_classes': parsed_args.max_classes,
'train_shots': parsed_args.etrain_shots,
'test_shots': parsed_args.etest_shots,
'meta_batch': parsed_args.meta_batch,
'meta_iters': parsed_args.eval_iters,
'name': parsed_args.name,
}
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
749
|
radrumond/hidra
|
refs/heads/master
|
/test.py
|
import numpy as np
import tensorflow as tf
from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
def test(m, data_sampler,
         eval_step,       # checkpoint step to load
         min_classes,     # minimum number of classes per task
         max_classes,     # maximum number of classes per task (inclusive; +1 below)
         train_shots,     # samples per class in the support split
         test_shots,      # samples per class in the query split
         meta_batch,      # number of tasks per meta batch
         meta_iters,      # number of evaluation iterations
         name):           # experiment name, used for checkpoint paths
    """Meta-test loop: load a checkpoint for model `m` (built in 'test'
    mode) and report losses/accuracies over `meta_iters` sampled tasks."""
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    losses = []     # validation losses per iteration (all inner steps)
    temp_yp = []    # query accuracy per inner step, per iteration
    aps = []        # NOTE(review): appears unused
    buffer = []     # training (support) losses
    lossesB = []    # validation losses (duplicate of `losses` here)
    train_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"test")
    print("TEST MODE")
    m.loadWeights(sess, name, step = str(int(eval_step)), model_name=name+".ckpt")
    for i in range(meta_iters):
        xb1,yb1,xb2,yb2 = next(train_gen)
        # Number of distinct labels actually present in this meta batch.
        num_l = [len(np.unique(np.argmax(yb1,axis=-1)))]
        if m.maml_n == 2:
            # HIDRA master node: re-assign the output heads from the master.
            # NOTE(review): hard-coded [5] vs num_l below — confirm intended.
            sess.run(m.init_assign, feed_dict={m.label_n:[5]})
        l,vals,ps=sess.run([m.test_train_loss,m.test_val_losses,m.val_predictions],feed_dict={m.train_xb: xb1,
                                                                                             m.train_yb: yb1,
                                                                                             m.val_xb:xb2,
                                                                                             m.val_yb:yb2,
                                                                                             m.label_n:num_l})
        losses.append(vals)
        lossesB.append(vals)
        buffer.append(l)
        # Accuracy after each inner adaptation step.
        true_vals = np.argmax(yb2,axis=-1)
        all_accs = []
        for pred_epoch in range(len(ps)):
            all_accs.append(np.mean(np.argmax(ps[pred_epoch],axis=-1)==true_vals))
        temp_yp.append(all_accs)
        # Progress indicator.
        if i%50==0:
            print(f"({i}/{meta_iters})")
    # Final summary: mean (and mean ± std) over all evaluation iterations.
    print(f"Final: TLoss {np.mean(buffer)}, VLoss {np.mean(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}" )
    print(f"Final: TLoss {np.mean(buffer)}-{np.std(buffer)}, VLoss {np.mean(lossesB,axis=0)}-{np.std(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}-{np.std(temp_yp,axis=0)}" )
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
750
|
radrumond/hidra
|
refs/heads/master
|
/main.py
|
## Created by Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
from data_gen.omni_gen import unison_shuffled_copies, OmniChar_Gen, MiniImgNet_Gen
from archs.fcn import Model as mfcn
from archs.hydra import Model as mhyd
from train import *
from test import *
from args import argument_parser, train_kwargs, test_kwargs
import random

# Entry point: parse arguments (argument_parser also parses once internally
# to handle the json config side effects), seed, pick dataset + architecture.
args = argument_parser().parse_args()
random.seed(args.seed)
t_args = train_kwargs(args)
e_args = test_kwargs(args)
print("########## argument sheet ########################################")
for arg in vars(args):
    print(f"#{arg:>15} : {str(getattr(args, arg))} ")
print("##################################################################")
print("Loading Data...")
# Select the dataset loader and the matching input shape.
if args.dataset in ["Omniglot", "omniglot", "Omni", "omni"]:
    loader = OmniChar_Gen(args.data_path)
    isMIN = False
    shaper = [28, 28, 1]
elif args.dataset in ["miniimagenet", "MiniImageNet", "mini"]:
    loader = MiniImgNet_Gen(args.data_path)
    isMIN = True
    shaper = [84, 84, 3]
else:
    raise ValueError("INVALID DATA-SET NAME!")
print("Building Model...")
# Two model instances share variables (AUTO_REUSE): `m` for training,
# `mt` for testing.
if args.arch == "fcn" or args.arch == "maml":
    print("SELECTED: MAML")
    m = mfcn(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
    mt = mfcn(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
elif args.arch == "hydra" or args.arch == "hidra":
    print("SELECTED: HIDRA")
    m = mhyd(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
    mt = mhyd(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
else:
    raise ValueError("INVALID Architecture NAME!")
mode = "train"
if args.test:
mode = "test"
print("Starting Test Step...")
mt.build (K = args.test_inner_K, meta_batchsz = args.meta_batch, mode=mode)
test (mt, loader, **e_args)
else:
modeltest = None
if args.testintrain:
mt.build (K = args.test_inner_K, meta_batchsz = args.meta_batch, mode="test")
modeltest = mt
print("Starting Train Step...")
m.build (K = args.train_inner_K, meta_batchsz = args.meta_batch, mode=mode)
train(m, modeltest, loader, **t_args)
|
{"/archs/fcn.py": ["/archs/maml.py"], "/train.py": ["/data_gen/omni_gen.py"], "/test.py": ["/data_gen/omni_gen.py"], "/main.py": ["/data_gen/omni_gen.py", "/archs/fcn.py", "/archs/hydra.py", "/train.py", "/test.py", "/args.py"]}
|
753
|
RamneekSingh24/Discord-Bot-Codedrills
|
refs/heads/main
|
/main.py
|
# Discord bot entry point: wires '!'-prefixed chat commands to the
# contest handlers (start/score/end contests, manage users, topics).
import requests
from bs4 import BeautifulSoup
import discord
import os
from tabulate import tabulate
import handlers
import pandas as pd
from helpers import get_url, get_problems, trim,load_problems
from handlers import start_contest, update_leaderboard,add_cf_user,users,get_recommendations_topics, set_handle, recommendations_handle
from keep_alive import keep_alive
import weasyprint as wsp
import PIL as pil

client = discord.Client()


@client.event
async def on_ready():
    """Announce the bot identity once connected to the gateway."""
    print('We have logged in as {0.user}'.format(client))


@client.event
async def on_message(message):
    """Dispatch '!'-prefixed chat commands from any user except the bot itself."""
    if message.author == client.user:
        return
    msg = message.content
    params = msg.split(' ')
    # BUG FIX: `params[0][0] != '!'` raised IndexError for messages that
    # start with a space (params[0] == ''); startswith() is safe on ''.
    if not params[0].startswith('!'):
        return
    if params[0] == '!setrc':
        handle = params[1]
        rc = set_handle(handle)
        if rc < 0:
            await message.channel.send('Invalid codeforces handle')
        else:
            # (message typo fixed: was "recommandations")
            await message.channel.send('Done! Getting recommendations from: '+handle+".")
    if params[0] == '!topics':
        msg = get_recommendations_topics(recommendations_handle)
        await message.channel.send(msg)
    if params[0] == '!add':
        username = params[1]
        rc = add_cf_user(username)
        if rc == -1:
            await message.channel.send('User already registered!')
        elif rc == -2:
            await message.channel.send('Not a valid user on CodeForces!')
        else:
            # (message typo fixed: was "Sucessfully")
            await message.channel.send(f"Successfully added {username}")
    elif params[0] == '!all':
        await message.channel.send(users)
    elif params[0] == '!start':
        if handlers.contest_running:
            await message.channel.send("A contest is already Active !")
            return
        # Topic names with spaces are joined with '_' to match the page ids.
        task = "_".join(word for word in params[1:])
        msg = start_contest(task)
        if msg == "error":
            await message.channel.send("Please Try Again!")
        else:
            e = discord.Embed(
                title=f"Problem Set {handlers.ID}\n",
                description=msg,
                color=0xFF5733)
            await message.channel.send(embed=e)
    elif params[0] == '!lb':
        id = params[1] if len(params) > 1 else handlers.ID
        df_lead = update_leaderboard(id)
        # Append a Total column (sum of all problem columns) and sort by it.
        df_lead['Total'] = df_lead[list(df_lead.columns)[1:]].sum(axis=1)
        df_lead.sort_values(by='Total',ascending=False, inplace=True)
        await message.channel.send("```"+tabulate(df_lead, headers='keys', tablefmt='psql', showindex=False)+"```")
    elif params[0] == "!prob":
        id = params[1] if len(params) > 1 else handlers.ID
        msg = load_problems(id)
        e = discord.Embed(
            title=f"Problem Set {handlers.ID}\n",
            description=msg,
            color=0xFF5733)
        await message.channel.send(embed=e)
    elif params[0] == "!end":
        if handlers.contest_running == 0:
            await message.channel.send("No contest is running !")
        else:
            handlers.contest_running = 0
            await message.channel.send("Contest Abandoned !")


keep_alive()
client.run(os.getenv('TOKEN'))
|
{"/main.py": ["/handlers.py", "/helpers.py"], "/handlers.py": ["/helpers.py"]}
|
754
|
RamneekSingh24/Discord-Bot-Codedrills
|
refs/heads/main
|
/helpers.py
|
import requests
from bs4 import BeautifulSoup
import discord
import os
import pandas as pd
import weasyprint as wsp
import PIL as pil
def get_url(task,handle):
    """Return the shareable codedrills URL of the `task` recommendation list.

    BUG FIX: the profile URL was hard-coded to the handle 'jatinmunjal2k',
    silently ignoring the `handle` argument; build it from `handle` instead
    (same URL pattern used elsewhere for the codedrills profile page).
    """
    URL = 'https://recommender.codedrills.io/profile?handles=cf%2F' + handle
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    # The tab for `task` contains an anchor whose title marks the share link.
    result = soup.find(id=task)
    url = result.find(title='An url for sharing and keeping track of solved problems for this recommendation list')
    link = "https://recommender.codedrills.io"+url['href']
    return link
def get_problems(task, ID,handle):
    """Scrape the recommendation list for `task`, persist it as the contest
    CSV ('contests/problems-contest<ID>.csv') and return a markdown listing.
    """
    listing = ""
    names, links = [], []
    page = requests.get(get_url(task, handle))
    soup = BeautifulSoup(page.content, 'html.parser')
    # One table row per recommended problem; each row holds a single anchor.
    for position, row in enumerate(soup.find_all('tr'), start=1):
        anchor = row.find('a')
        listing += "[" + str(position) + "](" + anchor['href'] + ") " + anchor.text + "\n"
        names.append(anchor.text)
        links.append(anchor['href'])
    frame = pd.DataFrame(list(zip(names, links)), columns=['name', 'link'])
    frame.to_csv('contests/problems-contest' + str(ID) + '.csv', index=False)
    return listing
def load_problems(id):
    """Return a markdown-formatted listing of the problems stored for contest `id`."""
    table = pd.read_csv('contests/problems-contest' + str(id) + '.csv')
    parts = ["{} [Link]({})\n".format(row['name'], row['link'])
             for _, row in table.iterrows()]
    return "".join(parts)
def trim(source_filepath, target_filepath=None, background=None):
    """Crop the uniform border from an image and save the result.

    The border color defaults to the top-left pixel; when `target_filepath`
    is omitted the source file is overwritten in place.
    """
    # BUG FIX: `import PIL as pil` does not expose PIL.Image / PIL.ImageChops
    # as attributes; import the submodules explicitly so the calls resolve.
    from PIL import Image, ImageChops
    if not target_filepath:
        target_filepath = source_filepath
    img = Image.open(source_filepath)
    if background is None:
        background = img.getpixel((0, 0))
    # Diff against a solid background; the bounding box of the non-zero
    # region is the content to keep (None means the image is all background).
    border = Image.new(img.mode, img.size, background)
    diff = ImageChops.difference(img, border)
    bbox = diff.getbbox()
    img = img.crop(bbox) if bbox else img
    img.save(target_filepath)
|
{"/main.py": ["/handlers.py", "/helpers.py"], "/handlers.py": ["/helpers.py"]}
|
755
|
RamneekSingh24/Discord-Bot-Codedrills
|
refs/heads/main
|
/handlers.py
|
import requests
from bs4 import BeautifulSoup
import discord
import os
from tabulate import tabulate
import pandas as pd
from helpers import get_url, get_problems, trim,load_problems
from keep_alive import keep_alive
import weasyprint as wsp
import PIL as pil
# Module-level state shared by the command handlers.
# NOTE(review): `global` at module scope is a no-op; kept as documentation only.
global ID, contest_running, users, recommendations_handle
ID = 0  # current contest number (incremented by start_contest)
contest_running = 0  # 1 while a contest is active
users = []  # registered codeforces handles
recommendations_handle = 'jatinmunjal2k'  # profile used for recommendations
def get_recommendations_topics(handle='jatinmunjal2k'):
    """Fetch the codedrills profile of `handle` and list its topic tabs
    as a comma-separated string prefixed with 'Available Topics:'."""
    profile_url = 'https://recommender.codedrills.io/profile?handles=cf%2F' + handle
    response = requests.get(profile_url)
    soup = BeautifulSoup(response.content, 'html.parser')
    nav = soup.find("ul", class_="nav nav-pills")
    listing = "Available Topics:\n"
    for item in nav.find_all('li'):
        listing += item.text.strip() + ", "
    # drop the trailing ", "
    return listing[:-2]
def set_handle(handle):
    """Point future recommendations at `handle`.

    Returns -1 when the handle is not a valid codeforces profile, else 0.
    """
    global recommendations_handle
    probe = requests.head('https://codeforces.com/profile/' + handle)
    if probe.status_code != 200:
        return -1
    recommendations_handle = handle
    return 0
def start_contest(task):
    """Start a contest for `task`: fetch the problems and init the leaderboard.

    Returns the problem listing, or the string "error" on any failure
    (the contest counter is rolled back in that case).
    """
    global ID, contest_running
    try:
        ID += 1
        problems_str = get_problems(task, ID,recommendations_handle)
        init_leaderboard(ID)
        contest_running = 1
        return problems_str
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; keep the best-effort "error" contract otherwise.
    except Exception:
        ID -= 1
        return "error"
def add_cf_user(cf_handle):
    """Register a codeforces user for contests.

    Returns -1 for a duplicate, -2 for an unknown handle, 1 on success.
    When a contest is live, the user is also appended (zero scores) to
    the current leaderboard CSV.
    """
    global users
    if cf_handle in users:
        return -1
    probe = requests.head('https://codeforces.com/profile/' + cf_handle)
    if probe.status_code != 200:
        return -2
    users.append(cf_handle)
    if contest_running == 1:
        board_path = 'contests/leaderboard' + str(ID) + '.csv'
        board = pd.read_csv(board_path)
        board.loc[len(board)] = [cf_handle] + [0] * (board.shape[1] - 1)
        board.to_csv(board_path, index = False)
    return 1
# def print_leaderboard(id, img_filepath):
# df_leaderboard = pd.read_csv('contests/leaderboard'+str(id)+'.csv')
# css = wsp.CSS(string='''
# @page { size: 2048px 2048px; padding: 0px; margin: 0px; }
# table, td, tr, th { border: 1px solid black; }
# td, th { padding: 4px 8px; }
# ''')
# html = wsp.HTML(string=df_leaderboard.to_html(index=False))
# html.write_png(img_filepath, stylesheets=[css])
# trim(img_filepath)
def init_leaderboard(id):
    """Create the scoreboard CSV for contest `id`: one row per registered
    user, one zero-filled column per problem (columns named 1..N)."""
    problems = pd.read_csv('contests/problems-contest' + str(id) + '.csv')['name']
    zero_columns = [[0] * len(users) for _ in range(len(problems))]
    board = pd.DataFrame(data=list(zip(users, *zero_columns)),
                         columns=['User'] + list(range(1, len(problems) + 1)))
    board.to_csv('contests/leaderboard' + str(id) + '.csv', index=False)
# print_leaderboard(id, img_filepath)
def update_leaderboard(id):
    """Re-score contest `id` by scraping each user's codeforces submissions.

    A problem column is set to 1 when an accepted submission matches the
    problem link; the updated leaderboard is saved and returned.
    """
    global users
    df_prob = pd.read_csv('contests/problems-contest' + str(id) + '.csv')
    df_lead = pd.read_csv('contests/leaderboard' + str(id) + '.csv')
    for idxu, ru in df_lead.iterrows():
        user = ru['User']
        URL = 'https://codeforces.com/submissions/' + user
        page = requests.get(URL)
        soup = BeautifulSoup(page.content, 'html.parser')
        submissions = soup.find_all('tr')
        ac = []  # '/<contest>/<problem>' fragments of accepted submissions
        for submission in submissions:
            data = submission.find_all('td')
            try:
                url = data[3].find('a')['href'].split('/')
                verdict = data[5].text
                if 'Accepted' in verdict:
                    ac.append('/' + url[2] + '/' + url[-1])
            # BUG FIX: was a bare `except:`; header/short rows raise exactly
            # these (missing cells, no anchor, no href) — skip only those.
            except (IndexError, TypeError, KeyError):
                continue
        # Column names are strings after read_csv, hence str(j).
        j = 0
        for idx, row in df_prob.iterrows():
            j += 1
            link = row['link']
            for pid in ac:
                if pid in link:
                    df_lead.at[idxu, str(j)] = 1
    df_lead.to_csv('contests/leaderboard' + str(id) + '.csv', index = False)
    return df_lead
|
{"/main.py": ["/handlers.py", "/helpers.py"], "/handlers.py": ["/helpers.py"]}
|
756
|
rakshitshah-28/APSITSkills-Project
|
refs/heads/master
|
/Part_5.py
|
# Write a Python program to check whether a
# specified value is contained in a group of values.
# 3 -> [1, 5, 8, 3] : True -1 -> [1, 5, 8, 3] : False
import random
def check_in_group():
    """Interactively check whether an entered integer is in [1, 5, 8, 3].

    Prints 'True'/'False' and returns; entering -1 returns to the menu.
    """
    while True:
        test_case = [1, 5, 8, 3]
        print('\nEnter \'-1\' to QUIT.')
        value = input('Enter - ')
        try:
            value = int(value)
        # BUG FIX: was a bare `except:`; only int() parsing can fail here.
        except ValueError:
            print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
            continue
        if value == -1:
            print('\tTHANK YOU.\n\tRETURNING TO MAIN MENU.\n')
            break
        if value in test_case:
            print('True')
            break
        else:
            print('False')
            continue
# in case needed.
def check_random():
    """Like check_in_group, but against a randomly generated list.

    First prompts for the list length (filled with random digits 0-9),
    then checks membership of an entered integer; -1 returns to the menu.
    """
    while True:
        test_case = list()
        length = input('\nEnter Length of the test_case - ')
        try:
            length = int(length)
        # BUG FIX: was a bare `except:`; only int() parsing can fail here.
        except ValueError:
            print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
            continue
        for _ in range(length):
            test_case.append(random.choice(range(10)))
        break
    # print(test_case)
    while True:
        print('\nEnter \'-1\' to QUIT.')
        value = input('Enter - ')
        try:
            value = int(value)
        # BUG FIX: was a bare `except:` here as well.
        except ValueError:
            print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
            continue
        if value == -1:
            print('\tTHANK YOU.\n\tRETURNING TO MAIN MENU.\n')
            break
        if value in test_case:
            print('True')
            break
        else:
            print('False')
            continue
|
{"/CheckingWithProgram.py": ["/Part_5.py"]}
|
757
|
rakshitshah-28/APSITSkills-Project
|
refs/heads/master
|
/CheckingWithProgram.py
|
# Menu-Driven program: dispatches to the exercise functions in Part_1..Part_5.
import string
import random
# just for reference purposes.
from Part_1 import all_prime
from Part_2 import even_odd
from Part_3 import prime_composite
from Part_4 import vowel_consonant
from Part_5 import check_in_group

while True:
    print('\nChoose your Option - ')
    print('0. Exit')
    print('1. Print Prime Numbers between 1 to 1000.')
    print('2. To Find whether Number is ODD or EVEN.')
    print('3. To Find whether Number is PRIME or COMPOSITE.')
    print('4. To Find whether Alphabet is VOWEL or NOT.')
    print('5. To Check specified Value n Group of Values')
    option = input('Enter - ')
    try:
        option = int(option)
    # BUG FIX: was a bare `except:`; only int() parsing can fail here.
    except ValueError:
        print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
        continue
    if (option < 0 or option > 5):
        print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
        continue
    if option == 0:
        print('\n\tTHANK YOU FOR JOINING US!')
        exit(-1)
    elif option == 1:
        all_prime()
    elif option == 2:
        even_odd()
    elif option == 3:
        prime_composite()
    elif option == 4:
        vowel_consonant()
    elif option == 5:
        check_in_group()
|
{"/CheckingWithProgram.py": ["/Part_5.py"]}
|
794
|
hamzaalkharouf/House-Price-prediction
|
refs/heads/master
|
/scikit_learn.py
|
import pickle
import argparse
import numpy as np
#take model
#Calculate price from scikit
def path(list_data):
    """Predict a house price with the pickled model given on the command line.

    The model path is read from the -path/--path CLI argument; `list_data`
    must hold the 6 features of one sample.  Returns the scalar prediction.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-path","--path",type = str)
    args = parser.parse_args()
    # './model.pickle'
    # BUG FIX: the handle from open() was never closed; use a context manager.
    # NOTE(review): unpickling is only safe for trusted model files.
    with open(args.path, 'rb') as model_file:
        loaded_model = pickle.load(model_file)
    x = np.array(list_data).reshape(1,6)
    result = loaded_model.predict(x)
    if x.shape[0] == 1:
        result = result[0]
    return result
|
{"/app.py": ["/scikit_learn.py", "/Write_Csv.py"]}
|
795
|
hamzaalkharouf/House-Price-prediction
|
refs/heads/master
|
/app.py
|
from flask import Flask,request
import scikit_learn
import Write_Csv
app = Flask(__name__)
#append data(from url) to list
def Data_append(x1,x2,x3,x4,x5,x6):
    """Collect the six request features into a list, preserving call order."""
    return [x1, x2, x3, x4, x5, x6]
#route /
#take data from url then send them to scikit_learn of Calculate price from scikit
#return information
@app.route('/')
def hello_world():
    """Predict a house price from query-string features and render the result.

    The six features are read from the URL, sent to the model, and the new
    record (features + price) is appended to the CSV via Write_Csv.
    """
    feature_names = ['transaction_date', 'house_age',
                     'distance_to_the__nearest_MRT_station',
                     'number_of_convenience_stores', 'latitude', 'longitude']
    features = [float(request.args.get(name)) for name in feature_names]
    list_data = Data_append(*features)
    price = scikit_learn.path(list_data)
    list_data.append(price)
    Write_Csv.Write_Csv(list_data)
    return '''<h3>
    transaction date : {}<br>
    house age= {}<br>
    distance to the nearest MRT station= {}<br>
    number of convenience stores= {}<br>
    latitude= {}<br>
    longitude= {}<br>
    price ={}
    </h3>'''.format(*features, price)
#to run servier => py app.py -path ./model.pickle
# Start the dev server; reloader disabled (a reload would re-run argv parsing).
if __name__ == '__main__':
    app.run(port=5060,debug=False,use_reloader=False)
# http://127.0.0.1:5060/?transaction_date=2017.917&house_age=10&distance_to_the__nearest_MRT_station=306.59470&number_of_convenience_stores=15&latitude=24.98034&longitude=121.53951
|
{"/app.py": ["/scikit_learn.py", "/Write_Csv.py"]}
|
796
|
hamzaalkharouf/House-Price-prediction
|
refs/heads/master
|
/Write_Csv.py
|
import pandas as pd
import os
# Absolute path to the dataset CSV next to this file.
# NOTE(review): 'csv\\Real estate.csv' uses a Windows separator inside the
# component, so this path breaks on POSIX — confirm deployment target.
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
my_file_estate = os.path.join(THIS_FOLDER,'csv\\Real estate.csv')
def Write_Csv(list_data):
    """Append one record (6 features + predicted price) to the estate CSV.

    The new sequential 'No' value is derived from the last row of the file.
    """
    df = pd.read_csv(my_file_estate)
    number = df['No'].values[-1]
    number += 1
    # BUG FIX: the original compared each element against list_data[6] *by
    # value* to decide where the row ends, so any feature equal to the price
    # terminated the line early.  Join by position instead, and use a context
    # manager so the file handle is always closed.
    row = [str(number)] + [str(value) for value in list_data]
    with open(my_file_estate, "a") as fh:
        fh.write(",".join(row) + "\n")
    df.reset_index(drop = True,inplace=True)
|
{"/app.py": ["/scikit_learn.py", "/Write_Csv.py"]}
|
797
|
Dorencon/Classification-and-detection
|
refs/heads/master
|
/ie_classifier.py
|
from openvino.inference_engine import IECore
import cv2
import numpy as np
class InferenceEngineClassifier:
    """Thin wrapper around an OpenVINO classification network.

    Loads the IR model (configPath/weightsPath) on `device`, reads the
    class names from `classesPath` (one per line), and exposes
    classify() + get_top() helpers.
    """

    def __init__(self, configPath = None, weightsPath = None, device = None, extension = None, classesPath = None):
        IEc = IECore()
        # Custom-layer extensions are CPU-only.
        if (extension and device == "CPU"):
            IEc.add_extension(extension, device)
        self.net = IEc.read_network(configPath, weightsPath)
        self.exec_net = IEc.load_network(self.net, device_name=device)
        with open(classesPath, 'r') as f:
            self.classes = [i.strip() for i in f]

    def _prepare_image(self, image, h, w):
        # Resize to the network input size and reorder HWC -> CHW.
        image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))
        return image

    def classify(self, image):
        """Run one inference pass on `image` and return the raw output blob."""
        input_blob = next(iter(self.net.inputs))
        out_blob = next(iter(self.net.outputs))
        n, c, h, w = self.net.inputs[input_blob].shape
        image = self._prepare_image(image, h, w)
        output = self.exec_net.infer(inputs={input_blob: image})
        output = output[out_blob]
        return output

    def get_top(self, prob, topN = 1):
        """Return the `topN` highest-probability classes as
        [name, '%.15f'-formatted probability] pairs, best first."""
        prob = np.squeeze(prob)
        top = np.argsort(prob)
        out = []
        # BUG FIX: the slice was hard-coded to 1000 classes
        # (top[1000 - topN:1000]), returning [] for any other class count.
        for i in top[len(prob) - topN:]:
            out.append([self.classes[i], '{:.15f}'.format(prob[i])])
        out.reverse()
        return out
|
{"/classification_sample.py": ["/ie_classifier.py"], "/detection_sample.py": ["/ie_detector.py"]}
|
798
|
Dorencon/Classification-and-detection
|
refs/heads/master
|
/classification_sample.py
|
import ie_classifier as ic
import argparse
import logging as log
import sys
import cv2
def build_argparser():
    """Build the CLI parser for the classification sample (-m/-w/-i required)."""
    p = argparse.ArgumentParser()
    p.add_argument('-m', '--model', help='Path to an .xml \
        file with a trained model.', required=True, type=str)
    p.add_argument('-w', '--weights', help='Path to an .bin file \
        with a trained weights.', required=True, type=str)
    p.add_argument('-i', '--input', help='Path to \
        image file', required=True, type=str)
    p.add_argument('-c', '--classes', help='File containing \
        classnames', type=str, default=None)
    p.add_argument('-d', '--device', help='Device name',
                   default = "CPU", type = str)
    p.add_argument('-e', '--cpu_extension', help='For custom',
                   default = None, type = str)
    return p
def main():
    """Parse CLI args, classify the input image and log the top-5 labels."""
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    log.info("Start IE classification sample")
    classifier = ic.InferenceEngineClassifier(configPath=args.model,
                                              weightsPath=args.weights,
                                              device=args.device,
                                              extension=args.cpu_extension,
                                              classesPath=args.classes)
    image = cv2.imread(args.input)
    predictions = classifier.get_top(classifier.classify(image), 5)
    log.info("Predictions: " + str(predictions))
    return
# Script entry point.
if __name__ == '__main__':
    sys.exit(main())
|
{"/classification_sample.py": ["/ie_classifier.py"], "/detection_sample.py": ["/ie_detector.py"]}
|
799
|
Dorencon/Classification-and-detection
|
refs/heads/master
|
/ie_detector.py
|
from openvino.inference_engine import IECore
import cv2
import numpy as np
class InferenceEngineDetector:
    """Thin wrapper around an OpenVINO detection network.

    Loads the IR model on `device`, reads class names from `classesPath`
    (one per line) and exposes detect() + draw_detection() helpers.
    """

    def __init__(self, configPath = None, weightsPath = None,
                 device = None, extension = None, classesPath = None):
        core = IECore()
        # Custom-layer extensions are CPU-only.
        if extension and device == 'CPU':
            core.add_extension(extension, device)
        self.net = core.read_network(configPath, weightsPath)
        self.exec_net = core.load_network(self.net, device_name = device)
        with open(classesPath, 'r') as names_file:
            self.classes = [line.strip() for line in names_file]

    def _prepare_image(self, image, h, w):
        # Resize to the network input size and reorder HWC -> CHW.
        resized = cv2.resize(image, (w, h))
        return resized.transpose((2, 0, 1))

    def detect(self, image):
        """Run one inference pass on `image` and return the raw detection blob."""
        in_name = next(iter(self.net.inputs))
        out_name = next(iter(self.net.outputs))
        n, c, h, w = self.net.inputs[in_name].shape
        blob = self._prepare_image(image, h, w)
        return self.exec_net.infer(inputs={in_name: blob})[out_name]

    def draw_detection(self, detections, image, confidence = 0.5, draw_text = True):
        """Draw green boxes (and optionally red labels) for every detection
        scoring above `confidence`; returns the annotated image."""
        detections = np.squeeze(detections)
        h, w, c = image.shape
        for det in detections:
            # det layout: [image_id, class_id, score, x_min, y_min, x_max, y_max]
            if det[2] <= confidence:
                continue
            top_left = (int(det[3] * w), int(det[4] * h))
            bottom_right = (int(det[5] * w), int(det[6] * h))
            image = cv2.rectangle(image, top_left, bottom_right, (0, 255, 0), 1)
            if draw_text:
                label = self.classes[int(det[1])] \
                    + ' ' + str('{:.2f}'.format(det[2] * 100)) + '%'
                image = cv2.putText(image, label,
                                    (int(det[3] * w - 5), int(det[4] * h - 5)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                                    (0, 0, 255), 1)
        return image
|
{"/classification_sample.py": ["/ie_classifier.py"], "/detection_sample.py": ["/ie_detector.py"]}
|
800
|
Dorencon/Classification-and-detection
|
refs/heads/master
|
/detection_sample.py
|
import ie_detector as id
import logging as log
import cv2
import argparse
import sys
def build_argparser():
    """Build the CLI parser for the detection sample (-m/-w/-i required)."""
    p = argparse.ArgumentParser()
    p.add_argument('-m', '--model', help = 'Path to an .xml \
        file with a trained model.', required = True, type = str)
    p.add_argument('-w', '--weights', help = 'Path to an .bin file \
        with a trained weights.', required = True, type = str)
    p.add_argument('-i', '--input', help = 'Path to \
        image file.', required = True, type = str)
    p.add_argument('-d', '--device', help = 'Device name',
                   default='CPU', type = str)
    p.add_argument('-l', '--cpu_extension',
                   help = 'MKLDNN (CPU)-targeted custom layers. \
        Absolute path to a shared library with the kernels implementation',
                   type = str, default=None)
    p.add_argument('-c', '--classes', help = 'File containing \
        classnames', type = str, default=None)
    return p
def main():
    """Parse CLI args, run detection on the input image and show the result."""
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    log.info("Start IE detection sample")
    detector = id.InferenceEngineDetector(configPath=args.model,
                                          weightsPath=args.weights,
                                          device=args.device,
                                          extension=args.cpu_extension,
                                          classesPath=args.classes)
    img = cv2.imread(args.input)
    annotated = detector.draw_detection(detector.detect(img), img)
    cv2.imshow('Image with detections', annotated)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return
# Script entry point.
if (__name__ == '__main__'):
    sys.exit(main())
|
{"/classification_sample.py": ["/ie_classifier.py"], "/detection_sample.py": ["/ie_detector.py"]}
|
803
|
bernarducs/mei
|
refs/heads/master
|
/init.py
|
from selenium import webdriver
def config(path_folder: str, headless: bool):
    """Build a Firefox WebDriver that silently downloads CSVs into `path_folder`."""
    profile = webdriver.FirefoxProfile()
    # Download to a custom dir without prompts, and auto-save CSV responses.
    prefs = {
        "browser.download.folderList": 2,
        "browser.download.manager.showWhenStarting": False,
        "browser.download.dir": path_folder,
        "browser.helperApps.neverAsk.saveToDisk": "application/csv",
        "dom.disable_beforeunload": True,
        "browser.download.manager.closeWhenDone": True,
    }
    for key, value in prefs.items():
        profile.set_preference(key, value)
    opts = webdriver.FirefoxOptions()
    if headless:
        opts.add_argument('-headless')
    return webdriver.Firefox(profile, options=opts)
|
{"/mei.py": ["/helpers.py"], "/bot.py": ["/init.py", "/mei.py", "/helpers.py"]}
|
804
|
bernarducs/mei
|
refs/heads/master
|
/mei.py
|
import os
import time
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.support.wait import WebDriverWait
from helpers import print_timestamp
class Mei:
    """Base scraper for the Receita Federal MEI report pages.

    Drives a Selenium WebDriver through the report form, exports the CSV
    and manages the downloaded files.  Subclasses provide the XPaths of
    the concrete report and may override `nome_arquivo`.
    """

    def __init__(self, driver, files_path, uf):
        # driver: configured Firefox WebDriver (see init.config)
        # files_path: download folder, made absolute against the cwd
        # uf: state name exactly as shown in the site's UF listbox
        self.driver = driver
        self.files_path = os.path.join(os.getcwd(), files_path)
        # print(self.files_path)
        self.uf = uf

    def _retorna_xpath(self, driver, timeout, freq, xpath):
        # Poll every `freq` s (up to `timeout` s) until the element exists,
        # then return it; raises TimeoutException otherwise.
        wbw = WebDriverWait(driver=driver,
                            timeout=timeout,
                            poll_frequency=freq)
        wbw.until(presence_of_element_located((By.XPATH, xpath)),
                  "Elemento não encontrado.")
        xpath = driver.find_element_by_xpath(xpath)
        return xpath

    def retorna_tabela(self, xpath_btn_consulta, xpath_tab_completa):
        # Click "consultar" and wait (up to 150 s) for the result table.
        # Returns True when the table loaded, False on timeout.
        time.sleep(2)
        print('Extraindo tabela.', print_timestamp())
        tentativas = [1, 2, 3]
        for i in tentativas:
            print(f"Tentativa {i} de 3...")
            self.driver.find_element_by_xpath(xpath_btn_consulta).click()
            try:
                self._retorna_xpath(self.driver, 150, 5, xpath_tab_completa)
                print('Tabela carregada.', print_timestamp())
                return True
            except TimeoutException:
                print('Tabela não foi carregada.')
                # NOTE(review): returning here exits on the FIRST timeout,
                # so the 3-attempt loop never actually retries — confirm.
                return False

    def del_arquivos_inuteis(self):
        # Remove leftover raw exports ('relatorio_mei*') from the download dir.
        files_path = self.files_path
        for file in os.listdir(files_path):
            if file[:13] == 'relatorio_mei':
                os.remove(os.path.join(files_path, file))

    def renomeia_arquivo(self):
        # Rename the site's fixed export name to the date-stamped per-UF name.
        files_path = self.files_path
        uf = self.uf
        file = r'relatorio_mei.csv'
        if file in os.listdir(files_path):
            old_file = os.path.join(files_path, file)
            new_file = self.nome_arquivo(uf)
            new_file = os.path.join(files_path, new_file)
            try:
                os.rename(old_file, new_file)
                print(f"Arquivo renomeado para {new_file} " + print_timestamp())
            except FileExistsError:
                print("Arquivo já existe.")

    def verifica_arquivo(self):
        # Return the expected file name if today's export already exists
        # (creating the download dir on first use), otherwise False.
        files_path = self.files_path
        if not os.path.exists(files_path):
            os.mkdir(files_path)
            print(f"Arquivos baixados ficarão na pasta {files_path}.")
        uf = self.uf
        name = self.nome_arquivo(uf)
        if name in os.listdir(files_path):
            return name
        else:
            return False

    def nome_arquivo(self, uf):
        # Per-UF, per-day target file name; overridden by subclasses.
        data = print_timestamp(now=False)
        return f"{uf}_cnae_e_municipios_{data}.csv"

    def exporta_csv(self):
        # Click the CSV export button and give the download 10 s to finish.
        driver = self.driver
        xpath_btn_exportar = '//*[@id="form:botaoExportarCsv"]'
        driver.find_element_by_xpath(xpath_btn_exportar).click()
        time.sleep(10)
        print('Download concluído.', print_timestamp())

    def abre_browser(self):
        # Open the report start page, retrying forever on timeout.
        # NOTE(review): on TimeoutException the driver is quit but the loop
        # retries with the same (now dead) driver — confirm intended.
        url = 'http://www22.receita.fazenda.gov.br/inscricaomei/private/pages/relatorios/opcoesRelatorio.jsf#'
        xpath = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/p'
        while True:
            driver = self.driver
            try:
                driver.get(url)
                print('Browser iniciado. ' + print_timestamp())
                print('Extraindo ' + self.uf + '...')
                self._retorna_xpath(driver, 15, 5, xpath)
                break
            except TimeoutException as e:
                driver.quit()
                print(e)

    def carrega_pagina_relatorio(self, xpath_page):
        # Navigate to the report tab identified by `xpath_page`.
        driver = self.driver
        page = driver.find_element_by_xpath(xpath_page)
        page.click()

    def uf_listbox(self, xpath_listbox):
        # Select this scraper's UF in the page's UF listbox.
        time.sleep(5)
        driver = self.driver
        uf = self.uf
        el = driver.find_element_by_xpath(xpath_listbox)
        for option in el.find_elements_by_tag_name('option'):
            if option.text == uf:
                option.click()
                break
class MeiCnaeMunicipio(Mei):
    """Scraper for the per-municipality x CNAE MEI report."""

    # XPaths of the report page elements used by this report.
    xpath_page = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/ul/li[6]/a'
    xpath_listbox = '//*[@id="form:uf"]'
    xpath_municipios = '//*[@id="form:listaMunicipiosUF"]'
    xpath_relatorio = '//*[@id="form:listaMunicipiosRelatorio"]'
    xpath_btn_inserir = '//*[@id="form:btnInserir"]'
    xpath_btn_consulta = '//*[@id="form:botaoConsultar"]'
    xpath_tab_completa = '//*[@id="form:j_id62"]'

    def __init__(self, driver, files_path, uf):
        super().__init__(driver, files_path, uf)

    def verifica_listbox_municipios(self):
        # Move every municipality of the UF from the source listbox to the
        # report listbox, retrying up to 3 times; returns True on success.
        driver = self.driver
        for tries in [1, 2, 3]:
            print(f"Carregando municípios. Tentativa {tries}/3.", print_timestamp())
            time.sleep(5)
            # check that the 1st listbox is populated
            # ('BRASILIA' is the single-entry special case for DF)
            cities = driver.find_element_by_xpath(self.xpath_municipios)
            n_cities = len(cities.text.split('\n'))
            if n_cities > 1 or cities.text == 'BRASILIA':
                # Select all entries (first option + SHIFT+END) and insert them.
                cities.find_elements_by_tag_name('option')[0].click()
                cities.send_keys(Keys.SHIFT, Keys.END)
                driver.find_element_by_xpath(self.xpath_btn_inserir).click()
                time.sleep(5)
                # check that the 2nd listbox is populated
                rel = driver.find_element_by_xpath(self.xpath_relatorio)
                n_rel = len(rel.text.split('\n'))
                if n_rel > 1 or rel.text == 'BRASILIA':
                    print("Municipíos carregados.")
                    break
            # conditions not met on the last attempt -> give up
            if n_cities <= 1 and tries == 3:
                print("Não foi possível carregar os municípios.")
                return False
        return True
class MeiCnaeSexoUF(Mei):
    """Scraper for the CNAE x sex per-UF MEI report (no municipality step)."""

    # XPaths of the report page elements used by this report.
    xpath_page = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/ul/li[7]/a'
    xpath_listbox = '//*[@id="form:uf"]'
    xpath_municipios = '//*[@id="form:municipioUF"]'
    xpath_btn_consulta = '//*[@id="form:botaoConsultar"]'
    xpath_tab_completa = '//*[@id="form:botaoExportarCsv"]'

    def __init__(self, driver, files_path, uf):
        super().__init__(driver, files_path, uf)

    def nome_arquivo(self, uf):
        # Same naming pattern as the base class, tagged 'sexo' instead.
        stamp = print_timestamp(now=False)
        return "{}_cnae_e_sexo_{}.csv".format(uf, stamp)
|
{"/mei.py": ["/helpers.py"], "/bot.py": ["/init.py", "/mei.py", "/helpers.py"]}
|
805
|
bernarducs/mei
|
refs/heads/master
|
/bot.py
|
import os
import fire
from selenium.common.exceptions import NoSuchElementException, \
WebDriverException, NoSuchWindowException
from init import config
from mei import MeiCnaeMunicipio, MeiCnaeSexoUF
from helpers import retorna_ufs
def ufs_por_municipio_cnae(pasta="arquivos", invisivel=True):
    """Run the per-municipality/CNAE extraction for every UF in the list file."""
    for uf in retorna_ufs():
        uf_por_municipio_cnae(uf=uf, pasta=pasta, invisivel=invisivel)
def uf_por_municipio_cnae(uf="PERNAMBUCO", pasta="arquivos", invisivel=True):
    """Download the MEI per-municipality/CNAE CSV for one `uf`.

    Skips the download when today's file already exists; otherwise drives
    the scraper, exports the CSV and renames it to the final name.
    """
    download_dir = os.path.join(os.getcwd(), pasta)
    driver = config(download_dir, headless=invisivel)
    scraper = MeiCnaeMunicipio(driver, download_dir, uf)
    existing = scraper.verifica_arquivo()
    if existing:
        print(f"O arquivo {existing} já existe.")
        return
    scraper.del_arquivos_inuteis()
    try:
        scraper.abre_browser()
        scraper.carrega_pagina_relatorio(scraper.xpath_page)
        scraper.uf_listbox(scraper.xpath_listbox)
        if scraper.verifica_listbox_municipios():
            if scraper.retorna_tabela(scraper.xpath_btn_consulta,
                                      scraper.xpath_tab_completa):
                scraper.exporta_csv()
                scraper.renomeia_arquivo()
            else:
                print(f"Não foi possível exportar o arquivo")
        else:
            print(f"Não foi possível exportar o arquivo.")
        driver.quit()
    except (NoSuchElementException, WebDriverException,
            NoSuchWindowException) as e:
        print(e)
        driver.quit()
        print("Não foi possível exportar o arquivo.")
def ufs_por_sexo_cnae(pasta="arquivos", invisivel=True):
    """Run the CNAE/sex extraction for every UF in the list file."""
    for uf in retorna_ufs():
        uf_por_sexo_cnae(uf=uf, pasta=pasta, invisivel=invisivel)
def uf_por_sexo_cnae(uf="PERNAMBUCO", pasta="arquivos", invisivel=True):
    """Download the MEI CNAE-by-sex CSV for one `uf`.

    Skips the download when today's file already exists; otherwise drives
    the scraper, exports the CSV and renames it to the final name.
    """
    download_dir = os.path.join(os.getcwd(), pasta)
    driver = config(download_dir, headless=invisivel)
    scraper = MeiCnaeSexoUF(driver, download_dir, uf)
    existing = scraper.verifica_arquivo()
    if existing:
        print(f"O arquivo {existing} já existe.")
        return
    scraper.del_arquivos_inuteis()
    try:
        scraper.abre_browser()
        scraper.carrega_pagina_relatorio(scraper.xpath_page)
        scraper.uf_listbox(scraper.xpath_listbox)
        if scraper.retorna_tabela(scraper.xpath_btn_consulta,
                                  scraper.xpath_tab_completa):
            scraper.exporta_csv()
            scraper.renomeia_arquivo()
        else:
            print(f"Não foi possível exportar o arquivo")
        driver.quit()
    except (NoSuchElementException, WebDriverException,
            NoSuchWindowException) as e:
        print(e)
        driver.quit()
        print("Não foi possível exportar o arquivo.")
# Expose the extraction functions as a command-line interface via python-fire.
if __name__ == '__main__':
    fire.Fire()
|
{"/mei.py": ["/helpers.py"], "/bot.py": ["/init.py", "/mei.py", "/helpers.py"]}
|
806
|
bernarducs/mei
|
refs/heads/master
|
/helpers.py
|
import time
def print_timestamp(now=True):
    """Return the current local time as a string.

    With now=True: 'D/M/YYYY H:M:S' (unpadded) for log lines.
    With now=False: a compact zero-padded 'YYYYMMDD' stamp for file names.
    """
    ts = time.localtime(time.time())
    if not now:
        return '{:04d}{:02d}{:02d}'.format(ts.tm_year, ts.tm_mon, ts.tm_mday)
    return '{}/{}/{} {}:{}:{}'.format(ts.tm_mday, ts.tm_mon, ts.tm_year,
                                      ts.tm_hour, ts.tm_min, ts.tm_sec)
def retorna_ufs():
    """Read the UF names from 'lista de uf.txt' (latin-1), one per line."""
    with open('lista de uf.txt', 'r', encoding='latin-1') as f:
        file = f.readlines()
    # BUG FIX: the original stripped the trailing newline with `uf[:-1]`,
    # which chops the last CHARACTER of the final entry when the file does
    # not end with a newline; rstrip('\n') is safe in both cases.
    ufs = [uf.rstrip('\n') for uf in file]
    return ufs
|
{"/mei.py": ["/helpers.py"], "/bot.py": ["/init.py", "/mei.py", "/helpers.py"]}
|
808
|
limkokholefork/Answerable
|
refs/heads/main
|
/tools/spider.py
|
"""Spider Tool for Answerable
This file contains the functions used to wrapp requests following
respecful practices, taking into account robots.txt, conditional
gets, caching contente, etc.
"""
import json
import requests
# from random import random as rnd
from time import sleep
from datetime import timedelta as td
import feedparser
from urllib.robotparser import RobotFileParser
from urllib.parse import urlparse
from tools import cache
from tools.displayer import fg, bold, green, yellow, red
from tools.log import log, abort
_rp = {} # robots.txt memory
class _FalseResponse:
"""Object with the required fields to simulate a HTTP response"""
def __init__(self, code, content):
self.status_code = code
self.content = content
def ask_robots(url: str, useragent: str) -> bool:
    """Tell whether `useragent` may scrape `url` per the site's robots.txt.

    Parsers are memoized per netloc in the module-level _rp dict, so each
    robots.txt is downloaded and parsed only once per run.
    """
    parts = urlparse(url)
    host = parts.netloc
    parser = _rp.get(host)
    if parser is None:
        parser = RobotFileParser()
        parser.set_url(parts.scheme + "://" + host + "/robots.txt")
        parser.read()
        _rp[host] = parser
    return parser.can_fetch(useragent, url)
def get(url, delay=2, use_cache=True, max_delta=td(hours=12)):
    """Respectful wrapper around requests.get

    Checks the cache first, honors robots.txt, sleeps `delay` seconds
    before the request, and aborts the whole program on HTTP 429.

    NOTE(review): on a cache hit the returned .content is a str, while a
    live requests.Response carries bytes — callers must accept both.
    """
    useragent = "Answerable v0.1"
    # If a cached answer exists and is acceptable, then return the cached one.
    cache_file = url.replace("/", "-")
    if use_cache:
        log("Checking cache before petition {}", fg(url, yellow))
        hit, path = cache.check("spider", cache_file, max_delta)
        if hit:
            with open(path, "r") as fh:
                res = fh.read().replace("\\r\\n", "")
            return _FalseResponse(200, res)
    # If the robots.txt doesn't allow the scraping, return forbidden status
    if not ask_robots(url, useragent):
        log(fg("robots.txt forbids {}", red), url)
        return _FalseResponse(403, "robots.txt forbids it")
    # Make the request after the specified delay
    log("Waiting to ask for {}", fg(url, yellow))
    log("  in {:4.2f} seconds", delay)
    sleep(delay)
    headers = {"User-Agent": useragent}
    log("Requesting")
    res = requests.get(url, timeout=10, headers=headers)
    # Exit the program if the scraping was penalized
    if res.status_code == 429:  # too many requests
        abort("Too many requests")
    # Cache the response if allowed by user
    if use_cache:
        # res.encoding is None when the server declares no charset; the
        # original decode(None) raised TypeError, so fall back to UTF-8.
        cache.update(
            "spider",
            cache_file,
            res.content.decode(res.encoding or "utf-8"),
            json_format=False,
        )
    return res
def get_feed(url, force_reload=False):
    """Get RSS feed and optionally remember headers to reduce bandwidth.

    Stores the feed's `etag`/`modified` headers in the cache and replays
    them on the next request (conditional GET), so an unchanged feed
    answers 304 with no body.  force_reload=True skips those headers.
    """
    useragent = "Answerable RSS v0.1"
    log("Requesting feed {}", fg(url, yellow))
    cache_file = url.replace("/", "_")
    # Get the conditions for the GET bandwith reduction
    etag = None
    modified = None
    if not force_reload:
        # td(days=999): the stored headers effectively never expire.
        hit, path = cache.check("spider.rss", cache_file, td(days=999))
        if hit:
            with open(path, "r") as fh:
                headers = json.load(fh)
            etag = headers["etag"]
            modified = headers["modified"]
            log("with {}: {}", bold("etag"), fg(etag, yellow))
            log("with {}: {}", bold("modified"), fg(modified, yellow))
    # Get the feed
    feed = feedparser.parse(url, agent=useragent, etag=etag, modified=modified)
    # Store the etag and/or modified headers
    # NOTE(review): feedparser results can lack `status` when the fetch
    # itself fails — confirm callers never reach here in that case.
    if feed.status != 304:
        etag = feed.etag if "etag" in feed else None
        modified = feed.modified if "modified" in feed else None
        new_headers = {
            "etag": etag,
            "modified": modified,
        }
        cache.update("spider.rss", cache_file, new_headers)
        log("Stored new {}: {}", bold("etag"), fg(etag, green))
        log("Stored new {}: {}", bold("modified"), fg(modified, green))
    return feed
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
809
|
limkokholefork/Answerable
|
refs/heads/main
|
/models/content_based_0.py
|
"""Recommender Tool for Answerable
This file contains the recommendation algorithm.
"""
from bs4 import BeautifulSoup as bs
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel
def recommend(user_qa, feed):
    """Rank `feed` questions by similarity to the user's answered questions.

    Returns (order, info): `order` is the list of feed indices sorted by
    mean rank across four similarity rankings (sum/max similarity, with
    and without tags); `info` is always None (no per-entry details).
    """
    # Corpus of answered questions: title + visible body text (HTML stripped).
    answered = [
        x[0]["title"] + " " + bs(x[0]["body"], "html.parser").getText(" ", strip=True)
        for x in user_qa
    ]
    tags_ans = [" ".join(x[0]["tags"]) for x in user_qa]
    questions = [x["title"] + x["body"] for x in feed]
    tags_unans = [" ".join(x["tags"]) for x in feed]
    nans = len(answered)
    nunans = len(questions)
    """
    The following code is an adapted version of the Content-Based recommmender
    described in this tutorial:
    https://www.datacamp.com/community/tutorials/recommender-systems-python
    """
    tfidf = TfidfVectorizer(stop_words="english")
    count = CountVectorizer(stop_words="english")
    # list of vectorized body and tags
    tfidf_matrix = tfidf.fit_transform(answered + questions)
    count_matrix = count.fit_transform(tags_ans + tags_unans)
    # similarity matrices: without and with tags
    cosine_sim_body = linear_kernel(tfidf_matrix, tfidf_matrix)
    cosine_sim_tags = linear_kernel(count_matrix, count_matrix) + cosine_sim_body
    # rows: unanswered, cols: answered
    unans_similarity_body = cosine_sim_body[nans:, :nans]
    unans_similarity_tags = cosine_sim_tags[nans:, :nans]
    # form of the following lists: [(feed index, value)]
    sum_sim_body = enumerate([sum(r) for r in unans_similarity_body])
    max_sim_body = enumerate([max(r) for r in unans_similarity_body])
    sum_sim_tags = enumerate([sum(r) for r in unans_similarity_tags])
    max_sim_tags = enumerate([max(r) for r in unans_similarity_tags])
    # sort the indices by the value
    sort_sum_sim_body = sorted(sum_sim_body, key=lambda x: x[1], reverse=True)
    sort_max_sim_body = sorted(max_sim_body, key=lambda x: x[1], reverse=True)
    sort_sum_sim_tags = sorted(sum_sim_tags, key=lambda x: x[1], reverse=True)
    sort_max_sim_tags = sorted(max_sim_tags, key=lambda x: x[1], reverse=True)
    # map each index to its classifications (feed index -> rank position)
    by_sum_body = {x[0]: i for i, x in enumerate(sort_sum_sim_body)}
    by_max_body = {x[0]: i for i, x in enumerate(sort_max_sim_body)}
    by_sum_tags = {x[0]: i for i, x in enumerate(sort_sum_sim_tags)}
    by_max_tags = {x[0]: i for i, x in enumerate(sort_max_sim_tags)}
    # compute the mean classification for each index
    mean_index = []
    for i in range(nunans):
        mean = (by_sum_body[i] + by_sum_tags[i] + by_max_body[i] + by_max_tags[i]) / 4
        mean_index.append((mean, i))
    # build the final recommended feed order (lowest mean rank first)
    by_mean = [x[1] for x in sorted(mean_index)]
    return by_mean, None
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
810
|
limkokholefork/Answerable
|
refs/heads/main
|
/tools/cache.py
|
"""Cache Tool for Answerable
This file contains the functions to access and modify cached content.
It may be used by different modules, so each function requires a category argument
to avoid collisions.
As every function is intended to serve a secondary role in extern functions, the
logs have an extra level of indentation.
"""
import json
import pathlib
from datetime import datetime as dt
from datetime import timedelta as td
from tools.log import log
from tools.displayer import fg, green, magenta
__cache_dir = ".cache"
def check(category: str, _file: str, max_delta: td) -> (bool, pathlib.Path):
    """Report whether a cached file exists and is still fresh.

    Returns:
        (B, P) where
        - B is true if the content is cached and recent enough to use
        - P is the path where the cached content is/should be.

    Parameters:
        category:  Folder inside the cache.
        _file:     File name to look for.
        max_delta: Timedelta threshold beyond which a file is too old.
    """
    subpath = pathlib.Path(category) / _file
    path = pathlib.Path.cwd() / __cache_dir / subpath
    path.parent.mkdir(parents=True, exist_ok=True)
    try:
        if not path.exists():
            log("   Miss {}", fg(subpath, magenta))
            return False, path
        log("   Hit {}", fg(subpath, green))
        # Freshness is judged from the file's mtime.
        age = dt.now() - dt.fromtimestamp(path.stat().st_mtime)
        log("   Time passed since last fetch: {}", age)
        fresh = age < max_delta
        if fresh:
            log(fg("   Recent enough", green))
        else:
            log(fg("   Too old", magenta))
        return fresh, path
    except OSError as err:
        log("   {}: {}", err, fg(subpath, magenta))
        return False, path
def update(category: str, _file: str, obj, json_format=True):
    """Update or create a file in the cache

    Parameters:
        category:    Folder inside the cache.
        _file:       File name to store in.
        obj:         Object to store.
        json_format: When True, serialize `obj` as JSON; otherwise write
                     it verbatim (it must already be a string).

    Returns:
        (B, P): B tells whether the write succeeded, P is the cached
        file's path.  The original returned None on success and a tuple
        on failure; callers ignore the value, so making the return
        uniform is backward-compatible.
    """
    subpath = pathlib.Path(category) / _file
    path = pathlib.Path.cwd() / __cache_dir / subpath
    path.parent.mkdir(parents=True, exist_ok=True)
    try:
        with open(path, "w") as fh:
            if json_format:
                json.dump(obj, fh, indent=2)
            else:
                fh.write(obj)
        log("   Cache updated: {}", fg(subpath, green))
        return True, path
    except OSError as err:
        log("   {}: {}", err, fg(subpath, magenta))
        return False, path
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
811
|
limkokholefork/Answerable
|
refs/heads/main
|
/tools/fetcher.py
|
"""Fetcher Tool for Answerable
This file contains the high level functions in charge of data retrieval.
It provides a interface between the spider/crawler and another level of
cacheable information.
"""
import math
import json
from datetime import timedelta as td
from bs4 import BeautifulSoup
from tools import spider, cache
from tools.log import log, abort
from tools.displayer import fg, magenta, green, bold
cache_where = "fetcher"
cache_threshold = td(hours=12)
def get_questions(question_ids):
    """Retrieve questions from Stack Overflow

    - question_ids: list of question IDs (as strings)

    Returns a list of objects with the following attributes:
    {
        "tags": [string],
        "answers": [ {"owner": {"user_id": int}} ],
        "score": int,
        "creation_date": timestamp,
        "question_id": int,
        "link": string,
        "title": string,
        "body": string (html)
    }
    """
    # about this request: https://api.stackexchange.com/docs/questions-by-ids#page=1&pagesize=100&order=desc&sort=creation&ids=67519195&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA&site=stackoverflow
    api_request_f = "https://api.stackexchange.com//2.2/questions/{}?page={}&pagesize=100&order=desc&sort=creation&site=stackoverflow&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA"
    max_ids = 100  # no more than 100 ids allowed at once
    # Request the IDs in batches of `max_ids`, semicolon-separated.
    k = math.ceil(len(question_ids) / max_ids)
    log(f"{len(question_ids)} questions, {k} batches")
    questions = []
    for i in range(k):
        log(f"batch {i+1}")
        batch_begin = i * max_ids
        batch_end = i * max_ids + max_ids
        subset = ";".join(question_ids[batch_begin:batch_end])
        page = 1
        while True:
            api_request = api_request_f.format(subset, page)
            response = spider.get(
                api_request, delay=0.5, use_cache=False
            )  # urls too long to cache
            if response.status_code != 200:
                abort(response)
            result = json.loads(response.content)
            questions += result["items"]
            # Follow API paging until the server reports no more pages.
            if not result["has_more"]:
                break
            page += 1
    return questions
def get_user_answers(user_id, force_reload=False, max_page=math.inf):
    """Retrieve answers from a Stack Overflow user

    - user_id: user ID
    - force_reload: bypass the spider cache for this request
    - max_page: stop after this many API pages (100 answers per page)

    Returns a list of objects with the following attributes:
    {
        "is_accepted": bool,
        "score": int,
        "question_id": int,
        "link": string,
        "title": string,
        "body": string (html),
    }
    """
    api_request_f = "https://api.stackexchange.com/2.2/users/{}/answers?page={}&pagesize=100&order=desc&sort=activity&site=stackoverflow&filter=!37n)Y*a2Ut6eDilfH4XoIior(X(b8nm7Z-g)Tgl*A4Qdfe8Mcn-Luu"
    page = 1
    answers = []
    while page <= max_page:
        api_request = api_request_f.format(user_id, page)
        # td() is a zero timedelta: force_reload makes any cached copy "too old".
        response = spider.get(
            api_request, delay=0.5, max_delta=td() if force_reload else td(hours=12)
        )
        if response.status_code != 200:
            abort(response)
        result = json.loads(response.content)
        answers += result["items"]
        if not result["has_more"]:
            break
        page += 1
    return answers
def get_QA(user_id, force_reload=False, max_page=5):
    """Retrieve information about the questions answered by the user

    Return
        [
            (Question_1, Answer_1),
            (Question_2, Answer_2),
            ...
        ]

    See
        get_questions, get_user_answers
    """
    log(bold("Fetching user information"))
    if force_reload:
        log(fg("Force reload", magenta))
    cache_file = str(user_id) + ".json"
    # Check cache
    if not force_reload:
        hit, fpath = cache.check(cache_where, cache_file, cache_threshold)
        if hit:
            with open(fpath) as fh:
                stored = json.load(fh)
            return stored
    # Get the answers
    answers = get_user_answers(user_id, force_reload, max_page)
    # Get the questions
    q_ids = [str(a["question_id"]) for a in answers]
    questions = get_questions(q_ids)
    # Join answers and questions
    user_qa = [
        (q, a)
        for q in questions
        for a in answers
        if q["question_id"] == a["question_id"]
    ]
    # Copy each question's tags onto its answer BEFORE caching: a cache hit
    # returns the stored JSON as-is, and downstream consumers (the
    # statistics metrics) read a["tags"].  The original cached first, so a
    # warm cache produced tag-less answers and broke `summary`.
    for q, a in user_qa:
        a["tags"] = q["tags"]
    cache.update(cache_where, cache_file, user_qa)
    ## Include questions specified by user (not cached: include.txt may change)
    try:
        with open("include.txt", "r") as f:
            extra_q_ids = f.read().split()
        log("Aditional training: " + str(extra_q_ids))
        extra_questions = get_questions(extra_q_ids)
    except FileNotFoundError:
        extra_questions = []
        log("No additional training specified by user")
    # Extra questions carry no answer of the user's.
    user_qa += [(q, None) for q in extra_questions]
    return user_qa
def get_question_feed(url, force_reload=False):
    """Retrieve the last questions of the RSS feed.

    Returns a list of question dicts with keys:
        link: str, title: str, body: str (html stripped to text),
        tags: list of str
    An empty list is returned when the feed is unchanged (HTTP 304).
    """
    log(bold("Fetching question feed"))
    if force_reload:
        log(fg("Force reload", magenta))
    feed = spider.get_feed(url, force_reload=force_reload)
    if feed.status == 304:  # Not Modified
        log(fg("Feed not modified since last retrieval (status 304)", magenta))
        return []
    log("Number of entries in feed: {}", fg(len(feed.entries), green))
    questions = [
        {
            "link": entry.link,
            "title": entry.title,
            "body": BeautifulSoup(entry.summary, "html.parser").getText(
                " ", strip=True
            ),
            "tags": [t["term"] for t in entry.tags],
        }
        for entry in feed.entries
    ]
    return questions
def get_user_tags(filename):
    """Parse the tags file and return the user followed and ignored tags"""
    try:
        with open(filename, "r") as fh:
            soup = BeautifulSoup(fh.read(), "html.parser")

        def section(sec_id):
            # Each section lists its tags as <a class="post-tag"> anchors.
            anchors = soup.find(id=sec_id).find_all("a", class_="post-tag")
            return [a.getText(" ", strip=True) for a in anchors]

        return {
            "followed": section("watching-1"),
            "ignored": section("ignored-1"),
        }
    except FileNotFoundError:
        abort("File not found: {}", filename)
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
812
|
limkokholefork/Answerable
|
refs/heads/main
|
/models/content_based_1.py
|
"""Recommender Tool for Answerable
This file contains the recommendation algorithm.
"""
import tools.displayer
from bs4 import BeautifulSoup as bs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import numpy as np
import re
def preprocessed_text_from_html(html):
    """Lower-cased text of `html` with <code> blocks and digits removed."""
    soup = bs(html, "html.parser")
    # Code snippets add noise to the TF-IDF vocabulary; drop them.
    for code_tag in soup.findAll(name="code"):
        code_tag.decompose()
    raw = soup.getText(" ", strip=True)
    without_digits = re.sub(r"\d+", "", raw)
    tokens = re.findall(r"[\w+_]+", without_digits)
    return " ".join(tokens).lower()
def recommend(user_qa, feed):
    """Rank `feed` by TF-IDF similarity to the user's answered questions.

    Returns (order, info): `order` is the list of feed indices sorted by a
    score that favors similar AND longer questions; `info` holds one
    displayable summary per feed entry (closest answered question, text
    size, similarity and score).
    """
    # One text document per answered question: tags + title + cleaned body.
    answered = [
        " ".join(x["tags"])
        + " "
        + x["title"].lower()
        + " "
        + preprocessed_text_from_html(x["body"])
        for [x, _] in user_qa
    ]
    unanswered = [
        " ".join(x["tags"])
        + " "
        + x["title"].lower()
        + " "
        + preprocessed_text_from_html(x["body"])
        for x in feed
    ]
    nans = len(answered)
    tfidf = TfidfVectorizer(stop_words="english")
    # list of vectorized text
    tfidf_matrix = tfidf.fit_transform(answered + unanswered)
    # similarity matrix of each answer with the rest
    cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
    # rows: unanswered, cols: answered
    unans_similarity = cosine_sim[nans:, :nans]
    # index: unanswered. values: max similarity, text size and score
    max_sim = list(enumerate([max(r) for r in unans_similarity]))
    unans_sizes = [len(u.split()) for u in unanswered]
    # score = similarity^2 * word count: similarity dominates, length breaks ties.
    score = [x * x * unans_sizes[i] for i, x in max_sim]
    # sort the indices by the value
    by_score = sorted(list(enumerate(score)), key=lambda x: x[1], reverse=True)
    # relation between index in feed and index of closest answered
    closest = [
        (i, np.where(np.isclose(unans_similarity[i], v))[0][0]) for i, v in max_sim
    ]
    # store displayable information
    b = tools.displayer.bold
    info_f = "{}: {{}}\n{}:{{}} {}: {{}} {}: {{}}".format(
        b("Closest"),
        b("Text size"),
        b("Similarity"),
        b("Score"),
    )
    info = []
    for unans, ans in closest:
        info.append(
            info_f.format(
                user_qa[ans][0]["title"],
                unans_sizes[unans],
                f"{100*max_sim[unans][1]:.2f}%",
                f"{score[unans]:.2f}",
            )
        )
    # get the indexes, now sorted
    sorted_index = [x[0] for x in by_score]
    return sorted_index, info
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
813
|
limkokholefork/Answerable
|
refs/heads/main
|
/answerable.py
|
import re
import json
import argparse
import datetime
import textwrap
import importlib
from urllib.error import URLError
from tools import fetcher, displayer, log, spider
_current_version = "v1.1"
def latest_version():
    """Return the latest GitHub release tag (e.g. 'v1.2'), or None.

    Any failure — network error, non-200 status, malformed JSON, missing
    "name" field, or a release name without a version tag — yields None
    after a warning.  The original crashed on a non-matching regex
    (None[0] → TypeError) and on malformed payloads (KeyError/ValueError).
    """
    try:
        res = spider.get(
            "https://api.github.com/repos/MiguelMJ/Answerable/releases/latest", 0
        )
        if res.status_code != 200:
            log.warn("Unable to get information from latest version")
            return None
        match = re.search(r"v[\d.]+.?", json.loads(res.content)["name"])
        if match is None:
            log.warn("Unable to get information from latest version")
            return None
        return match[0]
    except (URLError, KeyError, ValueError):
        log.warn("Unable to get information from latest version")
        return None
_config_file = ".config"
def get_user_tags(args):
    """Return the parsed tags when the -t option was given, else None."""
    if args.tags is None:
        log.log("No tags file provided.")
        return None
    return fetcher.get_user_tags(args.tags)
def load_config(args) -> dict:
    """Return the user configuration

    Precedence (lowest to highest): built-in defaults, the .config file,
    then the -u/-t/-m command line options.  Aborts when no user id is
    available from any source.
    """
    try:
        with open(_config_file) as fh:
            file_config = json.load(fh)
    except (IOError, ValueError):
        # Missing or malformed .config: fall back to defaults + CLI.  The
        # original's `return` inside `finally` let a malformed file reach
        # the merge with `file_config` unbound (NameError).
        file_config = {}
    default_config = {"model": "content_based_1"}
    cli_config = {"user": args.user, "tags": args.tags, "model": args.model}
    # Drop unset CLI options so they don't shadow the file configuration.
    cli_config = {k: v for k, v in cli_config.items() if v is not None}
    config = {**default_config, **file_config, **cli_config}
    # .get(): "user" may be missing entirely (no file and no -u option);
    # config["user"] raised KeyError instead of aborting with the message.
    if config.get("user") is None:
        log.abort(".config not found: provide user id with -u option")
    return config
def save_config(args):
    """Store the user configuration

    Create or overwrite the configuration file with the user id, parsed
    tags and chosen model taken from the -u, -t and -m options.
    """
    with open(_config_file, "w") as fh:
        data = {
            "user": args.user,
            "tags": get_user_tags(args),
            "model": args.model or "content_based_1",
        }
        json.dump(data, fh, indent=2)
    log.log("Configuration saved in {}", _config_file)
def summary(args):
    """Display a summary of the answered questions"""
    config = load_config(args)
    qa = fetcher.get_QA(config["user"], force_reload=args.f)
    # Keep only pairs with an actual answer (extra questions pair with None).
    answered = []
    for q, a in qa:
        if a is not None:
            answered.append((q, a))
    displayer.disp_statistics(answered)
def recommend(args):
    """Recommend questions from the latest unanswered"""
    # Counters for entries discarded by valid_entry, reported at the end.
    filtered = {"hidden": 0, "closed": 0, "duplicate": 0}

    def valid_entry(entry):
        """Check if a entry should be taken into account"""
        # NOTE: hide_tags is read via closure; it is assigned further down,
        # before this function is first called.
        if len(set(entry["tags"]) & hide_tags) > 0:
            filtered["hidden"] += 1
            return False
        if entry["title"][-8:] == "[closed]":
            filtered["closed"] += 1
            return False
        if entry["title"][-11:] == "[duplicate]":
            filtered["duplicate"] += 1
            return False
        return True

    def cf(x):
        """Color a value according to its value"""
        return (
            displayer.fg(x, displayer.green)
            if x == 0
            else displayer.fg(x, displayer.magenta)
        )

    # Load configuration
    config = load_config(args)
    # Load the model
    try:
        model_name = config["model"]
        log.log("Loading model {}", displayer.fg(model_name, displayer.yellow))
        # Models live in the `models` package and expose recommend(user_qa, feed).
        model = importlib.import_module(f".{model_name}", "models")
        log.log(
            "Model {} succesfully loaded", displayer.fg(model_name, displayer.green)
        )
    except ModuleNotFoundError as err:
        # Distinguish "model file missing" from "model's own import failed".
        if err.name == f"models.{model_name}":
            log.abort("Model {} not present", model_name)
        else:
            log.abort("Model {} unsatisfied dependency: {}", model_name, err.name)
    # Get user info and feed
    user_qa = fetcher.get_QA(config["user"], force_reload=args.f)
    if args.all or "tags" not in config:
        tags = ""
    else:
        # Build the tag-filtered feed URL ("+" must be escaped as %2b).
        tags = "tag?tagnames="
        tags += "%20or%20".join(config["tags"]["followed"]).replace("+", "%2b")
        tags += "&sort=newest"
    url = "https://stackoverflow.com/feeds/" + tags
    try:
        feed = fetcher.get_question_feed(url, force_reload=args.F)
        if len(feed) == 0:
            raise ValueError("No feed returned")
        # Filter feed from ignored tags
        hide_tags = (
            set()
            if args.all or "tags" not in config
            else set(config["tags"]["ignored"])
        )
        useful_feed = [e for e in feed if valid_entry(e)]
        if len(useful_feed) == 0:
            raise ValueError("All feed filtered out")
        log.log(
            "Discarded: {} ignored | {} closed | {} duplicate",
            cf(filtered["hidden"]),
            cf(filtered["closed"]),
            cf(filtered["duplicate"]),
        )
        # Make the recommendation
        log.log(f"Corpus size: {len(user_qa)} Feed size: {len(useful_feed)}")
        rec_index, info = model.recommend(user_qa, useful_feed)
        # Keep only the top `limit` entries, in recommended order.
        selection = [useful_feed[i] for i in rec_index[: args.limit]]
        if args.info and info is None:
            log.warn("Info requested, but model {} returns None", model_name)
        elif args.info and info is not None:
            info = [info[i] for i in rec_index[: args.limit]]
        displayer.disp_feed(selection, info, args.info)
    except ValueError as err:
        log.warn(err)
        log.print_advice()
def parse_arguments() -> argparse.Namespace:
    """Parse the command line arguments

    Parse sys.argv into a Namespace, that will be used in the rest of the
    functions.  Side effect: --no-ansi disables colors globally by
    flipping displayer.ansi.
    """
    parser = argparse.ArgumentParser(
        usage="%(prog)s COMMAND [OPTIONS]",
        description=f"Answerable {_current_version}\nStack Overflow unanswered questions recommendation system",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(
            """\
            Code: https://github.com/MiguelMJ/Answerable
            Documentation: in https://github.com/MiguelMJ/Answerable/wiki
            """
        ),
    )
    parser.add_argument(
        "command",
        choices=("save", "summary", "recommend"),
        help="save,summary,recommend",
        metavar="COMMAND",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="show the log content in stderr too",
        action="store_true",
    )
    parser.add_argument(
        "-i",
        "--info",
        help="print extra info on each recomendation",
        action="store_true",
    )
    parser.add_argument("--no-ansi", help="print without colors", action="store_true")
    parser.add_argument("-f", help="force reload of user data", action="store_true")
    parser.add_argument(
        "-F", help="force retrieval of question feed", action="store_true"
    )
    parser.add_argument(
        "-l",
        "--limit",
        help="limit the number of items displayed",
        type=int,
        default=999,
        metavar="N",
    )
    parser.add_argument(
        "-a",
        "--all",
        help="don't use tags to filter the feed. If the user tags haven't been saved before with the <save> command, this option is on by default",
        action="store_true",
    )
    parser.add_argument(
        "-u", "--user", help="identifier of Stack Overflow user", metavar="ID"
    )
    parser.add_argument(
        "-t",
        "--tags",
        help="file with the source of the page with the user followed and ignored tags",
        metavar="FILE",
    )
    parser.add_argument(
        "-m",
        "--model",
        help="specify the recommendation model you want to use",
        metavar="MODEL",
    )
    args = parser.parse_args()
    # Apply --no-ansi before anything prints, so all helpers stay color-free.
    if args.no_ansi:
        displayer.ansi = False
    return args
if __name__ == "__main__":
_latest_version = latest_version()
if _latest_version is not None and _latest_version != _current_version:
log.warn(
f"New version on GitHub: {_latest_version} (current is {_current_version})"
)
switch = {
"save": save_config,
"summary": summary,
"recommend": recommend,
}
args = parse_arguments()
command = args.command
log.add_log("answerable.log")
if args.verbose:
log.add_stderr()
log.log(displayer.bold("Log of {}"), datetime.datetime.now())
switch[command](args)
log.close_logs()
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
814
|
limkokholefork/Answerable
|
refs/heads/main
|
/tools/statistics.py
|
"""Statistics Tool for Answerable
This file contains the functions used to analyze user answers.
"""
#
# TAG RELATED METRICS (USING QA)
#
# Module-level memo: computed once for the first qa passed in, then reused.
_tags_info = None
def tags_info(qa):
    """Map each tag to its score, acceptance and count"""
    global _tags_info
    if _tags_info is None:
        acc = {}
        for _, answer in qa:
            for tag in answer["tags"]:
                score, accepted, count = acc.get(tag, (0, 0, 0))
                acc[tag] = (
                    score + answer["score"],
                    accepted + answer["is_accepted"],
                    count + 1,
                )
        _tags_info = acc
    return _tags_info
def top_tags_use(qa, top=5):
    """Top tags by appearance"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][2], reverse=True)
    return [(name, counts[2]) for name, counts in ranked[:top]]
def top_tags_score_abs(qa, top=5):
    """Top tags by accumulated score"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][0], reverse=True)
    return [(name, counts[0]) for name, counts in ranked[:top]]
def top_tags_acceptance_abs(qa, top=5):
    """Top tags by accumulated acceptance"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][1], reverse=True)
    return [(name, counts[1]) for name, counts in ranked[:top]]
def top_tags_score_rel(qa, top=5):
    """Top tags by score per answer"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][0] / kv[1][2], reverse=True)
    return [(name, counts[0] / counts[2]) for name, counts in ranked[:top]]
def top_tags_acceptance_rel(qa, top=5):
    """Top tags by acceptance per answer"""
    info = tags_info(qa)
    ranked = sorted(info.items(), key=lambda kv: kv[1][1] / kv[1][2], reverse=True)
    return [(name, counts[1] / counts[2]) for name, counts in ranked[:top]]
#
# ANSWER RELATED METRICS
#
def top_answers(answers, top=5):
    """Top answers by score"""
    ranked = sorted(answers, key=lambda a: a["score"], reverse=True)
    return ranked[:top]
def top_accepted(answers, top=5):
    """Top accepted answers by score"""
    ranked = sorted(answers, key=lambda a: a["score"], reverse=True)
    return [a for a in ranked if a["is_accepted"]][:top]
#
# REPUTATION RELATED METRICS
#
def reputation(answer):
    """Reputation associated to an answers

    NOT ACCURATE
    """
    # +10 per score point, +15 if the answer was accepted.
    accepted_bonus = 15 if answer["is_accepted"] else 0
    return answer["score"] * 10 + accepted_bonus
# Module-level memos: computed once for the first answers list, then reused.
_answers_sorted_reputation = None
_total_reputation = None
def answers_sorted_reputation(answers):
    """Answers sorted by associated reputation"""
    global _answers_sorted_reputation
    if _answers_sorted_reputation is None:
        _answers_sorted_reputation = sorted(answers, key=reputation, reverse=True)
    return _answers_sorted_reputation
def total_reputation(answers):
    """Total reputation gained from answers"""
    global _total_reputation
    if _total_reputation is None:
        _total_reputation = sum(reputation(a) for a in answers)
    return _total_reputation
def average_reputation_weight(answers, w):
    """Average reputation and weight of answers generating w % reputation"""
    target = total_reputation(answers) * w
    ranked = answers_sorted_reputation(answers)
    # Take top answers until they account for the target reputation share.
    gathered = 0
    used = 0
    while gathered < target and used < len(ranked):
        gathered += reputation(ranked[used])
        used += 1
    if used == 0:
        return (0, 0)
    return (gathered / used, 100 * used / len(answers))
#
# LISTS TO SIMPLIFY CALLING
#
# Bundles of (label, metric callable) consumed by displayer.disp_statistics;
# adding a metric only requires appending an entry here.
tag_metrics = [  # call with qa
    ("Top used tags", top_tags_use),
    ("Top tags by accumulated score", top_tags_score_abs),
    ("Top tags by score per answer", top_tags_score_rel),
    ("Top tags by accumulated acceptance", top_tags_acceptance_abs),
    ("Top tags by acceptance per answer", top_tags_acceptance_rel),
]
# Single-valued metrics: each callable maps the answers list to one number.
answer_metrics_single = [  # call with answers
    ("Answers analyzed", len),
    ("Total score", lambda x: sum([a["score"] for a in x])),
    ("Average score", lambda x: sum([a["score"] for a in x]) / len(x)),
    ("Total accepted", lambda x: sum([a["is_accepted"] for a in x])),
    ("Acceptance ratio", lambda x: sum([a["is_accepted"] for a in x]) / len(x)),
]
# Top-N metrics: (label, ranking function, key used to display each answer).
answer_metrics_tops = [  # call with answers
    ("Top answers by score", top_answers, lambda a: a["score"]),
    ("Top accepted answers by score", top_accepted, lambda a: a["score"]),
]
reputation_metrics_single = [  # call with answers
    ("Total reputation", lambda x: sum([reputation(a) for a in x])),
    ("Average reputation", lambda x: sum([reputation(a) for a in x]) / len(x)),
]
# (weights, metric, per-weight labels): the metric runs once per weight.
reputation_weight_metrics = (  # call with answers and weights
    [0.95, 0.80],
    average_reputation_weight,
    (
        "Average reputation on answers generating {:.0f}% reputation",
        "Percentage of answers generating {:.0f}% reputation",
    ),
)
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
815
|
limkokholefork/Answerable
|
refs/heads/main
|
/tools/displayer.py
|
"""Displayer Tool for Answerable
This file contains the functions and variables used to present the data.
"""
import tools.statistics as st
#
# COLOR RELATED VARIABLES AND FUNCTIONS
#
# Base colors as RGB triples; 250 is used as the channel maximum here.
red = (250, 0, 0)
green = (0, 250, 0)
blue = (0, 0, 250)
cyan = (0, 250, 250)
magenta = (250, 0, 250)
yellow = (250, 250, 0)
# Extra palette kept for reference but currently disabled.
"""
white = (250, 250, 250)
gray1 = (200, 200, 200)
gray2 = (150, 150, 150)
gray3 = (100, 100, 100)
gray4 = (50, 50, 50)
black = (0, 0, 0)
"""
def lighten(c, r):
    """Move color `c` a fraction `r` of the way toward (250, 250, 250)."""
    return tuple(int(ch + (250 - ch) * r) for ch in c)
def darken(c, r):
    """Move color `c` a fraction `r` of the way toward black."""
    return tuple(int(ch - ch * r) for ch in c)
"""
def interpolate(c, d, r):
dr = (d[0] - c[0]) * r
dg = (d[1] - c[1]) * r
db = (d[2] - c[2]) * r
return (int(c[0] + dr), int(c[1] + dg), int(c[2] + db))
"""
#
# ANSI RELATED VARIABLES AND FUNCTIONS
#
# Global switch: when False, all ANSI helpers return their input unchanged.
ansi = True
def bold(msg):
    """Wrap `msg` in ANSI bold escape codes (respects the `ansi` switch)."""
    return "\033[1m{}\033[0m".format(msg) if ansi else msg
def fg(msg, color):
    """Color `msg`'s foreground via a 24-bit ANSI escape (respects `ansi`)."""
    if not ansi:
        return msg
    r, g, b = color[0], color[1], color[2]
    return "\033[38;2;{:03};{:03};{:03}m{}\033[0m".format(r, g, b, msg)
def bg(msg, color):
    """Color `msg`'s background via a 24-bit ANSI escape (respects `ansi`)."""
    if not ansi:
        return msg
    r, g, b = color[0], color[1], color[2]
    return "\033[48;2;{:03};{:03};{:03}m{}\033[0m".format(r, g, b, msg)
def color(msg, fgc, bgc):
    """Apply both foreground and background colors to `msg`."""
    colored = fg(msg, fgc)
    return bg(colored, bgc)
#
# DATA DISPLAY FUNCTIONS
#
def disp_feed(feed, info, print_info=False):
    """Print each feed entry (title, tags, link) and optional extra info."""
    def fmt_title(text):
        return fg(bold(text), lighten(blue, 0.3))

    def fmt_tag(text):
        return fg(f"[{text}]", darken(cyan, 0.2))

    for idx, entry in enumerate(feed):
        print("o", fmt_title(entry["title"]))
        print(" ", " ".join(fmt_tag(t) for t in entry["tags"]))
        print(" ", entry["link"])
        if print_info and info is not None:
            # Indent continuation lines to align under the entry.
            print(" ", info[idx].replace("\n", "\n  "))
def table(data, align=""):
cols = len(data[0])
widths = []
for i in range(0, cols):
col = [x[i] for x in data]
widths.append(max([len(str(c)) for c in col]))
row_f = " ".join(["{{:{}{}}}".format(align, w) for w in widths])
for d in data:
print(row_f.format(*d))
def disp_statistics(user_qa):
    """Print answer, tag and reputation metrics for the user's (q, a) pairs."""
    # Shared format snippets: answer titles, tags and metric values.
    ans_f = fg("{}", lighten(blue, 0.3))
    tag_f = fg("[{}]", darken(cyan, 0.2))
    val_f = bold(fg("{}", green))

    def print_section(txt):
        # Section headers are printed bold and upper-cased.
        print(bold(txt.upper()))
        print()

    def print_metric(txt):
        def mark(x):
            return bold(x)
        print(mark(txt))

    def print_answer_and_value(answer, value):
        # Value + title on one line, the answer's tags aligned below.
        tags = answer["tags"]
        print(val_f.format(value), ans_f.format(answer["title"]))
        print(" " * len(str(value)), " ".join([tag_f.format(t) for t in tags]))

    user_answers = [a for q, a in user_qa]
    print_section("Answer metrics")
    metrics = [
        (bold(k), val_f.format(m(user_answers))) for k, m in st.answer_metrics_single
    ]
    table(metrics)
    print()
    for (name, metric, key) in st.answer_metrics_tops:
        print_metric(name)
        results = metric(user_answers)
        for a in results:
            print_answer_and_value(a, key(a))
        print()
    print_section("Tag metrics")
    for (name, metric) in st.tag_metrics:
        print_metric(name)
        results = metric(user_qa)
        results = [(tag_f.format(r[0]), val_f.format(r[1])) for r in results]
        table(results)
        print()
    print_section("Reputation metrics")
    metrics = [
        (bold(k), val_f.format(m(user_answers)))
        for k, m in st.reputation_metrics_single
    ]
    table(metrics)
    print()
    # Each weight yields two values; st.reputation_weight_metrics[2] holds
    # the matching label templates.
    for w in st.reputation_weight_metrics[0]:
        results = st.reputation_weight_metrics[1](user_answers, w)
        for i, info in enumerate(st.reputation_weight_metrics[2]):
            print_metric(info.format(w * 100))
            print(val_f.format(results[i]))
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
816
|
limkokholefork/Answerable
|
refs/heads/main
|
/tools/log.py
|
"""Log Tool for Answerable
This file contains the functions used to log control data and debug messages
in a unified format.
"""
import re
import sys
import inspect
from tools.displayer import bold, red, magenta, fg
_logs = [] # list of file handlers
_ansire = re.compile("\\033\[[^m]+m") # ansi escape sequences
def _strip_ansi(msg):
    """Return *msg* with every ANSI escape sequence removed."""
    return _ansire.sub("", msg)
def _get_caller():
    # Walk two frames up the stack -- past this helper and the public
    # log-API function that called it -- and report that frame's module
    # name.  The fixed depth means this must only be called from the
    # top-level functions in this module.
    frm = inspect.stack()[2]
    return inspect.getmodule(frm[0]).__name__
def add_stderr():
    """Register stderr as one of the log output handlers."""
    _logs.append(sys.stderr)
def add_log(logfile):
    """Open *logfile* for writing and register it as a log destination."""
    handle = open(logfile, "w")
    _logs.append(handle)
def close_logs():
    """Close every registered log handler, leaving stderr untouched."""
    for handler in _logs:
        if handler is sys.stderr:
            continue
        handler.close()
def advice_message():
    """Return a message pointing the user at the full log file(s)."""
    file_handlers = (fh for fh in _logs if fh is not sys.stderr)
    return "Full log in " + ", ".join(fh.name for fh in file_handlers)
def abort(msg, *argv):
    """Log *msg* as an error and terminate the program.

    Makes sure the message reaches stderr (registering it if needed),
    prints the where-to-find-the-full-log advice, closes every log file
    and exits with SystemExit.
    """
    if sys.stderr not in _logs:
        add_stderr()
    log(fg(msg, red), *argv, who=_get_caller())
    print_advice()
    close_logs()
    # sys.exit() is always available; the bare exit() builtin is injected
    # by the site module and may be absent (e.g. under `python -S`).
    sys.exit()
def warn(msg, *argv):
    """Log a warning message without aborting execution.

    stderr is registered temporarily when it is not already a log target
    so the warning always reaches the console, and only that temporary
    handler is removed afterwards.
    """
    err_off = sys.stderr not in _logs
    if err_off:
        add_stderr()
    log(fg(msg, magenta), *argv, who=_get_caller())
    if err_off:
        # Only remove the handler we added ourselves.  The original popped
        # unconditionally, which dropped an unrelated log file whenever
        # stderr had already been registered by the caller.
        _logs.pop()
def print_advice():
    """Print the full-log hint to stderr, unless stderr already receives the log."""
    if sys.stderr in _logs:
        return
    print(advice_message(), file=sys.stderr)
def log(msg, *argv, **kargs):
    """Write a formatted message to every registered log handler.

    The message is prefixed with "[<module>]"; the caller's module name
    is used unless an explicit who= keyword argument is supplied.  ANSI
    styling is kept on stderr and stripped for plain log files.
    """
    who = kargs["who"] if "who" in kargs else _get_caller()
    who = f"[{who}] "
    # textf: plain text for files; texts: styled text for the console.
    textf = who + _strip_ansi(msg.format(*argv))
    texts = bold(who) + msg.format(*argv)
    for f in _logs:
        if f is sys.stderr:
            print(texts, file=f)
            sys.stderr.flush()
        else:
            print(textf, file=f)
|
{"/tools/spider.py": ["/tools/displayer.py", "/tools/log.py"], "/tools/cache.py": ["/tools/log.py", "/tools/displayer.py"], "/tools/fetcher.py": ["/tools/log.py", "/tools/displayer.py"], "/models/content_based_1.py": ["/tools/displayer.py"], "/tools/displayer.py": ["/tools/statistics.py"], "/tools/log.py": ["/tools/displayer.py"]}
|
850
|
riti121/cafe
|
refs/heads/master
|
/facialrecognition.py
|
"""
import tensorflow as tf
modeltest=tf.keras.models.load_model("facial_1 (1)")
print("--model loaded successfully--")
"""
import cv2
import sys
import os
class FaceCropper(object):
    """Detect faces in an image with a Haar cascade and save cropped copies."""

    cascades_path = 'haarcascade_frontalface_default.xml'

    def __init__(self):
        self.face_cascade = cv2.CascadeClassifier(self.cascades_path)

    def generate(self, image_path, show_result):
        """Detect faces in *image_path* and write cropped face images to disk.

        Returns 0 when the image cannot be read or no face is detected.
        Otherwise returns the "<w><h>_faces.jpg" name of the last marked
        face written (empty string when show_result is False), matching
        the value callers pass on to their emotion classifier.
        """
        name = ""
        img = cv2.imread(image_path)
        if img is None:
            print("Can't open image file")
            return 0
        faces = self.face_cascade.detectMultiScale(img, 1.1, 3, minSize=(100, 100))
        # detectMultiScale returns an empty sequence -- never None -- when
        # nothing is found, so the original `faces is None` check could
        # never trigger; test the length instead.
        if len(faces) == 0:
            print('Failed to detect face')
            return 0
        if show_result:
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_color = img[y:y + h, x:x + w]
                print("[INFO] Object found. Saving locally.")
                name = str(w) + str(h) + '_faces.jpg'
                cv2.imwrite(name, roi_color)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        print("Detected faces: %d" % len(faces))
        # Save a square, 32x32 crop centered on each detected face.
        for i, (x, y, w, h) in enumerate(faces, start=1):
            half = max(w, h) / 2
            center_x = x + w / 2
            center_y = y + h / 2
            nx = int(center_x - half)
            ny = int(center_y - half)
            nr = int(half * 2)
            faceimg = img[ny:ny + nr, nx:nx + nr]
            lastimg = cv2.resize(faceimg, (32, 32))
            cv2.imwrite("image%d.jpg" % i, lastimg)
        return name
#fc=FaceCropper().generate("IMG_20200226_000431.png",True)
|
{"/backup(main).py": ["/facialrecognition.py"], "/CafeApp.py": ["/facialrecognition.py"]}
|
851
|
riti121/cafe
|
refs/heads/master
|
/backup(main).py
|
import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from collections import Counter
import bot
import time
import tensorflow as tf
import facialrecognition as fr
import cv2
class Home(Screen):
    # Landing screen; all layout/behaviour comes from the kv rules.
    pass
class Questions(Screen):
    """Personality-test screen: 20 binary questions reduced to an MBTI code."""

    # Raw string: the plain literal relied on '\Q' not being a recognised
    # escape, which raises warnings on modern Python.
    ques_path = r'Personality Test(base)\Questions.txt'
    personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}
    personality=''
    questions=[]
    question_1 = ObjectProperty(None)
    question_2 = ObjectProperty(None)
    counter=1
    answers=[0]*20
    # Loaded once at class-creation time; each line carries the two
    # alternatives separated by the literal token 'SPLIT'.
    with open(ques_path) as quest_file:
        questions=[r.split('SPLIT') for r in quest_file.readlines()]
    def personality_exam(self,answers):
        """Reduce the 20 recorded 'a'/'b' answers to a four-letter code on Report."""
        e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']
        # Questions rotate through the four axes: 0->E/I, 1->S/N, 2->T/F, 3->J/P.
        e.extend([answers[r] for r in range(0,20,4)])
        s.extend([answers[r] for r in range(1,20,4)])
        t.extend([answers[r] for r in range(2,20,4)])
        j.extend([answers[r] for r in range(3,20,4)])
        personality=''
        for option in e,s,t,j:
            temp=Counter(option)
            # First letter of the axis wins when 'a' answers dominate.
            personality+=option[0] if temp['a']>temp['b'] else option[1]
        Report.personality=personality
    def on_enter(self, *args):
        """Show the first question pair when the screen becomes active."""
        self.question_1.text=self.questions[0][0]
        self.question_2.text=self.questions[0][1]
    def ask_question1(self):
        """Record answer 'a' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='a'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='a'
            self.counter+=1
    def ask_question2(self):
        """Record answer 'b' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='b'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='b'
            self.counter+=1
class Report(Screen):
    """Shows the computed personality type and its illustration."""

    personality=''
    def on_enter(self, *args):
        self.per.text=Questions.personalities[self.personality]+'\n'+'('+self.personality+')'
        # Raw string avoids the invalid '\I' escape warning in the Windows path.
        self.image.source= Report.personality+r'\INTRODUCTION\Image.png'
class Description(Screen):
    """Shows the long-form introduction text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\INTRODUCTION\Introduction.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class CareerOptions(Screen):
    """Shows the career-suggestion text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\career.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class Strengths(Screen):
    """Shows the strengths text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\STRENGTHS\Strengths.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class CameraClick(Screen):
    """Captures a webcam frame, crops the face and classifies the emotion."""

    emo = ['Angry', 'Fear', 'Happy',
           'Sad', 'Surprise', 'Neutral']
    model = tf.keras.models.load_model("facial_1 (1)")
    buddy=''
    mood=''
    def prepare(self, filepath):
        """Load *filepath* as 48x48 grayscale shaped for the emotion model."""
        IMG_SIZE = 48
        img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    def capture(self):
        """Export the camera widget to PNG, detect the face and predict the mood."""
        camera = self.ids['camera']
        timestr = time.strftime("%Y%m%d_%H%M%S")
        name="IMG_{}.png".format(timestr)
        camera.export_to_png(name)
        print("Captured")
        fc=fr.FaceCropper().generate(name,True)
        try:
            prediction = self.model.predict([self.prepare(fc)])
            prediction=list(map(float,prediction[0]))
        except Exception:
            # A bare 'except:' would also swallow KeyboardInterrupt/SystemExit.
            prediction="prepare function could not run(0 faces detected)"
            self.mood='Neutral'
        print(prediction)
        try:
            self.mood=self.emo[prediction.index(max(prediction))] # self.emo[list(prediction[0]).index(1)]
        except Exception:
            print("Exception handled..!! Picture could not be cleared properly. Please check lighting")
            self.mood='Neutral'
        bot.setname(self.textforcamera.text)
        print(bot.getname())
        ChatWindow.mood=self.mood
class ChatWindow(Screen):
    """Chat screen greeting the user with the mood detected by CameraClick."""

    mood=''
    # NOTE(review): runs once at class-creation (import) time, not per instance.
    bot.pre_processing()
    #bot.chatcode()
    def on_enter(self, *args):
        self.chat_history.text="Hey "+bot.getname()+", what brings you here today!!\n Current Mood: "+self.mood+" !! "
    def send_message(self):
        message=self.text.text
        self.text.text=''
        #self.history.update_chat_history(f'[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}')
        self.chat_history.text += '\n' +"User: "+message
        # Set layout height to whatever height of chat history text is + 15 pixels
        # (adds a bit of space at the bottom)
        # Set chat history label to whatever height of chat history text is
        # Set width of chat history text to 98% of the label width (adds small margins)
        #self.layout.height = self.chat_history.texture_size[1] + 15
        self.chat_history.text_size = (self.chat_history.width * 0.98, None)
class WindowManager(ScreenManager):
    # Root widget hosting all screens; transitions handled by Kivy.
    pass
# Build the UI from the kv description and register every screen.
kv=Builder.load_file('design.kv')
sm = WindowManager()
screens=[Home(name="home"), Questions(name="quest"), Report(name="rep"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point returning the preconfigured screen manager."""
    def build(self):
        return sm
# Start the app only when executed as a script.
if __name__=='__main__':
    CafeApp().run()
|
{"/backup(main).py": ["/facialrecognition.py"], "/CafeApp.py": ["/facialrecognition.py"]}
|
852
|
riti121/cafe
|
refs/heads/master
|
/test.py
|
# Read integers from one stdin line and print the second-largest distinct
# value.  NOTE(review): prints nothing when every value is equal -- confirm
# that is acceptable for this throwaway script.
lst=sorted(list(map(int,input().split())))[::-1]
temp=lst[0]
for i in range(1,len(lst)):
    if temp==lst[i]:
        continue
    else:
        print(lst[i])
        break
|
{"/backup(main).py": ["/facialrecognition.py"], "/CafeApp.py": ["/facialrecognition.py"]}
|
853
|
riti121/cafe
|
refs/heads/master
|
/CafeApp.py
|
import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from collections import Counter
from gensim.parsing.preprocessing import strip_non_alphanum, preprocess_string
import bot
import time
import tensorflow as tf
import facialrecognition as fr
import cv2
class Home(Screen):
    # Landing screen; all layout/behaviour comes from the kv rules.
    pass
class Questions(Screen):
    """Personality-test screen: 20 binary questions reduced to an MBTI code."""

    # Raw string: the plain literal relied on '\Q' not being a recognised
    # escape, which raises warnings on modern Python.
    ques_path = r'Personality Test(base)\Questions.txt'
    personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}
    personality=''
    questions=[]
    question_1 = ObjectProperty(None)
    question_2 = ObjectProperty(None)
    counter=1
    answers=[0]*20
    # Loaded once at class-creation time; each line carries the two
    # alternatives separated by the literal token 'SPLIT'.
    with open(ques_path) as quest_file:
        questions=[r.split('SPLIT') for r in quest_file.readlines()]
    def personality_exam(self,answers):
        """Reduce the 20 recorded 'a'/'b' answers to a four-letter code on Report."""
        e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']
        # Questions rotate through the four axes: 0->E/I, 1->S/N, 2->T/F, 3->J/P.
        e.extend([answers[r] for r in range(0,20,4)])
        s.extend([answers[r] for r in range(1,20,4)])
        t.extend([answers[r] for r in range(2,20,4)])
        j.extend([answers[r] for r in range(3,20,4)])
        personality=''
        for option in e,s,t,j:
            temp=Counter(option)
            # First letter of the axis wins when 'a' answers dominate.
            personality+=option[0] if temp['a']>temp['b'] else option[1]
        Report.personality=personality
    def on_enter(self, *args):
        """Show the first question pair when the screen becomes active."""
        self.question_1.text=self.questions[0][0]
        self.question_2.text=self.questions[0][1]
    def ask_question1(self):
        """Record answer 'a' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='a'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='a'
            self.counter+=1
    def ask_question2(self):
        """Record answer 'b' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='b'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='b'
            self.counter+=1
class Report(Screen):
    """Shows the computed personality type and its illustration."""

    personality=''
    def on_enter(self, *args):
        self.per.text=Questions.personalities[self.personality]+'\n'+'('+self.personality+')'
        # Raw string avoids the invalid '\I' escape warning in the Windows path.
        self.image.source= Report.personality+r'\INTRODUCTION\Image.png'
class Description(Screen):
    """Shows the long-form introduction text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\INTRODUCTION\Introduction.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class CareerOptions(Screen):
    """Shows the career-suggestion text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\career.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class Strengths(Screen):
    """Shows the strengths text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\STRENGTHS\Strengths.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class CameraClick(Screen):
    """Captures a webcam frame, crops the face and classifies the emotion."""

    emo = ['Angry', 'Fear', 'Happy',
           'Sad', 'Surprise', 'Neutral']
    model = tf.keras.models.load_model("facial_1 (1)")
    buddy=''
    mood=''
    def prepare(self, filepath):
        """Load *filepath* as 48x48 grayscale shaped for the emotion model."""
        IMG_SIZE = 48
        img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    def capture(self):
        """Export the camera widget to PNG, detect the face and predict the mood."""
        camera = self.ids['camera']
        timestr = time.strftime("%Y%m%d_%H%M%S")
        name="IMG_{}.png".format(timestr)
        camera.export_to_png(name)
        print("Captured")
        fc=fr.FaceCropper().generate(name,True)
        try:
            prediction = self.model.predict([self.prepare(fc)])
            prediction=list(map(float,prediction[0]))
        except Exception:
            # A bare 'except:' would also swallow KeyboardInterrupt/SystemExit.
            prediction="prepare function could not run(0 faces detected)"
            self.mood='Neutral'
        print(prediction)
        try:
            self.mood=self.emo[prediction.index(max(prediction))] # self.emo[list(prediction[0]).index(1)]
        except Exception:
            print("Exception handled..!! Picture could not be captured properly. Please check lighting")
            self.mood='Neutral'
        bot.setname(self.textforcamera.text)
        print(bot.getname())
        ChatWindow.mood=self.mood
        self.textforcamera.text = ''
class ChatWindow(Screen):
    """Mood-aware chat screen driving a small scripted conversation via bot."""

    one=True
    prev=""
    mood=''
    # NOTE(review): runs once at class-creation (import) time, not per instance.
    bot.pre_processing()
    counter=1
    #bot.chatcode()
    def on_enter(self, *args):
        """Greet the user with a message tailored to the detected mood."""
        print(self.mood)
        greeting_msg="Hey "+bot.getname()+", my name is Cafe Buddy consider me a friend of yours!!\n"
        #self.chat_history.text="Hey "+bot.getname()+", what brings you here today!!\n Current Mood: "+self.mood+" !! "
        #emo = ['Angry', 'Fear', 'Happy','Sad', 'Surprise', 'Neutral']
        if self.mood=='Happy':
            buddy_msg="you seem quite happy. Is there still anything that disturbs you?\n"
            self.chat_history.text=greeting_msg+buddy_msg
        if self.mood=='Angry':
            buddy_msg="you seem quite disturbed. Is there anything that disturbs you?\n"
            self.chat_history.text=greeting_msg+buddy_msg
        if self.mood=='Fear' or self.mood=='Surprise' or self.mood=='Neutral':
            buddy_msg="Is everything okay? You are looking stressed?\n"
            self.chat_history.text=greeting_msg+buddy_msg
        if self.mood=='Sad':
            buddy_msg="hey, what is it that worries you so much? Why are you looking so sad?\n"
            self.chat_history.text=greeting_msg+buddy_msg
    def send_message(self):
        """Append the user's message and the scripted buddy reply to the history."""
        message=self.text.text
        self.text.text=''
        #self.history.update_chat_history(f'[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}')
        # '+=' appends like every other history update in this class and in
        # the backup version of this file; plain '=' wiped the greeting and
        # all prior conversation on every message.
        self.chat_history.text += '\n' +"User: "+message
        if self.mood=='Happy':
            if self.counter==1:
                if (bot.predict(message) >= 0.55):
                    buddy_msg='That is good. In case you ever feel otherways. Please feel free to have a session with me\n'
                else:
                    self.mood='Neutral'
                    buddy_msg = 'Please express yourself freely, i am hearing.\n'
                self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
        else:
            print(self.counter)
            if self.counter==1:
                keyword=[word for word in preprocess_string(message.lower()) if word in ('friend','work','education','school','college','family','studi','exam','fight')]
                print(keyword)
                if len(keyword)>0:
                    buddy_msg = 'Will you please tell me in a bit more detail about it?'
                    self.one=True
                else:
                    buddy_msg='I understand. Seems like something\'s bothering you. '\
                        'Could you further describe it, in short?'
                    self.one=False
                self.counter+=1
                self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
            elif self.counter==2:
                if self.one==True:
                    keyword=[]
                    print(bot.predict(message))
                    keyword.extend([preprocess_string(message.lower())][0])
                    print(keyword)
                    if 'friend' in keyword and bot.predict(message)[0][0] <= 0.6:
                        buddy_msg = "Many people tend to expect too much of others, their family, "\
                            "their friends or even just acquaintances. It's a usual mistake"\
                            ", people don't think exactly the way you do.\nDon't let the "\
                            "opinions of others make you forget what you deserve. You are "\
                            "not in this world to live up to the expectations of others, "\
                            "nor should you feel that others are here to live up to yours."\
                            "\nThe first step you should take if you want to learn how to "\
                            "stop expecting too much from people is to simply realize and "\
                            "accept the fact that nobody is perfect and that everyone "\
                            "makes mistakes every now and then."
                    elif 'work' in keyword or 'studi' in keyword or 'exam' in keyword:
                        if bot.predict(message)[0][0] <= 0.6:
                            buddy_msg = bot.getname() + ", don't take too much stress. I can list some really cool "\
                                "ways to handle it.\nYou should develop healthy responses which "\
                                "include doing regular exercise and taking good quality sleep. "\
                                "You should have clear boundaries between your work or academic "\
                                "life and home life so you make sure that you don't mix them.\n"\
                                "Tecniques such as meditation and deep breathing exercises can be "\
                                "really helping in relieving stress.\n Always take time to "\
                                "recharge so as to avoid the negative effects of chronic stress "\
                                "and burnout. We need time to replenish and return to our pre-"\
                                "stress level of functioning."
                    elif 'famili' in keyword and bot.predict(message)[0][0]<=0.6:
                        buddy_msg=bot.getname() + ", don't take too much stress. All you need to do is adjust "\
                            "your priorities. Don't take on unnecessary duties and "\
                            "responsibilities.\nTake advice from people whose opinion you "\
                            "trust, and get specific advice when issues arise.\nYou should "\
                            "use stress management techniques and always hope for the best. "\
                            "These situations arise in everyone's life and what matters the "\
                            "most is taking the right decision at such moments."
                else:
                    if self.prev == "":
                        buddy_msg="It's ohk can you tell me something about your day... Did anything happen today that made you feel worried?\n"
                        self.prev="same"
                        self.one=False
                    else:
                        buddy_msg='It looks like you might be feeling comfortable talking '\
                            'about yourself. Could you share your feelings?\n'
                        self.one=False
                self.counter+=1
                self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
            elif self.counter==3:
                if not self.one:
                    print("Welcome to level 3")
                    keyword=[word for word in preprocess_string(message.lower()) if word in ('friend','work','education','school','college','family','studi','exam','fight')]
                    if len(keyword)>0:
                        buddy_msg = 'Will you please tell me in a bit more detail about it?'
                        self.one=True
                        self.counter=2
                else:
                    buddy_msg= 'I see. Among the thoughts occuring in your mind, which one upsets you the most and why?\n'
                self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
        self.chat_history.text_size = (self.chat_history.width * 0.98, None)
class WindowManager(ScreenManager):
    # Root widget hosting all screens; transitions handled by Kivy.
    pass
# Build the UI from the kv description and register every screen.
kv=Builder.load_file('design.kv')
sm = WindowManager()
screens=[Home(name="home"), Questions(name="quest"), Report(name="rep"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point returning the preconfigured screen manager."""
    def build(self):
        return sm
# Start the app only when executed as a script.
if __name__=='__main__':
    CafeApp().run()
|
{"/backup(main).py": ["/facialrecognition.py"], "/CafeApp.py": ["/facialrecognition.py"]}
|
854
|
riti121/cafe
|
refs/heads/master
|
/CafeBuddy-Ayushi/temp.py
|
import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from collections import Counter
import time
class Home(Screen):
    # Landing screen; all layout/behaviour comes from the kv rules.
    pass
class Questions(Screen):
    """Personality-test screen: 20 binary questions reduced to an MBTI code."""

    # Raw string: the plain literal relied on '\Q' not being a recognised
    # escape, which raises warnings on modern Python.
    ques_path = r'Personality Test(base)\Questions.txt'
    personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}
    personality=''
    questions=[]
    question_1 = ObjectProperty(None)
    question_2 = ObjectProperty(None)
    counter=1
    answers=[0]*20
    # Loaded once at class-creation time; each line carries the two
    # alternatives separated by the literal token 'SPLIT'.
    with open(ques_path) as quest_file:
        questions=[r.split('SPLIT') for r in quest_file.readlines()]
    def personality_exam(self,answers):
        """Reduce the 20 recorded 'a'/'b' answers to a four-letter code on Report."""
        e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']
        # Questions rotate through the four axes: 0->E/I, 1->S/N, 2->T/F, 3->J/P.
        e.extend([answers[r] for r in range(0,20,4)])
        s.extend([answers[r] for r in range(1,20,4)])
        t.extend([answers[r] for r in range(2,20,4)])
        j.extend([answers[r] for r in range(3,20,4)])
        personality=''
        for option in e,s,t,j:
            temp=Counter(option)
            # First letter of the axis wins when 'a' answers dominate.
            personality+=option[0] if temp['a']>temp['b'] else option[1]
        Report.personality=personality
    def on_enter(self, *args):
        """Show the first question pair when the screen becomes active."""
        self.question_1.text=self.questions[0][0]
        self.question_2.text=self.questions[0][1]
    def ask_question1(self):
        """Record answer 'a' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='a'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='a'
            self.counter+=1
    def ask_question2(self):
        """Record answer 'b' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='b'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='b'
            self.counter+=1
class Report(Screen):
    """Shows the computed personality type and its illustration."""

    personality=''
    def on_enter(self, *args):
        self.per.text=Questions.personalities[self.personality]+'\n'+'('+self.personality+')'
        # Raw string avoids the invalid '\I' escape warning in the Windows path.
        self.image.source= Report.personality+r'\INTRODUCTION\Image.png'
class Description(Screen):
    """Shows the long-form introduction text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\INTRODUCTION\Introduction.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class CareerOptions(Screen):
    """Shows the career-suggestion text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\career.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class Strengths(Screen):
    """Shows the strengths text for the computed personality."""

    def on_enter(self, *args):
        self.persona.text=Questions.personalities[Report.personality]
        # Raw string avoids invalid escape warnings in the Windows path.
        file_path=Report.personality+r'\STRENGTHS\Strengths.txt'
        with open(file_path) as file:
            self.detail.text=file.read()
class CameraClick(Screen):
    # Stub in this prototype; the camera logic lives in the main app files.
    pass
class ChatWindow(Screen):
    # Stub in this prototype; the chat logic lives in the main app files.
    pass
class WindowManager(ScreenManager):
    # Root widget hosting all screens; transitions handled by Kivy.
    pass
# Build the UI from the kv description and register every screen.
kv=Builder.load_file('design_edit.kv')
sm = WindowManager()
screens=[Home(name="home"), Questions(name="quest"), Report(name="rep"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point returning the preconfigured screen manager."""
    def build(self):
        return sm
# Start the app only when executed as a script.
if __name__=='__main__':
    CafeApp().run()
|
{"/backup(main).py": ["/facialrecognition.py"], "/CafeApp.py": ["/facialrecognition.py"]}
|
855
|
riti121/cafe
|
refs/heads/master
|
/temp.py
|
import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
import time
class Home(Screen):
    # Prototype screen experimenting with a simple label "animation".
    def animation_begins(self):
        # NOTE(review): rejoining the split words drops the spaces between
        # them, and time.sleep(3) blocks the Kivy UI thread -- confirm this
        # placeholder behaviour is intended.
        textvalue=self.labelvalue.text.split()
        var=" "
        for i in textvalue:
            var+=i
        self.labelvalue.text=var
        time.sleep(3)
class WindowManager(ScreenManager):
    # Root widget hosting the single Home screen.
    pass
# Build the UI from designing.kv and show the single Home screen.
kv=Builder.load_file('designing.kv')
sm = WindowManager()
screens=[Home(name="home")]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point returning the preconfigured screen manager."""
    def build(self):
        return sm
# Start the app only when executed as a script.
if __name__=='__main__':
    CafeApp().run()
|
{"/backup(main).py": ["/facialrecognition.py"], "/CafeApp.py": ["/facialrecognition.py"]}
|
869
|
Maveric4/SudokuSolver
|
refs/heads/master
|
/sudoku_solver.py
|
import numpy as np
from random import randint
from copy import deepcopy
import cv2
import utils
import grid
# Global variables
# Default puzzle (0 = empty cell); replaced at runtime by the grid
# recognized from the input image.
sudoku_grid = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
               [6, 0, 0, 1, 9, 5, 0, 0, 0],
               [0, 9, 8, 0, 0, 0, 0, 6, 0],
               [8, 0, 0, 0, 6, 0, 0, 0, 3],
               [4, 0, 0, 8, 0, 3, 0, 0, 1],
               [7, 0, 0, 0, 2, 0, 0, 0, 6],
               [0, 6, 0, 0, 0, 0, 2, 8, 0],
               [0, 0, 0, 4, 1, 9, 0, 0, 5],
               [0, 0, 0, 0, 8, 0, 0, 0, 9]]
counter = 0      # number of complete solutions found
solutions = []   # deep copies of every solved grid
recur_cnt = 0    # recursion-call budget guard (capped at 10**5)
def is_possible(y, x, n, grid=None):
    """Return True if digit *n* may be placed at row *y*, column *x*.

    Checks the row, the column and the enclosing 3x3 box.  *grid*
    defaults to the module-level ``sudoku_grid`` so existing callers are
    unaffected; passing a grid explicitly makes the check reusable and
    testable without touching global state.
    """
    if grid is None:
        grid = sudoku_grid
    # Row and column conflicts.
    if any(grid[y][col] == n for col in range(9)):
        return False
    if any(grid[row][x] == n for row in range(9)):
        return False
    # Top-left corner of the 3x3 box containing (y, x).
    box_x = (x // 3) * 3
    box_y = (y // 3) * 3
    for dy in range(3):
        for dx in range(3):
            if grid[box_y + dy][box_x + dx] == n:
                return False
    return True
def solve_recursion():
    """Backtracking solver over the global ``sudoku_grid``.

    Appends a deep copy of each complete solution to ``solutions`` and
    counts them in ``counter``.  ``recur_cnt`` caps the total number of
    recursive calls at 10**5 so a misrecognized grid cannot hang.
    """
    global sudoku_grid, counter, solutions, recur_cnt
    recur_cnt += 1
    if recur_cnt > 10**5:
        return
    for y in range(9):
        for x in range(9):
            if sudoku_grid[y][x] == 0:
                for n in range(1, 10):
                    if is_possible(y, x, n):
                        sudoku_grid[y][x] = n
                        solve_recursion()
                        # Undo the placement before trying the next digit.
                        sudoku_grid[y][x] = 0
                return
    # No empty cell left: the grid is a complete solution.
    counter += 1
    solutions.append(deepcopy(sudoku_grid))
def main():
    """Recognize the grid from a puzzle image, solve it and show one solution."""
    global sudoku_grid, counter, solutions
    model = utils.load_mnist_model()
    img = cv2.imread("./SudokuOnline/puzzle1.jpg")
    sudoku_grid = grid.recognize_grid(model, img)
    solve_recursion()
    print("Number or recurrent function invocations: {}".format(recur_cnt))
    print("There are {} possible solutions".format(counter))
    if len(solutions) > 0:
        print("Random solution:")
        # Pick one solution at random when the puzzle is under-constrained.
        solved_grid = solutions[randint(0, counter - 1)]
        print(np.matrix(solved_grid))
        img_solved = grid.draw_solved_grid(model, img, solved_grid)
        cv2.imwrite("./results/result1.jpg", img_solved)
        cv2.imshow("Solved sudoku", img_solved)
        cv2.waitKey(0)
|
{"/sudoku_solver.py": ["/utils.py", "/grid.py"], "/solve_sudoku_from_app.py": ["/utils.py", "/grid.py"], "/MNISTmodel/test_mnist.py": ["/utils.py"], "/grid.py": ["/utils.py"]}
|
870
|
Maveric4/SudokuSolver
|
refs/heads/master
|
/MNISTmodel/train_mnist1.py
|
import tensorflow as tf
import datetime
import os
import numpy as np
from tensorflow.python.keras.callbacks import TensorBoard
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# print(training_images.shape)
# print(test_images.shape)
# Scale pixels to [0, 1] and add the single grayscale channel axis.
training_images = training_images / 255.0
test_images = test_images / 255.0
training_images = training_images.reshape(training_images.shape[0], 28, 28, 1)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)
# Split the original test set: first 40% stays as the test set, the
# remaining 60% becomes the validation split.
test_images, validation_images = np.split(test_images, [int(test_images.shape[0]*0.4)])
test_labels, validation_labels = np.split(test_labels, [int(test_labels.shape[0]*0.4)])
# Two conv+pool stages followed by a dense head for the 10 digit classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])
## Designing callbacks
class myCallback(tf.keras.callbacks.Callback):
    """Stops training as soon as training accuracy exceeds 99.7%."""

    def on_epoch_end(self, epoch, logs=None):
        # Keras passes the metric dict explicitly; avoid the mutable {}
        # default, and guard against a missing 'accuracy' key, which made
        # the original `logs.get('accuracy') > 0.997` raise TypeError.
        print("\nReached {} epoch".format(epoch + 1))
        accuracy = (logs or {}).get('accuracy')
        if accuracy is not None and accuracy > 0.997:
            # Message now matches the actual 0.997 threshold (was "99.99%").
            print("Reached 99.7% accuracy so cancelling training!")
            self.model.stop_training = True
# TensorBoard logs go to a timestamped run directory under logs/fit/.
log_dir = os.path.join(
    "logs",
    "fit",
    datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images,
          training_labels,
          validation_data=(validation_images, validation_labels),
          epochs=20,
          callbacks=[myCallback(), tensorboard_callback],
          verbose=2)
# model.summary()
# Final held-out evaluation, then persist the trained model.
metrics = model.evaluate(test_images, test_labels)
print("[Loss, Accuracy]")
print(metrics)
model.save("./models/train_mnist1_model3.h5")
|
{"/sudoku_solver.py": ["/utils.py", "/grid.py"], "/solve_sudoku_from_app.py": ["/utils.py", "/grid.py"], "/MNISTmodel/test_mnist.py": ["/utils.py"], "/grid.py": ["/utils.py"]}
|
871
|
Maveric4/SudokuSolver
|
refs/heads/master
|
/solve_sudoku_from_app.py
|
import numpy as np
from random import randint
from copy import deepcopy
import cv2
import utils
import grid
import paho.mqtt.client as mqtt
import io
from PIL import Image
# Global variables
# MQTT broker address (name kept as-is -- referenced by main() below).
BROKER_ADRESS = "192.168.9.201"
# Default puzzle (0 = empty); replaced by the grid recognized from the photo.
sudoku_grid = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
               [6, 0, 0, 1, 9, 5, 0, 0, 0],
               [0, 9, 8, 0, 0, 0, 0, 6, 0],
               [8, 0, 0, 0, 6, 0, 0, 0, 3],
               [4, 0, 0, 8, 0, 3, 0, 0, 1],
               [7, 0, 0, 0, 2, 0, 0, 0, 6],
               [0, 6, 0, 0, 0, 0, 2, 8, 0],
               [0, 0, 0, 4, 1, 9, 0, 0, 5],
               [0, 0, 0, 0, 8, 0, 0, 0, 9]]
counter = 0      # number of complete solutions found
solutions = []   # deep copies of every solved grid
recur_cnt = 0    # recursion-call budget guard (capped at 10**5)
IMG_NAME = 'puzzle1.jpg'  # filename used for both received and result images
def on_connect(client, userdata, flags, rc):
    """Paho connect callback: report the result code, then watch all sudoku topics."""
    print("Connected to broker with result code " + str(rc))
    client.subscribe("sudoku/#")
def on_message(client, userdata, msg):
    """Paho message callback: save an incoming puzzle photo, solve it and reply.

    A payload of "End" on any subscribed topic disconnects the client.
    """
    global counter
    counter = 0
    if msg.topic == "sudoku/photo":
        try:
            # Decode the raw payload bytes into an OpenCV-compatible image.
            stream = io.BytesIO(msg.payload)
            open_cv_image = np.array(Image.open(stream).convert('RGB'))
            # Convert RGB to BGR
            open_cv_image = open_cv_image[:, :, ::-1].copy()
            cv2.imwrite('./mqtt_com/' + IMG_NAME, open_cv_image)
        except Exception as e:
            print("Exception: ")
            print(e)
        solve_sudoku()
        send_solution(client)
    if msg.payload.decode() == "End":
        print("Okey! I'm disconnecting :)")
        client.disconnect()
def send_message(client, topic, msg):
    """Publish *msg* on *topic* through the given MQTT client."""
    client.publish(topic, msg)
def is_possible(y, x, n, grid=None):
    """Return True if digit *n* may be placed at row *y*, column *x*.

    Checks the row, the column and the enclosing 3x3 box.  *grid*
    defaults to the module-level ``sudoku_grid`` so existing callers are
    unaffected; passing a grid explicitly makes the check reusable and
    testable without touching global state.
    """
    if grid is None:
        grid = sudoku_grid
    # Row and column conflicts.
    if any(grid[y][col] == n for col in range(9)):
        return False
    if any(grid[row][x] == n for row in range(9)):
        return False
    # Top-left corner of the 3x3 box containing (y, x).
    box_x = (x // 3) * 3
    box_y = (y // 3) * 3
    for dy in range(3):
        for dx in range(3):
            if grid[box_y + dy][box_x + dx] == n:
                return False
    return True
def solve_recursion():
    """Backtracking solver over the global ``sudoku_grid``.

    Appends a deep copy of each complete solution to ``solutions`` and
    counts them in ``counter``.  ``recur_cnt`` caps the total number of
    recursive calls at 10**5 so a misrecognized grid cannot hang.
    """
    global sudoku_grid, counter, solutions, recur_cnt
    recur_cnt += 1
    if recur_cnt > 10**5:
        return
    for y in range(9):
        for x in range(9):
            if sudoku_grid[y][x] == 0:
                for n in range(1, 10):
                    if is_possible(y, x, n):
                        sudoku_grid[y][x] = n
                        solve_recursion()
                        # Undo the placement before trying the next digit.
                        sudoku_grid[y][x] = 0
                return
    # No empty cell left: the grid is a complete solution.
    counter += 1
    solutions.append(deepcopy(sudoku_grid))
def solve_sudoku():
    """Recognize the saved photo, solve the puzzle, and write a result image.

    Side effects: rebinds the global ``sudoku_grid`` to the recognized board,
    fills ``solutions``/``counter`` via solve_recursion(), and saves one
    randomly chosen solution drawn onto the photo under ./results/.
    """
    global sudoku_grid, counter, solutions
    model = utils.load_mnist_model()
    img = cv2.imread("./mqtt_com/" + IMG_NAME)
    sudoku_grid = grid.recognize_grid(model, img)
    solve_recursion()
    print("Number or recurrent function invocations: {}".format(recur_cnt))
    print("There are {} possible solutions".format(counter))
    if len(solutions) > 0:
        print("Random solution:")
        solved_grid = solutions[randint(0, counter - 1)]
        print(np.matrix(solved_grid))
        img_solved = grid.draw_solved_grid(model, img, solved_grid)
        cv2.imwrite("./results/" + IMG_NAME, img_solved)
    # cv2.imshow("Solved sudoku", img_solved)
    # cv2.waitKey(0)
def send_solution(client):
    """Publish the solved-grid image over MQTT as a raw byte payload."""
    with open("./results/" + IMG_NAME, "rb") as result_file:
        payload = bytearray(result_file.read())
    client.publish("sudoku/solution/photo", payload)
    # client.publish("sudoku/solution/grid", str(solutions[randint(0, counter - 1)]))
def main():
    """Connect to the MQTT broker and process sudoku requests forever."""
    client = mqtt.Client()
    # BUG FIX: register callbacks *before* connect().  paho-mqtt may deliver
    # the CONNACK as soon as the network loop runs; assigning on_connect
    # afterwards can miss it and the sudoku/# subscription is never made.
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect(BROKER_ADRESS, 1883, 60)
    client.loop_forever()
if __name__ == "__main__":
main()
|
{"/sudoku_solver.py": ["/utils.py", "/grid.py"], "/solve_sudoku_from_app.py": ["/utils.py", "/grid.py"], "/MNISTmodel/test_mnist.py": ["/utils.py"], "/grid.py": ["/utils.py"]}
|
872
|
Maveric4/SudokuSolver
|
refs/heads/master
|
/MNISTmodel/test_mnist.py
|
# Smoke test for the MNIST digit classifier used by the sudoku solver:
# loads one digit image and checks both prediction paths exposed by utils.
# import tensorflow as tf
import cv2
import sys
sys.path.append("..")  # make the repo-root utils module importable
import utils
import numpy as np
model_path = "./models/train_mnist1_model3.h5"
img_path = "../img/seven.png"
# img_path = "../img/one.png"
# img_path = "../img/six.png"
mnist_model = utils.load_model(model_path)
## Way 1: manual pipeline (resize -> binarize -> reshape) + model.predict
print("Way 1")
digit_img = utils.standarize_digit_img_to_model_input(img_path, 28)
bin_digit_img = utils.binarize_img(digit_img)
img = utils.prepare_to_predict(bin_digit_img)
cv2.imshow("Digit", digit_img)
cv2.imshow("Binary digit", bin_digit_img)
cv2.waitKey(50)
prob_predictions = mnist_model.predict(img)
# argmax over each row of class probabilities
prediction = [(np.where(item == np.amax(item)))[0][0] for item in prob_predictions]
print("Prediction: {}".format(prediction[0]))
## Way 2: the packaged helper, which runs the same pipeline internally
print("Way 2")
prediction = utils.predict_digit(mnist_model, img_path)
print("Prediction: {}".format(prediction))
|
{"/sudoku_solver.py": ["/utils.py", "/grid.py"], "/solve_sudoku_from_app.py": ["/utils.py", "/grid.py"], "/MNISTmodel/test_mnist.py": ["/utils.py"], "/grid.py": ["/utils.py"]}
|
873
|
Maveric4/SudokuSolver
|
refs/heads/master
|
/grid.py
|
import cv2
from copy import deepcopy
import numpy as np
import utils
RESCALE = 3
def find_cell_param(joints):
    """Estimate the sudoku cell size from the grid-line intersection mask.

    Detects the intersection blobs in *joints*, takes the two extreme
    keypoints (sorted by x then y), and divides each axis span by 7.
    Returns (x_span/7, y_span/7, min_pt, max_pt).
    NOTE(review): callers unpack the first two values as (cell_height,
    cell_width) — the naming looks swapped relative to the x/y axes; confirm.
    """
    # Set up the detector with default parameters.
    params = cv2.SimpleBlobDetector_Params()
    # filter by area: grid-line joints are small blobs
    params.filterByArea = True
    params.minArea = 1
    params.maxArea = 50
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs on the inverted mask (the detector looks for dark blobs).
    keypoints = detector.detect(~joints)
    sorted_keypoints = sorted(keypoints, key=lambda x: (x.pt[0], x.pt[1]))
    min_keypoint = sorted_keypoints[0]
    max_keypoint = sorted_keypoints[-1]
    # for it, keypoint in enumerate(keypoints):
    # img_contours = deepcopy(img)
    # Draw detected blobs as red circles.
    # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
    # im_with_keypoints = cv2.drawKeypoints(img_contours, [min_keypoint, max_keypoint], np.array([]), (0, 0, 255),
    #                                       cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # cv2.imshow("Keypoints", im_with_keypoints)
    # cv2.waitKey(0)
    return (max_keypoint.pt[0] - min_keypoint.pt[0]) / 7, (max_keypoint.pt[1] - min_keypoint.pt[1]) / 7, min_keypoint.pt, max_keypoint.pt
def get_joints(img):
    """Binarize a sudoku photo and locate the grid-line intersections.

    Returns (bin_img, joints): the adaptively thresholded (inverted) image,
    and a mask containing only the points where horizontal and vertical grid
    lines cross.  Lines are isolated by morphological opening with a long
    kernel along each axis, per the OpenCV line-extraction technique.
    """
    img = cv2.resize(img, (int(img.shape[1]/RESCALE), int(img.shape[0]/RESCALE)))
    # retval = cv2.getPerspectiveTransform(img) TO DO https://blog.ayoungprogrammer.com/2013/03/tutorial-creating-multiple-choice.html/
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    bin_img = cv2.adaptiveThreshold(cv2.bitwise_not(img_gray), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)
    scale = 20
    # Opening with a wide (resp. tall) rectangular kernel keeps only long
    # horizontal (resp. vertical) strokes.
    horizontal_size = bin_img.shape[0] // scale
    horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    img_eroded_horizontal = cv2.erode(bin_img, horizontal_structure, anchor=(-1, -1))
    # BUG FIX: the second step of an opening is a dilation; the original
    # called cv2.erode twice, thinning the lines instead of restoring them.
    img_dilated_horizontal = cv2.dilate(img_eroded_horizontal, horizontal_structure, anchor=(-1, -1))
    vertical_size = bin_img.shape[1] // scale
    vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))
    img_eroded_vertical = cv2.erode(bin_img, vertical_structure, anchor=(-1, -1))
    img_dilated_vertical = cv2.dilate(img_eroded_vertical, vertical_structure, anchor=(-1, -1))
    # mask = img_dilated_vertical + img_dilated_horizontal
    # Only the intersections survive the AND of the two directional masks.
    joints = cv2.bitwise_and(img_dilated_horizontal, img_dilated_vertical)
    return bin_img, joints
def recognize_grid(model, img):
    """OCR the 9x9 sudoku board from a photo into 9 rows of digits (0 = empty)."""
    bin_img, joints = get_joints(img)
    cell_height, cell_width, min_pt, max_pt = find_cell_param(joints)
    grid = []
    # Indices run -1..7 because min_pt is the first detected *interior*
    # joint, so the outer cells sit one cell before it — TODO confirm.
    for x in range(-1, 8):
        row = []
        for y in range(-1, 8):
            roi = bin_img[int(min_pt[1]+cell_width*x):int(min_pt[1]+cell_width*(x+1)),
                          int(min_pt[0]+cell_height*y):int(min_pt[0]+cell_height*(y+1))]
            # Trim a 10% margin on every side to drop the grid lines.
            alpha = 0.1
            roi = roi[int(roi.shape[1]*alpha):int(roi.shape[1]*(1-alpha)), int(roi.shape[0]*alpha):int(roi.shape[0]*(1-alpha))]
            row.append(utils.predict_digit(model, roi))
            # cv2.imshow("ROI: ", roi)
            # cv2.waitKey(0)
        grid.append(row)
    return grid
def draw_solved_grid(model, img, solved_sudoku):
    """Render *solved_sudoku* onto the photo, writing only into empty cells.

    Re-runs the same cell extraction as recognize_grid(); a cell predicted as
    0 (empty/unsure) gets its solved digit drawn in red at the cell center.
    Returns the annotated, rescaled image.
    """
    solved_img = deepcopy(cv2.resize(img, (int(img.shape[1] / RESCALE), int(img.shape[0] / RESCALE))))
    bin_img, joints = get_joints(img)
    cell_height, cell_width, min_pt, max_pt = find_cell_param(joints)
    for x in range(-1, 8):
        for y in range(-1, 8):
            roi = bin_img[int(min_pt[1]+cell_width*x):int(min_pt[1]+cell_width*(x+1)),
                          int(min_pt[0]+cell_height*y):int(min_pt[0]+cell_height*(y+1))]
            # Same 10% margin trim as recognize_grid().
            alpha = 0.1
            roi = roi[int(roi.shape[1]*alpha):int(roi.shape[1]*(1-alpha)), int(roi.shape[0]*alpha):int(roi.shape[0]*(1-alpha))]
            if utils.predict_digit(model, roi) == 0:
                # Cell-center text anchor, nudged for the font baseline.
                pt = (int((min_pt[0] + cell_height * y + min_pt[0] + cell_height * (y + 1))/2) - 5, int((min_pt[1] + cell_width * x + min_pt[1] + cell_width * (x + 1))/2)+8)
                cv2.putText(solved_img, str(solved_sudoku[x+1][y+1]), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
    return solved_img
def main():
    """Manual test: recognize the bundled sample puzzle and display it."""
    model = utils.load_mnist_model()
    img = cv2.imread("./SudokuOnline/puzzle1.jpg")
    sudoku_grid = recognize_grid(model, img)
    print(np.matrix(sudoku_grid))
    img = cv2.resize(img, (int(img.shape[1]/RESCALE), int(img.shape[0]/RESCALE)))
    cv2.imshow("Img: ", img)
    # cv2.imshow("Gray: ", img_gray)
    # cv2.imshow("Bin: ", bin_img)
    # cv2.imshow("Dilated horizontal: ", img_dilated_horizontal)
    # cv2.imshow("Dilated vertical: ", img_dilated_vertical)
    # cv2.imshow("Joints: ", joints)
    # cv2.imshow("Mask: ", mask)
    cv2.waitKey(0)
if __name__ == "__main__":
main()
|
{"/sudoku_solver.py": ["/utils.py", "/grid.py"], "/solve_sudoku_from_app.py": ["/utils.py", "/grid.py"], "/MNISTmodel/test_mnist.py": ["/utils.py"], "/grid.py": ["/utils.py"]}
|
874
|
Maveric4/SudokuSolver
|
refs/heads/master
|
/utils.py
|
import cv2
import numpy as np
import tensorflow
def standarize_digit_img_to_model_input(img, size):
    """Load *img* when given as a file path, then resize it to (size, size)."""
    source = cv2.imread(img) if isinstance(img, str) else img
    return cv2.resize(source, (size, size))
def binarize_img(img):
    """Otsu-binarize an RGB image and invert it (digits become white on black)."""
    grayscale = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    blurred = cv2.GaussianBlur(grayscale, (5, 5), 0)
    _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return cv2.bitwise_not(thresholded)
def prepare_to_predict(img):
    """Scale a 28x28 grayscale image to [0, 1] and shape it as an NHWC batch of one."""
    scaled = img / 255.0
    return scaled.reshape(1, 28, 28, 1)
def predict_digit(model, img):
    """Classify one digit image; return 0 when the model is unsure.

    *img* may be a file path or an array (colour or already-binary).  It is
    resized to 28x28, binarized when 3-channel, and fed to the Keras model.
    If no class probability exceeds 0.7 the cell is treated as empty (0).
    NOTE(review): sudoku has no 0 digit, so conflating "unsure" with 0 is
    safe here, but this helper is not general-purpose.
    """
    digit_img = standarize_digit_img_to_model_input(img, 28)
    # BUG FIX: inspect the loaded/resized array, not the raw argument —
    # ``img`` may be a filesystem path (str), which has no ``.shape``
    # (test_mnist.py's "Way 2" passes a path and would crash otherwise).
    if len(digit_img.shape) == 3:
        bin_digit_img = binarize_img(digit_img)
    else:
        bin_digit_img = digit_img
    img = prepare_to_predict(bin_digit_img)
    prob_predictions = model.predict(img)
    if np.any(prob_predictions > 0.7):
        # argmax over each row of class probabilities
        prediction = [(np.where(item == np.amax(item)))[0][0] for item in prob_predictions]
        return prediction[0]
    else:
        return 0
def load_model(model_path):
    """Load a saved Keras model (.h5) from *model_path*."""
    return tensorflow.keras.models.load_model(model_path)
def load_mnist_model(model_path="./MNISTmodel/models/train_mnist1_model3.h5"):
    """Load the pre-trained MNIST digit classifier.

    Generalization: *model_path* defaults to the repo-relative location the
    scripts use, so existing no-argument callers are unchanged, while tests
    or alternative deployments can point at a different checkpoint.
    """
    return tensorflow.keras.models.load_model(model_path)
|
{"/sudoku_solver.py": ["/utils.py", "/grid.py"], "/solve_sudoku_from_app.py": ["/utils.py", "/grid.py"], "/MNISTmodel/test_mnist.py": ["/utils.py"], "/grid.py": ["/utils.py"]}
|
901
|
avinash-arjavalingam/262_project
|
refs/heads/main
|
/workloads/toy/simple_system.py
|
from simulator.event_queue import EventQueue
from simulator.resource import *
from simulator.dag import Dag
from simulator.system import System
from workloads.toy.linear_dag import linear_dag_clockwork_data, linear_instance_list, linear_instance_placements
class SimpleSystem(System):
    """Greedy scheduler that routes each admitted DAG to a single pool.

    Admission is all-or-nothing per DAG: a clockwork-style whole-DAG estimate
    decides between the GPU and CPU pools (GPU preferred); DAGs fitting
    neither budget are dropped.
    """
    pools: Dict[str, ResourcePool]
    def __init__(self,_events: EventQueue, _pools: Dict[str, ResourcePool]):
        super().__init__(_events)
        self.pools = _pools
        # dag name -> chosen resource name ('STD_GPU' or 'STD_CPU')
        self.dag_maps = {}
    def schedule(self, curr_time, events, *args, **kwargs):
        """One scheduling tick: reap finished functions, admit new DAGs,
        then place each runnable request's next function on its pool."""
        # First check for any completed functions
        for name, pool in self.pools.items():
            for resource in pool.get_all_resources():
                completed = resource.remove_at_time(curr_time)
                for (fid, tag) in completed:
                    assert tag in self.outstanding_requests, "Tag needs to map to an outstanding request"
                    # flag True marks the request runnable again.
                    self.outstanding_requests[tag] = (True, self.outstanding_requests[tag][1])
        # Now process any new events
        for (dag, input) in events:
            # for linear_instance in linear_instance_list:
            #     print(linear_instance.id_res_map)
            #     print(linear_instance.running_time)
            #     print(linear_instance.running_cost)
            # for price_instance in linear_instance_placements.price_list:
            #     print(price_instance.running_cost)
            # for time_instance in linear_instance_placements.time_list:
            #     print(time_instance.running_time)
            # sample_placement = (linear_instance_placements.get_sample_list(10000, 10000))[0]
            # self.dag_maps = sample_placement.id_res_map
            print(linear_dag_clockwork_data)
            # Admission: index [1] is the GPU [time, cost] estimate, [0] the
            # CPU one; 20/85 are hard-coded latency/cost budgets.
            # NOTE(review): the estimate is always for the linear toy DAG,
            # regardless of which dag arrived — confirm this is intentional.
            if linear_dag_clockwork_data[1][0] < 20 and linear_dag_clockwork_data[1][1] < 85:
                self.dag_maps[dag.name] = 'STD_GPU'
            elif linear_dag_clockwork_data[0][0] < 20 and linear_dag_clockwork_data[0][1] < 85:
                self.dag_maps[dag.name] = 'STD_CPU'
            else:
                continue
            # print(dag_maps)
            # for sample_instance in linear_instance_placements.get_sample_list(10000, 10000):
            #     print(sample_instance.running_time)
            #     print(sample_instance.running_cost)
            # print("Done")
            # print("Hello")
            dag.execute() # Need to do this to seal the DAG
            self.outstanding_requests[self.__generate_tag(dag, curr_time)] = (True, dag)
        # Now schedule functions
        for tag, (flag, dag) in self.outstanding_requests.copy().items():
            if flag:
                if dag.has_next_function():
                    # Find which resource is faster
                    nxt = dag.peek_next_function()
                    # std_cpu = nxt.resources['STD_CPU']
                    # std_gpu = nxt.resources['STD_GPU']
                    # cpu_time = std_cpu['pre'].get_runtime() + std_cpu['exec'].get_runtime() + std_cpu['post'].get_runtime()
                    # gpu_time = std_gpu['pre'].get_runtime() + std_gpu['exec'].get_runtime() + std_gpu['post'].get_runtime()
                    # if cpu_time < gpu_time:
                    #     pool = self.pools['STD_CPU_POOL']
                    # else:
                    #     pool = self.pools['STD_GPU_POOL']
                    # print(self.dag_maps)
                    # print(nxt.unique_id)
                    if self.dag_maps[dag.name] == 'STD_GPU':
                        pool = self.pools['STD_GPU_POOL']
                        # print("GPU")
                    else:
                        pool = self.pools['STD_CPU_POOL']
                        # print("CPU")
                    # If there is a resource available, schedule it
                    result : Optional[Tuple[str, Resource]] = pool.find_first_available_resource(nxt, tag)
                    if result:
                        (name, rsrc) = result
                        rsrc.add_function(dag.next_function(), tag, curr_time)
                        # flag False: this request is busy until completion.
                        self.outstanding_requests[tag] = (False, self.outstanding_requests[tag][1])
                else:
                    # Remove if there is no next function
                    self.outstanding_requests.pop(tag)
    def __generate_tag(self, dag: Dag, time: int):
        """Unique per-request tag: dag name, admission time, and object id."""
        return f"{dag.name}:{time}:{id(dag)}"
    def __decode_tag(self, tag: str) -> Dag:
        # NOTE(review): returns the stored (flag, dag) tuple, not a bare Dag
        # as the annotation claims — confirm before relying on the type.
        return self.outstanding_requests[tag]
|
{"/workloads/toy/simple_system.py": ["/workloads/toy/linear_dag.py"]}
|
902
|
avinash-arjavalingam/262_project
|
refs/heads/main
|
/workloads/toy/linear_dag.py
|
from simulator.dag import Dag, Function
from simulator.resource import ResourceType
from simulator.runtime import ConstantTime
from .constants import *
from random import randint, sample
from bisect import bisect
# linear_first = Function(
# unique_id='linear_first',
# resources= {
# 'STD_CPU' : {
# 'type' : ResourceType.CPU,
# 'space': 100.0, # Ignoring space this function requires on the CPU
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(3),
# 'post' : ConstantTime(0)
# },
# 'STD_GPU' : {
# 'type' : ResourceType.GPU,
# 'space': 100.0,
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(2),
# 'post' : ConstantTime(0)
# }
# }
# )
#
# linear_second = Function( # This function takes a long time to run on a CPU
# unique_id='linear_second',
# resources= {
# 'STD_CPU' : {
# 'type' : ResourceType.CPU,
# 'space': 100.0, # Ignoring space this function requires on the CPU
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(5),
# 'post' : ConstantTime(0)
# },
# 'STD_GPU' : {
# 'type' : ResourceType.GPU,
# 'space': 100.0,
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(1),
# 'post' : ConstantTime(0)
# }
# }
# )
#
# linear_third = Function( # This function takes a long time to run on a GPU
# unique_id='linear_third',
# resources= {
# 'STD_CPU' : {
# 'type' : ResourceType.CPU,
# 'space': 100.0, # Ignoring space this function requires on the CPU
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(1),
# 'post' : ConstantTime(0)
# },
# 'STD_GPU' : {
# 'type' : ResourceType.GPU,
# 'space': 100.0,
# 'pre' : ConstantTime(1),
# 'exec' : ConstantTime(5),
# 'post' : ConstantTime(0)
# }
# }
# )
linear_first = Function(
unique_id='linear_first',
resources= {
'STD_CPU' : {
'type' : ResourceType.CPU,
'space': 100.0, # Ignoring space this function requires on the CPU
'pre' : ConstantTime(1),
'exec' : ConstantTime(3),
'post' : ConstantTime(0)
},
'STD_GPU' : {
'type' : ResourceType.GPU,
'space': 100.0,
'pre' : ConstantTime(1),
'exec' : ConstantTime(1),
'post' : ConstantTime(0)
}
}
)
linear_second = Function( # This function takes a long time to run on a CPU
unique_id='linear_second',
resources= {
'STD_CPU' : {
'type' : ResourceType.CPU,
'space': 100.0, # Ignoring space this function requires on the CPU
'pre' : ConstantTime(1),
'exec' : ConstantTime(5),
'post' : ConstantTime(0)
},
'STD_GPU' : {
'type' : ResourceType.GPU,
'space': 100.0,
'pre' : ConstantTime(1),
'exec' : ConstantTime(2),
'post' : ConstantTime(0)
}
}
)
linear_third = Function( # This function takes a long time to run on a GPU
unique_id='linear_third',
resources= {
'STD_CPU' : {
'type' : ResourceType.CPU,
'space': 100.0, # Ignoring space this function requires on the CPU
'pre' : ConstantTime(1),
'exec' : ConstantTime(8),
'post' : ConstantTime(0)
},
'STD_GPU' : {
'type' : ResourceType.GPU,
'space': 100.0,
'pre' : ConstantTime(1),
'exec' : ConstantTime(3),
'post' : ConstantTime(0)
}
}
)
# Add costs to functions: derive each placement's monetary cost from its
# exec runtime and the per-resource price constants (from .constants).
all_funs = [linear_first, linear_second, linear_third]
for f in all_funs:
    for rsrc_name, specs in f.resources.items():
        if rsrc_name == 'STD_CPU':
            specs['cost'] = COST_PER_CPU_TIME * specs['exec'].get_runtime()
        else:
            specs['cost'] = COST_PER_GPU_TIME * specs['exec'].get_runtime()
linear_dag = Dag('linear', funs=[linear_first, linear_second, linear_third])
linear_dag.add_edge(linear_first, linear_second)
linear_dag.add_edge(linear_second, linear_third)
linear_dag.sanity_check()
def gen_clockwork(dag_functions):
    """Aggregate the whole-DAG runtime and cost on CPU versus GPU.

    Sums pre+exec+post runtimes and per-function costs over *dag_functions*
    for each resource.  Returns [[cpu_time, cpu_cost], [gpu_time, gpu_cost]].
    """
    totals = {'STD_CPU': [0, 0], 'STD_GPU': [0, 0]}
    for func in dag_functions:
        for res_name, bucket in totals.items():
            spec = func.resources[res_name]
            bucket[0] += (spec['pre'].get_runtime()
                          + spec['exec'].get_runtime()
                          + spec['post'].get_runtime())
            bucket[1] += spec['cost']
    return [totals['STD_CPU'], totals['STD_GPU']]
linear_dag_clockwork_data = gen_clockwork(linear_dag.functions.values())
class DAGInstance:
    """One candidate placement of a DAG: a per-function resource assignment
    together with the accumulated (serialized) running time and cost."""
    def __init__(self, dag):
        self.dag = dag
        self.running_time = 0   # sum of pre+exec+post runtimes over placed functions
        self.running_cost = 0   # sum of per-function costs over placed functions
        # self.functions_per_resource = {}
        self.id_res_map = {}    # function unique_id -> resource name
        # self.id_max_map = {}
        # for res in ["GPU", "CPU"]:
        #     self.functions_per_resource[res] = []
    # def add_func_res(self, function, resource):
    #     func_tuple = (function.id, function.get_max_memory(resource))
    #     self.functions_per_resource[resource].append(func_tuple)
    def copy_dag_instance(self):
        """Copy the placement map and totals; the dag reference is shared."""
        new_dag_instance = DAGInstance(self.dag)
        for id_one, res in list(self.id_res_map.items()):
            new_dag_instance.id_res_map[id_one] = res
        # for id_two, max_prev in self.id_max_map:
        #     new_dag_instance.id_max_map[id_two] = max_prev
        # for i in range(len(self.functions_per_resource)):
        #     for func_tuple in self.functions_per_resource[i]:
        #         new_tuple = (func_tuple[0], func_tuple[1])
        #         new_dag_instance.functions_per_resource[i].append(new_tuple)
        new_dag_instance.running_cost = self.running_cost
        new_dag_instance.running_time = self.running_time
        return new_dag_instance
    def update_dag_instance(self, this_function, res):
        """Place *this_function* on *res*, adding its serialized runtime
        (pre+exec+post) and cost to the running totals."""
        self.id_res_map[this_function.unique_id] = res
        # func_time = func.get_resource_runtime(res) + self.id_max_map[func.id]
        # for root_next_func in func.next_funcs:
        #     next_max_time = 0
        #     if root_next_func.id in self.id_max_map:
        #         next_max_time = self.id_max_map[root_next_func.id]
        #     self.id_max_map[root_next_func.id] = max(func_time, next_max_time)
        # self.running_time = max(self.running_time, func_time)
        func_res = this_function.resources[res]
        self.running_time = self.running_time + func_res['pre'].get_runtime() + func_res['exec'].get_runtime() + func_res['post'].get_runtime()
        self.running_cost = self.running_cost + func_res['cost']
        # self.add_func_res(func, res)
        # self.id_max_map.pop(func.id, None)
resource_list = ['STD_CPU', 'STD_GPU']
def gen_dag_instances(dag):
    """Enumerate every CPU/GPU placement of the given function list.

    *dag* is a list of Function objects in dependency order; this returns
    2**len(dag) DAGInstance objects (the cartesian product of resource
    choices).
    NOTE(review): the input list is consumed in place via pop(0), and the
    same (now-shrinking) list object is stored as DAGInstance.dag — confirm
    callers pass a throwaway list.
    """
    dep_queue = dag
    instance_list = []
    root = dep_queue.pop(0)
    # Seed one instance per placement of the root function.
    for root_res in list(resource_list):
        root_dag_instance = DAGInstance(dag)
        root_dag_instance.id_res_map[root.unique_id] = root_res
        # print(root_dag_instance.id_res_map[root.unique_id])
        # for root_next_func in root.next_funcs:
        #     root_dag_instance.id_max_map[root_next_func.id] = root.get_resource_runtime(root_res)
        root_func_res = root.resources[root_res]
        root_dag_instance.running_time = root_func_res['pre'].get_runtime() + root_func_res['exec'].get_runtime() + root_func_res['post'].get_runtime()
        root_dag_instance.running_cost = root_func_res['cost']
        # root_dag_instance.add_func_res(root, root_res)
        instance_list.append(root_dag_instance)
    # Extend each partial placement with every resource choice for the next function.
    while len(dep_queue) > 0:
        function = dep_queue.pop(0)
        new_instance_list = []
        for dag_instance in instance_list:
            for res in list(resource_list):
                new_dag_instance = dag_instance.copy_dag_instance()
                new_dag_instance.update_dag_instance(function, res)
                new_instance_list.append(new_dag_instance)
        instance_list = new_instance_list
    # for finished_dag_instance in instance_list:
    #     for func_res in list(finished_dag_instance.functions_per_resource.values()):
    #         sorted(func_res, key=lambda x: x[1])
    return instance_list
def select_pareto_instances(instance_list):
    """Return the Pareto-optimal instances by (running_time, running_cost).

    An instance is dominated only when another instance is no worse in both
    dimensions AND strictly better in at least one.

    BUG FIX: the original used a pure <=/<= test, so two instances with
    identical (time, cost) dominated each other and *both* were discarded,
    silently dropping valid frontier points.  Ties are now kept.
    """
    pareto_list = []
    for instance in instance_list:
        dominated = False
        for comp in instance_list:
            if comp is instance:
                continue
            no_worse = (comp.running_time <= instance.running_time
                        and comp.running_cost <= instance.running_cost)
            strictly_better = (comp.running_time < instance.running_time
                               or comp.running_cost < instance.running_cost)
            if no_worse and strictly_better:
                dominated = True
                break
        if not dominated:
            pareto_list.append(instance)
    return pareto_list
linear_instance_list = select_pareto_instances(gen_dag_instances([linear_first, linear_second, linear_third]))
class DAGSelector:
    """Samples SLO-satisfying placements from a (Pareto-filtered) instance list."""
    def __init__(self, instance_list, sample_size):
        self.price_list = sorted(instance_list, key=lambda x: x.running_cost)
        self.time_list = sorted(instance_list, key=lambda x: x.running_time)
        # Clamp the requested sample size into [1, len(instance_list)].
        self.sample_size = int(max(min(sample_size, len(self.price_list)), 1))
    def binary_find_index(self, value, this_list, type):
        """Bisect position of *value* among running_cost keys when
        type == "price", otherwise among running_time keys."""
        keys = []
        if type == "price":
            keys = [this_inst.running_cost for this_inst in this_list]
        else:
            keys = [this_inst.running_time for this_inst in this_list]
        pos = (bisect(keys, value, 0, len(this_list)))
        return pos
    def get_sample_list(self, price_slo, time_slo):
        """Randomly sample up to sample_size instances meeting both SLOs.

        NOTE(review): the literal "cost" below falls into binary_find_index's
        running_time branch, which is what time_slo needs — the label is just
        misleading.  The window arithmetic assumes price_list and time_list
        order the same instances inversely (cheapest <-> slowest); confirm.
        """
        sample_list = []
        price_index = self.binary_find_index(price_slo, self.price_list, "price")
        time_index = self.binary_find_index(time_slo, self.time_list, "cost")
        if (price_index <= 0) or (time_index <= 0):
            return []
        end_index = len(self.price_list) - time_index
        valid_size = price_index - end_index
        if valid_size <= 0:
            return []
        valid_list = self.price_list[end_index:price_index]
        min_size = min(self.sample_size, len(valid_list))
        sample_list = sample(valid_list, min_size)
        return sample_list
    def get_placements(self, cluster, sample_instance):
        """Greedy first-fit of each function onto the cluster's per-resource
        node lists, ordered by free memory.

        Returns {function_id: node_id}, or {} when some function cannot fit.
        NOTE(review): relies on sample_instance.functions_per_resource, which
        is commented out in DAGInstance — as the code stands this would raise
        AttributeError; confirm before use.
        """
        function_place_map = {}
        for res, res_list in list(sample_instance.functions_per_resource.items()):
            res_nodes = cluster.nodes_by_res[res]
            updated_nodes = []
            for func_id, func_mem in res_list:
                placed = False
                done = False
                while (not placed) and (not done):
                    if len(res_nodes) == 0:
                        done = True
                    elif func_mem <= res_nodes[0].memory:
                        function_place_map[func_id] = res_nodes[0].id
                        res_nodes[0].memory = res_nodes[0].memory - func_mem
                        placed = True
                    else:
                        # Head node too small: set it aside and try the next.
                        popped_node = res_nodes.pop(0)
                        updated_nodes.append(popped_node)
                if done:
                    break
            if len(res_nodes) == 0:
                # Placement failed: restore the node list and signal failure.
                cluster.nodes_by_res[res] = sorted(updated_nodes, key=lambda x: x.memory)
                return {}
            else:
                res_nodes.extend(updated_nodes)
                cluster.nodes_by_res[res] = sorted(res_nodes, key=lambda x: x.memory)
        return function_place_map
linear_instance_placements = DAGSelector(linear_instance_list, 1)
|
{"/workloads/toy/simple_system.py": ["/workloads/toy/linear_dag.py"]}
|
903
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/charitydonation/models.py
|
import datetime
from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
# from ProjektPortfolioLab.donation import settings
from django.conf import settings
User = settings.AUTH_USER_MODEL
class Category(models.Model):
    """A donation category an institution can accept (e.g. clothes, toys)."""
    name = models.CharField(max_length=64)
    def __str__(self):
        return self.name
INSTITUTIONS = (
('1', "Fundacja"),
('2', "Organizacja pozarządowa"),
('3', "Zbiórka lokalna"),
)
class Institution(models.Model):
    """A charity that accepts donations.

    NOTE(review): ``istitution_name`` is a typo for ``institution_name``;
    renaming requires a migration and touches templates/callers, so it is
    only flagged here.
    """
    istitution_name = models.CharField(max_length=128)
    description = models.TextField()
    # One of INSTITUTIONS: '1' fundacja, '2' NGO, '3' local collection.
    type = models.CharField(max_length=2, choices=INSTITUTIONS, default='1')
    categories = models.ManyToManyField(Category)
    def __str__(self):
        return self.istitution_name
class Donation(models.Model):
    """A pledge of *quantity* bags for *institution* with pickup details."""
    quantity = models.IntegerField()  # number of bags
    categories = models.ManyToManyField(Category)
    institution = models.ForeignKey(Institution, on_delete=models.CASCADE)
    address = models.TextField()
    phone_number = models.CharField(max_length=12)
    city = models.CharField(max_length=64)
    zip_code = models.TextField()
    pick_up_date = models.DateField()
    # The default is the ``datetime.time`` class itself; Django calls
    # callable defaults, so the effective default is time(0, 0) (midnight).
    pick_up_time = models.TimeField(default=datetime.time)
    pick_up_comment = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
#
# class CustomUser(AbstractUser):
# email = models.EmailField(_('email address'), unique=True)
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
904
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/charitydonation/admin.py
|
from django.contrib import admin
from .models import Category, Institution, Donation
# Expose the charity models in the Django admin site.
admin.site.register(Category)
admin.site.register(Institution)
admin.site.register(Donation)
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
905
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/donation/urls.py
|
"""donation URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from charitydonation.views import LandingPage, AddDonation, UserView, PasswordChangeView, PasswordChangeDoneView, DonationReady
from accounts.views import RegisterView, LoginView, LogoutView
urlpatterns = [
path('admin/', admin.site.urls),
path('', LandingPage.as_view(), name='landing-page'),
path('add_donation/', AddDonation.as_view(), name='add-donation'),
path('login/', LoginView.as_view(), name='login'),
path('register/', RegisterView.as_view(), name='register'),
path('logout/', LogoutView.as_view(), name='logout'),
path('user_view/', UserView.as_view(), name='user-view'),
path('password_change/', PasswordChangeView.as_view(), name='user-change'),
path('password_change/done/', PasswordChangeDoneView.as_view(), name='user-change-done'),
path('add_donation/form-confirmation/', DonationReady.as_view(), name='form-ready'),
]
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
906
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/charitydonation/forms.py
|
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import Donation
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
# class CreateUserForm(UserCreationForm):
# class Meta:
# model = get_user_model()
# fields = ('email', 'username', 'password1', 'password2')
class AddDonationForm(forms.ModelForm):
    """Form for creating a Donation.

    BUG FIX: the original subclassed ``forms.Form``, which ignores the inner
    ``Meta`` entirely — the form had no fields, ``is_valid()`` was trivially
    true and ``cleaned_data`` was empty.  ``forms.ModelForm`` is the class
    that honours ``Meta.model``/``Meta.fields``.
    NOTE(review): the view reads cleaned_data keys like 'bags'/'organization'
    that do not match these field names — align the view/template with the
    model field names below.
    """
    class Meta:
        model = Donation
        fields = ('quantity', 'categories', 'institution', 'address', 'phone_number',
                  'city', 'zip_code', 'pick_up_date', 'pick_up_time', 'pick_up_comment', 'user')
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
907
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/charitydonation/migrations/0003_auto_20210913_1642.py
|
# Generated by Django 3.1 on 2021-09-13 16:42
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds Donation.pick_up_time (callable default -> midnight) and
    re-declares pick_up_date as a plain DateField."""
    dependencies = [
        ('charitydonation', '0002_auto_20210909_1554'),
    ]
    operations = [
        migrations.AddField(
            model_name='donation',
            name='pick_up_time',
            field=models.TimeField(default=datetime.time),
        ),
        migrations.AlterField(
            model_name='donation',
            name='pick_up_date',
            field=models.DateField(),
        ),
    ]
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
908
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/accounts/views.py
|
from django.shortcuts import render
from django.views import View, generic
from django.contrib.auth import views
# from .forms import RegisterForm
from django.shortcuts import render
from django.views import View, generic
# from charitydonation.models import Donation, Institution
from .forms import CreateUserForm, LoginForm, CustomUserCreationForm
from django.contrib.auth import login, logout, authenticate, views
from django.shortcuts import redirect
from django.urls import reverse_lazy
class LoginView(View):
    """Email/password login backed by LoginForm.

    NOTE(review): ``authenticate(email=..., password=...)`` only succeeds
    with an authentication backend that accepts an ``email`` kwarg (Django's
    default ModelBackend expects ``username``) — confirm the configured
    backend.
    """
    def get(self, request):
        form = LoginForm()
        return render(request, 'login.html', {'form': form})
    def post(self, request, *args, **kwargs):
        form = LoginForm(request.POST)
        if form.is_valid():
            user = authenticate(email=form.cleaned_data['email'], password=form.cleaned_data['password'])
            # breakpoint()
            if user is not None:
                login(request, user)
                return redirect('landing-page')
            else:
                # Bad credentials: re-render the form.
                return render(request, 'login.html', {'form': form})
        else:
            return render(request, 'login.html', {'form': form})
class RegisterView(View):
    """Sign-up view backed by CustomUserCreationForm."""
    def get(self, request):
        form = CustomUserCreationForm()
        return render(request, 'register.html', {'form': form})
    def post(self, request):
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            # The creation form handles password hashing; note the user is
            # NOT logged in automatically after registering.
            form.save()
            # instance = form.save(commit=False)
            # instance.set_password(instance.password)
            # # form.clean_password2()
            # instance.save()
            # # email = form.cleaned_data['email']
            # raw_password = form.cleaned_data['password']
            # user = authenticate(email=email, password=raw_password)
            # user.save()
            # login(request, user)
            return redirect('landing-page')
        return render(request, 'register.html', {'form': form})
class LogoutView(View):
    """Log the current user out on GET and return to the landing page."""
    def get(self, request):
        logout(request)
        return redirect('landing-page')
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
909
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/charitydonation/views.py
|
from django.shortcuts import render
from django.views import View, generic
from .models import Donation, Institution, Category
from .forms import AddDonationForm
from django.contrib.auth import login, logout, authenticate, views
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.views.generic.edit import CreateView
from django.db.models import Avg, Count
from django.core.paginator import Paginator
from django.contrib.auth.views import PasswordChangeView, PasswordChangeDoneView
from django.http import HttpResponse
from django.db.models import Q, Sum
class LandingPage(View):
    """Home page: aggregate donation stats and list institutions by type."""
    def get(self, request):
        count_bags = Donation.objects.all()
        # Total donated bags across all donations (None when there are none).
        count_b = count_bags.aggregate(Sum('quantity'))['quantity__sum']
        # NOTE(review): .distinct("institution") with a field argument is
        # PostgreSQL-only — confirm the deployment database.
        count_institutions = Donation.objects.distinct("institution").count()
        #
        all_institution_fund = Institution.objects.filter(type='1')
        all_institution_org = Institution.objects.filter(type='2')
        all_institution_lok = Institution.objects.filter(type='3')
        return render(request, 'index.html', {'count_b': count_b, 'count_institutions': count_institutions,
                                              'all_institution_fund': all_institution_fund,
                                              'all_institution_org': all_institution_org,
                                              'all_institution_lok': all_institution_lok}
                      )
class AddDonation(LoginRequiredMixin, View):
    """Render the donation form (GET) and create a Donation from a valid POST."""
    login_url = '/'
    # raise_exception = True
    def get(self, request):
        categories_all = Category.objects.all()
        institutions_all = Institution.objects.all()
        form = AddDonationForm()
        return render(request, 'form.html',
                      {'categories_all': categories_all,
                       'institutions_all': institutions_all, 'form': form})
    def post(self, request):
        form = AddDonationForm(request.POST)
        if form.is_valid():
            # NOTE(review): these cleaned_data keys ('bags', 'organization',
            # 'postcode', 'data', 'time', 'more_info') do not match the field
            # names declared on AddDonationForm / the Donation model —
            # confirm against the form definition and template.
            categories = form.cleaned_data['categories']
            quantity = form.cleaned_data['bags']
            institution = form.cleaned_data['organization']
            address = form.cleaned_data['address']
            city = form.cleaned_data['city']
            zip_code = form.cleaned_data['postcode']
            phone_number = form.cleaned_data['phone']
            pick_up_date = form.cleaned_data['data']
            pick_up_time = form.cleaned_data['time']
            pick_up_comment = form.cleaned_data['more_info']
            user = request.user
            # BUG FIX: a many-to-many value cannot be passed to .create()
            # (Django forbids direct forward M2M assignment); the row must
            # exist first, then the relation is populated with .set().
            # .create() already saves, so the extra save() call was removed.
            donat = Donation.objects.create(
                quantity=quantity, institution=institution,
                address=address, phone_number=phone_number, city=city, zip_code=zip_code,
                pick_up_date=pick_up_date, pick_up_comment=pick_up_comment, pick_up_time=pick_up_time,
                user=user)
            donat.categories.set(categories)
            return render(request, 'form-confirmation.html', {'form': form})
        return render(request, 'form.html', {'form': form})
        # return HttpResponse("Źle")
# class LoginView(views.LoginView):
# form_class = LoginForm
# template_name = 'login.html'
#
#
# class RegisterView(generic.CreateView):
# form_class = CreateUserForm
# template_name = 'register.html'
# success_url = reverse_lazy('login')
class UserView(LoginRequiredMixin, View):
    """List the donations that belong to the signed-in user."""
    login_url = '/'

    def get(self, request):
        own_donations = Donation.objects.filter(user=request.user)
        context = {'donation_user': own_donations}
        return render(request, 'user-view.html', context)
class PasswordChangeView(PasswordChangeView):
    # NOTE: intentionally shadows the imported auth view of the same name;
    # this subclass only swaps the template and the (relative) success URL.
    template_name = 'change-password.html'
    success_url = 'done/'
class PasswordChangeDoneView(PasswordChangeDoneView):
    # Shadows the imported auth view; only the confirmation template differs.
    template_name = 'change-password-done.html'
class DonationReady(View):
    """Render the static donation-confirmation page."""

    def get(self, request):
        template = 'form-confirmation.html'
        return render(request, template)
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
910
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/charitydonation/migrations/0002_auto_20210909_1554.py
|
# Generated by Django 3.1 on 2021-09-09 15:54
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Donation.pick_up_time and retype pick_up_date / Institution.type."""

    dependencies = [
        ('charitydonation', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='donation',
            name='pick_up_time',
        ),
        migrations.AlterField(
            model_name='donation',
            name='pick_up_date',
            # NOTE(review): verbose_name is the *class* datetime.datetime, not
            # a string — auto-generated from a suspicious model definition.
            # Harmless to the schema, but the model should be corrected and a
            # fresh migration generated.
            field=models.DateTimeField(verbose_name=datetime.datetime),
        ),
        migrations.AlterField(
            model_name='institution',
            name='type',
            field=models.CharField(choices=[('1', 'Fundacja'), ('2', 'Organizacja pozarządowa'), ('3', 'Zbiórka lokalna')], default='1', max_length=2),
        ),
    ]
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
911
|
krzysztof-dudzic/ProjektPortfolioLab
|
refs/heads/main
|
/accounts/models.py
|
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, UserManager
)
#
# class UserManager(BaseUserManager):
# def create_user(self, email, password=None):
# """
# Creates and saves a User with the given email and password.
# """
# if not email:
# raise ValueError('Users must have an email address')
#
# if not password:
# raise ValueError("Users must have a password!!! ")
# user = self.model(
# email=self.normalize_email(email),
# )
#
# user.set_password(password)
# user.staff = is_staff
# user.admin = is_admin
# user.active = is_active
# # user.save(using=self._db)
# return user
#
# def create_staffuser(self, email, password):
# """
# Creates and saves a staff user with the given email and password.
# """
# user = self.create_user(
# email,
# password=password,
# )
# user.staff = True
# # user.save(using=self._db)
# return user
#
# def create_superuser(self, email, password):
# """
# Creates and saves a superuser with the given email and password.
# """
# user = self.create_user(
# email,
# password=password,
# )
# user.staff = True
# user.admin = True
# # user.save(using=self._db)
# return user
#
# class User(AbstractBaseUser):
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=True,
# )
# # full_name = models.CharField(max_length=255, blank=True, null=True)
# is_active = models.BooleanField(default=True)
# staff = models.BooleanField(default=False) # a admin user; non super-user
# admin = models.BooleanField(default=False) # a superuser
# timestamp = models.DateTimeField(auto_now_add=True)
# # notice the absence of a "Password field", that is built in.
#
# USERNAME_FIELD = 'email'
# REQUIRED_FIELDS = [] # Email & Password are required by default.
# objects = UserManager()
#
# def get_full_name(self):
# # The user is identified by their email address
# return self.email
#
# def get_short_name(self):
# # The user is identified by their email address
# return self.email
#
# def __str__(self):
# return self.email
#
# def has_perm(self, perm, obj=None):
# "Does the user have a specific permission?"
# # Simplest possible answer: Yes, always
# return True
#
# def has_module_perms(self, app_label):
# "Does the user have permissions to view the app `app_label`?"
# # Simplest possible answer: Yes, always
# return True
#
# @property
# def is_staff(self):
# "Is the user a member of staff?"
# return self.staff
#
# @property
# def is_active(self):
# "Is the user a admin member?"
# return self.active
#
# @property
# def is_admin(self):
# "Is the user a admin member?"
# return self.admin
#
#
#
#
#
# class GuestEmail(models.Model):
# email = models.EmailField()
# active = models.BooleanField(default=True)
# update = models.DateTimeField(auto_now=True)
# timestamp = models.DateTimeField(auto_now_add=True)
#
# def __str__(self):
# return self.email
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
class UserManager(BaseUserManager):
    """Manager for the email-keyed user model (no username field)."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and persist a regular user; email and password are mandatory."""
        if not email:
            raise ValueError('Users must have an email address')
        if not password:
            raise ValueError("Users must have a password!!! ")
        # Regular accounts default to non-staff, non-superuser, active.
        for flag, value in (('is_staff', False),
                            ('is_superuser', False),
                            ('is_active', True)):
            extra_fields.setdefault(flag, value)
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Create a superuser; staff/superuser flags may not be forced off."""
        for flag in ('is_staff', 'is_superuser', 'is_active'):
            extra_fields.setdefault(flag, True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError(_('Superuser must have is_staff=True.'))
        if extra_fields.get('is_superuser') is not True:
            raise ValueError(_('Superuser must have is_superuser=True.'))
        return self.create_user(email, password, **extra_fields)
class CustomUser(AbstractUser):
    """User model that logs in with email instead of username."""
    # Remove the inherited username column entirely.
    username = None
    email = models.EmailField(_('email address'), max_length=255, unique=True)
    USERNAME_FIELD = 'email'
    # createsuperuser prompts for nothing beyond email and password.
    REQUIRED_FIELDS = []
    objects = UserManager()
    def __str__(self):
        return self.email
|
{"/charitydonation/admin.py": ["/charitydonation/models.py"], "/donation/urls.py": ["/charitydonation/views.py", "/accounts/views.py"], "/charitydonation/forms.py": ["/charitydonation/models.py"], "/charitydonation/views.py": ["/charitydonation/models.py", "/charitydonation/forms.py"]}
|
972
|
prakashpatil1430/Fashionproject
|
refs/heads/main
|
/fashion/migrations/0003_alter_product_category.py
|
# Generated by Django 3.2.6 on 2021-09-25 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Lock Product.category down to the four two-letter choice codes."""

    dependencies = [
        ('fashion', '0002_cart_orderplaced_product'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='category',
            field=models.CharField(choices=[('TS', 'Tshirts'), ('W', 'Watches'), ('P', 'Perfumes'), ('S', 'Shoes')], max_length=2),
        ),
    ]
|
{"/fashion/urls.py": ["/fashion/views.py"]}
|
973
|
prakashpatil1430/Fashionproject
|
refs/heads/main
|
/fashion/urls.py
|
from django.urls import path
# from.views import address,add_to_cart,mobile,checkout,orders,ProductView,ProductDetailView,CustomerRegistrationView,ProfileView,show_cart,laptop,fashion_top,fashion_bottom,gym_product,home_decor,plus_cart,minus_cart,remove_cart,payment_done,orders
from django.conf import settings
from django.conf.urls.static import static
# from django.contrib.auth import views as auth_views
from fashion.views import HomeView,perfume_view,product_view,shoes_view,watch_view,tshirt_view,ProductDetailView,add_to_cart,CustomerRegistrationView,ProfileView,address,show_cart,remove_cart,checkout,orders
from django.contrib.auth import views as auth_views
from .forms import LoginForm,MyPasswordChangeForm
# ,MyPasswordResetForm,MySetPasswordForm
# URL table for the fashion app. Category routes come in pairs: a bare route
# plus a <slug:data> variant used for brand / price-bracket filtering.
urlpatterns = [
    path('',HomeView.as_view(),name='home'),
    path('alldata/',product_view,name="alldata"),
    # Category listings (optionally filtered by slug).
    path('perfume/',perfume_view,name="perfume"),
    path('perfume/<slug:data>/',perfume_view,name="perfume"),
    path('watches/',watch_view,name="watches"),
    path('watches/<slug:data>/',watch_view,name="watches"),
    path('tshirts/',tshirt_view,name="tshirts"),
    path('tshirts/<slug:data>/',tshirt_view,name="tshirts"),
    path('shoes/',shoes_view,name="shoes"),
    path('shoes/<slug:data>/',shoes_view,name="shoes"),
    path('product-detail/<int:pk>',ProductDetailView.as_view(),name="product-detail"),
    # Cart management.
    path('add-to-cart/',add_to_cart,name="add-to-cart"),
    path('cart/',show_cart,name='cart'),
    path('removecart/<int:pk>/',remove_cart,name='removecart'),
    # Account / profile.
    path('profile/',ProfileView.as_view(),name="profile"),
    path('address/',address,name="address"),
    path('orders/',orders,name="orders"),
    path('regestration/',CustomerRegistrationView.as_view(),name="customerregistration"),
    path('login/', auth_views.LoginView.as_view(template_name='fashion/login.html',authentication_form=LoginForm), name='login'),
    path('logout/', auth_views.LogoutView.as_view(next_page='login') ,name='logout'),
    path('passwordchange/',auth_views.PasswordChangeView.as_view(template_name='fashion/passwordchange.html',form_class=MyPasswordChangeForm,success_url='/passwordchangedone/'),name="passwordchange"),
    path('passwordchangedone/', auth_views.PasswordChangeDoneView.as_view(template_name='fashion/passwordchangedone.html'), name='passwordchangedone'),
    path('checkout/',checkout,name='checkout'),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{"/fashion/urls.py": ["/fashion/views.py"]}
|
974
|
prakashpatil1430/Fashionproject
|
refs/heads/main
|
/fashion/views.py
|
from django.shortcuts import render
from django.views import View
from .models import Product, Customer, Cart, OrderPlaced
from django.shortcuts import render, redirect, HttpResponse
from .forms import CustomerRegistrationForm, CustomerProfileForm
from django.contrib import messages
from django.db.models import Q
# Create your views here.
class HomeView(View):
    """Home page: every product plus the cart-item count for the nav badge."""

    def get(self, request):
        products = Product.objects.all()
        cart_count = 0
        if request.user.is_authenticated:
            cart_count = len(Cart.objects.filter(user=request.user))
        payload = {'data': products, 'totalitem': cart_count}
        return render(request, 'fashion/index2.html', context=payload)
def product_view(request, data=None):
    """Render the full product catalogue (the *data* slug is ignored)."""
    catalogue = Product.objects.all()
    return render(request, 'fashion/index2.html', {'data': catalogue})
class ProductDetailView(View):
    """Detail page for one product, with cart badge and already-in-cart flag."""

    def get(self, request, pk):
        cart_count = 0
        in_cart = False
        product = Product.objects.get(pk=pk)
        if request.user.is_authenticated:
            cart_count = len(Cart.objects.filter(user=request.user))
            in_cart = Cart.objects.filter(
                Q(product=product.id) & Q(user=request.user)).exists()
        payload = {
            'product': product,
            'totalitem': cart_count,
            'item_already_in_cart': in_cart,
        }
        return render(request, 'fashion/productdetail.html', payload)
def perfume_view(request, data=None):
    """Perfume category listing, optionally filtered by price bracket.

    ``data`` may be None (all perfumes), 'Below1000' or 'Above1000'.
    Fix: any unrecognised slug previously left ``perfume`` unbound and the
    render raised UnboundLocalError; it now falls back to the unfiltered
    category listing.
    """
    all_product = Product.objects.all()
    # Cart badge count for signed-in users.
    totalitem = 0
    if request.user.is_authenticated:
        totalitem = len(Cart.objects.filter(user=request.user))
    perfume = Product.objects.filter(category='P')
    if data == 'Below1000':
        perfume = perfume.filter(discounted_price__lt=1000)
    elif data == 'Above1000':
        perfume = perfume.filter(discounted_price__gt=1000)
    return render(request, 'fashion/index2.html', {'perfume': perfume, 'totalitem': totalitem, 'data': all_product})
def tshirt_view(request, data=None):
    """T-shirt category listing, filtered by brand slug or price bracket.

    ``data`` may be None, 'm-tshirt'/'w-tshirt' (brand filter), 'Below1000'
    or 'Above1000'.  Fix: an unrecognised slug previously left ``tshirts``
    unbound (UnboundLocalError at render); it now falls back to the full
    category listing.  The two identical brand branches are merged.
    """
    all_product = Product.objects.all()
    totalitem = 0
    if request.user.is_authenticated:
        totalitem = len(Cart.objects.filter(user=request.user))
    tshirts = Product.objects.filter(category='TS')
    if data in ('m-tshirt', 'w-tshirt'):
        tshirts = tshirts.filter(brand=data)
    elif data == 'Below1000':
        tshirts = tshirts.filter(discounted_price__lt=1000)
    elif data == 'Above1000':
        tshirts = tshirts.filter(discounted_price__gt=1000)
    return render(request, 'fashion/index2.html', {'tshirts': tshirts, 'totalitem': totalitem, 'data': all_product})
def watch_view(request, data=None):
    """Watch category listing, filtered by brand slug or price bracket.

    Fixes two bugs: the 'w-match' branch assigned its result to ``tshirts``,
    leaving ``watches`` unbound and crashing the render; and any
    unrecognised slug likewise left ``watches`` unbound.  Both now resolve
    to the correctly filtered (or unfiltered) watches queryset.
    """
    all_product = Product.objects.all()
    totalitem = 0
    if request.user.is_authenticated:
        totalitem = len(Cart.objects.filter(user=request.user))
    watches = Product.objects.filter(category='W')
    if data in ('m-watch', 'w-match'):
        watches = watches.filter(brand=data)
    elif data == 'Below1000':
        watches = watches.filter(discounted_price__lt=1000)
    elif data == 'Above1000':
        watches = watches.filter(discounted_price__gt=1000)
    return render(request, 'fashion/index2.html', {'watches': watches, 'totalitem': totalitem, 'data': all_product})
def shoes_view(request, data=None):
    """Shoe category listing, filtered by brand slug or price bracket.

    Fix: an unrecognised slug previously left ``shoes`` unbound
    (UnboundLocalError at render); it now falls back to the full category
    listing.  The duplicate brand branches are merged.
    """
    all_product = Product.objects.all()
    totalitem = 0
    if request.user.is_authenticated:
        totalitem = len(Cart.objects.filter(user=request.user))
    shoes = Product.objects.filter(category='S')
    if data in ('man-shoes', 'women-shoes'):
        shoes = shoes.filter(brand=data)
    elif data == 'Above1000':
        shoes = shoes.filter(discounted_price__gt=1000)
    elif data == 'Below1000':
        shoes = shoes.filter(discounted_price__lt=1000)
    return render(request, 'fashion/index2.html', {'shoes': shoes, 'totalitem': totalitem, 'data': all_product})
def add_to_cart(request):
    """Add the product given by ``?prod_id=...`` to the current user's cart."""
    if not request.user.is_authenticated:
        return redirect('/login')
    wanted_id = request.GET.get('prod_id')
    product = Product.objects.get(id=wanted_id)
    Cart(user=request.user, product=product).save()
    return redirect('/cart')
def remove_cart(request, pk):
    """Delete the cart row joining the requesting user and product *pk*."""
    product = Product.objects.get(pk=pk)
    entry = Cart.objects.get(Q(product=product) & Q(user=request.user))
    entry.delete()
    return redirect('/cart')
class CustomerRegistrationView(View):
    """Sign-up form for new customers."""

    def get(self, request):
        form = CustomerRegistrationForm()
        return render(request, 'fashion/customer_reg.html', {'form': form})

    def post(self, request):
        form = CustomerRegistrationForm(request.POST)
        if form.is_valid():
            # Fix: persist first, then queue the flash message — previously
            # the success notice was queued before save(), so a failed save
            # would still flash "Registered Successfully".
            form.save()
            messages.success(
                request, 'Congratulations!! Registered Successfully.')
        # Re-render either way; an invalid form carries its bound errors.
        return render(request, 'fashion/customer_reg.html', {'form': form})
class ProfileView(View):
    """Show the customer-profile form (GET) and persist it (POST)."""

    def get(self, request):
        badge = 0
        if request.user.is_authenticated:
            badge = len(Cart.objects.filter(user=request.user))
        form = CustomerProfileForm()
        context = {'form': form, 'active': 'btn-primary', 'totalitem': badge}
        return render(request, 'fashion/profile.html', context)

    def post(self, request):
        badge = 0
        if request.user.is_authenticated:
            badge = len(Cart.objects.filter(user=request.user))
        form = CustomerProfileForm(request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            # Build a fresh Customer record tied to the current user.
            profile = Customer(user=request.user,
                               name=cleaned['name'],
                               locality=cleaned['locality'],
                               city=cleaned['city'],
                               state=cleaned['state'],
                               zipcode=cleaned['zipcode'])
            profile.save()
            messages.success(
                request, 'Congratulations!! Profile Updated Successfully.')
        context = {'form': form, 'active': 'btn-primary', 'totalitem': badge}
        return render(request, 'fashion/profile.html', context)
def checkout(request):
    """Checkout summary: saved addresses, cart items and the grand total."""
    if not request.user.is_authenticated:
        return redirect('/login')
    user = request.user
    addr = Customer.objects.filter(user=user)
    cart_items = Cart.objects.filter(user=user)
    shipping = 70
    total_amount = 0.0
    owned = [entry for entry in Cart.objects.all() if entry.user == user]
    if owned:
        subtotal = 0.0
        for entry in owned:
            subtotal = subtotal + (entry.quantity * entry.product.discounted_price)
        total_amount = subtotal + shipping
    context = {'addr': addr, 'cart_items': cart_items, 'total_amount': total_amount}
    return render(request, 'fashion/checkout.html', context)
def address(request):
    """Address-book page for the current user, with the cart badge count."""
    badge = 0
    if request.user.is_authenticated:
        badge = len(Cart.objects.filter(user=request.user))
    saved = Customer.objects.filter(user=request.user)
    context = {'addr': saved, 'active': 'btn-primary', 'totalitem': badge}
    return render(request, 'fashion/address.html', context)
def show_cart(request):
    """Cart page: items plus subtotal/grand total, or the empty-cart page."""
    if not request.user.is_authenticated:
        return redirect('/login')
    user = request.user
    cart = Cart.objects.filter(user=user)
    owned = [entry for entry in Cart.objects.all() if entry.user == user]
    if not owned:
        return render(request, 'fashion/emptycart.html')
    shipping = 70
    subtotal = 0.0
    for entry in owned:
        subtotal = subtotal + (entry.quantity * entry.product.discounted_price)
    grand_total = subtotal + shipping
    context = {'carts': cart, 'amount': subtotal, 'total_amount': grand_total}
    return render(request, 'fashion/addtocart.html', context)
def orders(request):
    """Convert pending cart items into orders, then show the order history.

    Fix: the original unconditionally ``return redirect("/orders")`` inside
    the view mapped at /orders, which made the final render unreachable and
    produced an infinite redirect loop.  The redirect now fires only after
    cart items were actually converted (post/redirect/get); with an empty
    cart the order list renders.
    """
    user = request.user
    cart_entries = Cart.objects.filter(user=user)
    if cart_entries:
        # NOTE(review): Customer is looked up by ``user.id`` — this assumes
        # the Customer pk equals the User pk; verify against the data model.
        customer = Customer.objects.get(id=user.id)
        for entry in cart_entries:
            OrderPlaced(user=user, customer=customer,
                        product=entry.product, quantity=entry.quantity).save()
            entry.delete()
        # Cart is now empty; the redirected request falls through to render.
        return redirect("/orders")
    op = OrderPlaced.objects.filter(user=request.user)
    return render(request, 'fashion/orders.html', {'order_placed': op})
|
{"/fashion/urls.py": ["/fashion/views.py"]}
|
979
|
jlamonade/splitteroni
|
refs/heads/master
|
/pages/tests.py
|
from django.test import TestCase
from django.urls import reverse, resolve
from django.contrib.auth import get_user_model
from .views import HomePageView
# Create your tests here.
class HomepageTests(TestCase):
    """Smoke tests for the home page in both anonymous and signed-in states."""

    def setUp(self):
        # Anonymous GET captured once and reused by the logged-out tests.
        url = reverse('home')
        self.response = self.client.get(url)
        self.user = get_user_model().objects.create_user(
            username='testuser',
            email='testuser@email.com',
            password='testpass',
        )

    def test_homepage_status_code(self):
        self.assertEqual(self.response.status_code, 200)

    def test_homepage_template(self):
        self.assertTemplateUsed(self.response, 'home.html')

    def test_homepage_contains_correct_html_while_logged_out(self):
        self.assertContains(self.response, 'Create a new split. Log in or sign up to save your splits.')
        self.assertContains(self.response, 'Sign up')

    def test_homepage_contains_correct_html_while_logged_in(self):
        # Fix: the original asserted against self.response, which was fetched
        # in setUp *before* logging in, so the test never exercised the
        # authenticated page.  Re-fetch after login.
        self.client.login(email='testuser@email.com', password='testpass')
        response = self.client.get(reverse('home'))
        self.assertContains(response, 'Create a new split.')

    def test_homepage_does_not_contain_incorrect_html(self):
        self.assertNotContains(self.response, 'Should not contain this')

    def test_homepage_url_resolves_homepageview(self):
        view = resolve('/')
        self.assertEqual(
            view.func.__name__, HomePageView.as_view().__name__
        )
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
980
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/tests.py
|
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.contrib.auth import get_user_model
from decimal import Decimal
from .models import Bill, Person, Item
# Create your tests here.
class SplitterTests(TestCase):
    """Model and view tests for bills, people and (shared) items."""

    def setUp(self):
        # One user owning two bills: bill uses absolute tip/tax amounts,
        # bill_two uses tip/tax percentages.
        self.user = get_user_model().objects.create_user(
            username='testuser',
            email='testuser@email.com',
            password='testpass',
        )
        self.bill = Bill.objects.create(
            title='testbill',
            tip=12.00,
            tax=13.00,
            owner=self.user,
        )
        self.person = Person.objects.create(
            name='testperson',
            bill=self.bill
        )
        self.item = Item.objects.create(
            title='testitem',
            price=14.00,
            person=self.person,
            bill=self.bill,
        )
        self.shared_item = Item.objects.create(
            title='testshareditem',
            price=15.00,
            bill=self.bill,
            shared=True,
        )
        # Testing tax percent/amount
        self.bill_two = Bill.objects.create(
            title='testbill2',
            tip_percent=15,
            tax_percent=8.875,
            owner=self.user,
        )
        self.item_two = Item.objects.create(
            title='testitem2',
            price=14.00,
            bill=self.bill_two,
            shared=True,
        )
        # Expected totals computed by hand from the fixture values above.
        self.bill_total = self.item.price + self.shared_item.price + self.bill.tax + self.bill.tip
        self.shared_item_total = self.bill.tip + self.bill.tax + self.shared_item.price
        # Detail-page responses fetched once and reused across tests.
        self.bill_detail_response = self.client.get(self.bill.get_absolute_url())
        self.bill_two_response = self.client.get(self.bill_two.get_absolute_url())

    def test_bill_object(self):
        self.assertEqual(self.bill.title, 'testbill')
        self.assertEqual(self.bill.tip, 12.00)
        self.assertEqual(self.bill.tax, 13.00)
        self.assertEqual(self.bill.owner, self.user)

    def test_bill_list_view_for_logged_in_user(self):
        # NOTE(review): login() is called with email= although the user was
        # created with a username — confirm the auth backend accepts this.
        self.client.login(email='testuser@email.com', password='testpass')
        response = self.client.get(reverse('bill-list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'testbill'.title())
        self.assertTemplateUsed(response, 'splitter/bill_list.html')

    def test_bill_list_view_for_logged_out_users(self):
        response = self.client.get(reverse('bill-list'))
        self.assertEqual(response.status_code, 200)

    def test_bill_detail_view(self):
        no_response = self.client.get('/bill/12345/')
        self.assertEqual(self.bill_detail_response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(self.bill_detail_response, 'testbill'.title())
        self.assertContains(self.bill_detail_response, '12.00')
        self.assertContains(self.bill_detail_response, '13.00')
        self.assertContains(self.bill_detail_response, self.item.price)
        self.assertContains(self.bill_detail_response, self.shared_item.price)
        self.assertContains(self.bill_detail_response, self.bill_total)
        self.assertTemplateUsed(self.bill_detail_response, 'splitter/bill_detail.html')

    def test_person_object(self):
        self.assertEqual(self.person.name, 'testperson')
        self.assertEqual(self.person.bill, self.bill)

    def test_person_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testperson'.title())

    def test_item_object(self):
        self.assertEqual(self.item.title, 'testitem')
        self.assertEqual(self.item.price, 14.00)
        self.assertEqual(self.item.bill, self.bill)
        self.assertEqual(self.item.person, self.person)

    def test_item_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testitem')
        self.assertContains(self.bill_detail_response, 14.00)

    def test_shared_item_object(self):
        self.assertEqual(self.shared_item.title, 'testshareditem')
        self.assertEqual(self.shared_item.price, 15.00)
        self.assertEqual(self.shared_item.bill, self.bill)

    def test_shared_item_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testshareditem')
        self.assertContains(self.bill_detail_response, 15.00)

    def test_bill_model_methods(self):
        """Tests for Bill model methods."""
        # Bill.get_order_total()
        self.assertEqual(self.bill.get_order_grand_total(), self.bill_total)
        # Bill.get_shared_items_total()
        self.assertEqual(self.bill.get_shared_items_total(), self.shared_item.price)

    def test_person_model_methods(self):
        """Tests for Person model methods."""
        # Person.get_shared_items_split()
        self.assertEqual(self.person.get_shared_items_split(), self.shared_item_total)
        # Person.get_person_total()
        self.assertEqual(self.person.get_person_total(), self.bill.get_order_grand_total())

    def test_bill_calculate_tax(self):
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.get_tax_amount()))
        self.assertContains(self.bill_two_response, self.bill_two.tax_percent)
        # NOTE(review): mutating tax here is never saved and the response was
        # fetched in setUp, so the next assert checks the *old* page —
        # confirm whether a re-fetch was intended.
        self.bill_two.tax = 12.00
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.tax))

    def test_bill_calculate_tip(self):
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.get_tip_amount()))
        self.assertContains(self.bill_two_response, self.bill_two.tip_percent)
        # NOTE(review): same stale-response concern as test_bill_calculate_tax.
        self.bill_two.tip = 12.00
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.tip))

    def test_bill_saves_session(self):
        # Anonymous bills are keyed by the browser session instead of owner.
        self.client.session.create()
        self.bill_three = Bill.objects.create(
            title='testbill3',
            session=self.client.session.session_key,
        )
        self.assertEqual(self.bill_three.session, self.client.session.session_key)
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
981
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/urls.py
|
from django.urls import path
from .views import (
BillCreateView,
BillDetailView,
PersonCreateView,
PersonDeleteView,
BillListView,
ItemCreateView,
ItemDeleteView,
SharedItemCreateView,
BillUpdateView,
BillUpdateTaxPercentView,
BillUpdateTaxAmountView,
BillUpdateTipAmountView,
BillUpdateTipPercentView,
BillDeleteView,
)
# URL table for the splitter app. Bills are addressed by UUID primary key;
# tip/tax each have separate percent- and amount-based update routes.
urlpatterns = [
    # Bill links
    path('new/', BillCreateView.as_view(), name='bill-create'),
    path('<uuid:pk>/', BillDetailView.as_view(), name='bill-detail'),
    path('archive/', BillListView.as_view(), name='bill-list'),
    path('<uuid:pk>/update/', BillUpdateView.as_view(), name='bill-update'),
    path('<uuid:pk>/update-tax-percent/',
         BillUpdateTaxPercentView.as_view(),
         name='bill-update-tax-percent'),
    path('<uuid:pk>/update-tax-amount/',
         BillUpdateTaxAmountView.as_view(),
         name='bill-update-tax-amount'),
    path('<uuid:pk>/update-tip-amount/', BillUpdateTipAmountView.as_view(), name='bill-update-tip'),
    path('<uuid:pk>/update-tip-percent/',
         BillUpdateTipPercentView.as_view(),
         name='bill-update-tip-percent'),
    path('<uuid:pk>/delete/', BillDeleteView.as_view(), name='bill-delete'),
    # Person links
    path('<uuid:pk>/add-person/', PersonCreateView.as_view(), name='person-create'),
    path('person/<uuid:pk>/delete/', PersonDeleteView.as_view(), name='person-delete'),
    # Item links (items may belong to a person or be shared by the bill)
    path('<uuid:bill_id>/<uuid:person_id>/add-item/',
         ItemCreateView.as_view(),
         name='item-create'
         ),
    path('<uuid:bill_id>/add-shared-item/',
         SharedItemCreateView.as_view(),
         name='shared-item-create'
         ),
    path('item/<uuid:pk>/item-delete/', ItemDeleteView.as_view(), name='item-delete'),
]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
982
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/forms.py
|
from django.forms import forms, ModelForm
from django.utils.translation import gettext_lazy as _
from .models import Bill
class BillCreateForm(ModelForm):
    """Creation form for a Bill: name plus optional tax/tip percentages."""
    class Meta:
        model = Bill
        fields = ('title', 'tax_percent', 'tip_percent',)
        labels = {
            'title': _('Name'),
        }
        help_texts = {
            'title': _('The current date and time will be used if name field is empty.'),
            'tax_percent': _('Please enter a percentage value. You can leave this blank and change it later.'),
            'tip_percent': _('Please enter a percentage value. You can leave this blank and change it later.'),
        }
        error_messages = {
            'title': {
                'max_length': _("Name is too long."),
            },
            'tax_percent': {
                'max_digits': _("Too many digits.")
            },
            'tip_percent': {
                'max_digits': _("Too many digits.")
            }
        }
class BillUpdateForm(ModelForm):
    """Rename an existing bill (title only)."""
    class Meta:
        model = Bill
        fields = ('title',)
        labels = {
            'title': _('Name'),
        }
class BillUpdateTaxPercentForm(ModelForm):
    """Edit only the bill's tax percentage."""
    # def __init__(self, *args, **kwargs):
    #     initial = kwargs.get('initial', {})
    #     initial['tax'] = 0
    #     kwargs['initial'] = initial
    #     super(BillUpdateTaxPercentForm, self).__init__(*args, **kwargs)
    class Meta:
        model = Bill
        fields = ('tax_percent',)
        help_texts = {
            'tax_percent': _('Please enter a percent(%) amount.')
        }
class BillUpdateTaxAmountForm(ModelForm):
    """Edit only the bill's absolute tax amount (currency, not percent)."""
    class Meta:
        model = Bill
        fields = ('tax',)
        help_texts = {
            'tax': _('Please enter a currency amount.')
        }
class BillUpdateTipForm(ModelForm):
    """Edit only the bill's absolute tip amount (currency, not percent)."""
    class Meta:
        model = Bill
        fields = ('tip',)
        labels = {
            'tip': _('Tip/Service Charge'),
        }
        help_texts = {
            'tip': _('Please enter currency amount.')
        }
class BillUpdateTipPercentForm(ModelForm):
    """Edit only the bill's tip percentage.

    Fix: the help text was keyed under 'tip', which is not in ``fields``,
    so Django silently dropped it; it is now keyed 'tip_percent' and
    actually renders.
    """
    class Meta:
        model = Bill
        fields = ('tip_percent',)
        labels = {
            'tip_percent': _('Tip/Service Charge Percent'),
        }
        help_texts = {
            'tip_percent': _('Please enter a percent(%) amount.')
        }
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
983
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/models.py
|
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from .utils import _check_tip_tax_then_add
# Create your models here.
class Bill(models.Model):
    """A restaurant bill to be split; owned by a user or an anonymous session.

    Tip and tax can each be stored as an absolute amount (``tip``/``tax``)
    or a percentage (``tip_percent``/``tax_percent``); the percent form,
    when present, wins and the computed amount is written back.
    """
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    # Either owner (logged-in) or session (anonymous) identifies the creator.
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, blank=True)
    session = models.CharField(max_length=40, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    tip = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tip_percent = models.DecimalField(max_digits=10, decimal_places=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tax_percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='id_index'),
        ]
    def __str__(self):
        # Untitled bills display their creation timestamp instead.
        if not self.title:
            return self.date_created.strftime("%m/%d/%y %I:%M%p")
        else:
            return self.title.title()
    def get_tax_amount(self):
        """Return the tax as currency, derived from tax_percent when set.

        NOTE(review): this getter has a side effect — when tax_percent is
        set it re-fetches the row and *saves* the computed tax back to the
        database on every call.
        """
        subtotal = self.get_order_subtotal()
        if self.tax_percent:
            tax_amount = (subtotal * (Decimal(self.tax_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tax = tax_amount
            bill.save()
            return Decimal(tax_amount).quantize(Decimal('.01'))
        elif self.tax:
            return Decimal(self.tax).quantize(Decimal('.01'))
        else:
            return 0
    def get_tip_amount(self):
        """Return the tip as currency; percent-based tip is applied on top
        of subtotal + tax. Same write-back side effect as get_tax_amount().
        """
        subtotal = self.get_order_subtotal() + self.get_tax_amount()
        if self.tip_percent:
            tip_amount = (subtotal * (Decimal(self.tip_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tip = tip_amount
            bill.save()
            return Decimal(tip_amount).quantize(Decimal('.01'))
        elif self.tip:
            return Decimal(self.tip).quantize(Decimal('.01'))
        else:
            return 0
    def get_order_grand_total(self):
        # Returns the sum of all items including tax and tip
        total = _check_tip_tax_then_add(self) + self.get_order_subtotal()
        return Decimal(total)
    def get_order_subtotal(self):
        """Sum of every item price on this bill (shared and personal)."""
        total = 0
        items = Item.objects.filter(bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_shared_items_total(self):
        # Returns sum of shared items only
        total = 0
        items = Item.objects.filter(shared=True, bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.id])
class Person(models.Model):
    """One participant on a bill; owns items and a share of the shared items."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    name = models.CharField(max_length=30)
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='people')
    class Meta:
        verbose_name_plural = 'people'
        indexes = [
            models.Index(fields=['id'], name='person_id_index'),
        ]
    def __str__(self):
        return self.name.title()
    def get_shared_items_split(self):
        # Per-person share of shared items plus the bill's tip and tax.
        shared_total = _check_tip_tax_then_add(self.bill)
        for shared_item in self.bill.items.filter(shared=True):
            shared_total += Decimal(shared_item.price)
        head_count = self.bill.people.all().count()
        return Decimal(shared_total / head_count)
    def get_person_total(self):
        # This person's own items plus their shared split, rounded to cents.
        own_total = 0
        for owned_item in Item.objects.filter(person=self):
            own_total += Decimal(owned_item.price)
        return Decimal(own_total + self.get_shared_items_split()).quantize(Decimal('.01'))
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
class Item(models.Model):
    # A single line item on a bill; either owned by one person or shared.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    price = models.DecimalField(max_digits=15, decimal_places=2)
    # person is nullable: shared items (shared=True) have no single owner.
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        related_name='items',
        blank=True,
        null=True
    )
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='items')
    shared = models.BooleanField(default=False)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='item_id_index'),
        ]
    def __str__(self):
        # NOTE(review): title is nullable, so this can return None while
        # Django expects a str — confirm untitled items never reach __str__.
        return self.title
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
984
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0005_merge_20201009_1438.py
|
# Generated by Django 3.1.2 on 2020-10-09 14:38
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two divergent 0004 branches.
    # No schema changes.
    dependencies = [
        ('splitter', '0004_auto_20201008_2206'),
        ('splitter', '0004_auto_20201009_1430'),
    ]
    operations = [
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
985
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0007_auto_20201009_1606.py
|
# Generated by Django 3.1.2 on 2020-10-09 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the pk indexes declared in Item.Meta and Person.Meta.
    dependencies = [
        ('splitter', '0006_auto_20201009_1603'),
    ]
    operations = [
        migrations.AddIndex(
            model_name='item',
            index=models.Index(fields=['id'], name='item_id_index'),
        ),
        migrations.AddIndex(
            model_name='person',
            index=models.Index(fields=['id'], name='person_id_index'),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
986
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0009_merge_20201012_2025.py
|
# Generated by Django 3.1.2 on 2020-10-12 20:25
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two divergent 0008 branches.
    # No schema changes.
    dependencies = [
        ('splitter', '0008_auto_20201011_1907'),
        ('splitter', '0008_auto_20201011_0301'),
    ]
    operations = [
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
987
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/admin.py
|
from django.contrib import admin
from .models import Bill, Person, Item
# Register your models here.
# Register every splitter model with the default admin site.
for splitter_model in (Bill, Person, Item):
    admin.site.register(splitter_model)
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
988
|
jlamonade/splitteroni
|
refs/heads/master
|
/config/settings.py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from environs import Env
env = Env()
env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG", default=False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=[])
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'django.contrib.sites',
# Third party apps
'crispy_forms',
'allauth',
'allauth.account',
'debug_toolbar',
# My apps
'users',
'pages',
'splitter',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
# # Cache settings
# CACHE_MIDDLEWARE_ALIAS = 'default'
# CACHE_MIDDLEWARE_SECONDS = 604800
# CACHE_MIDDLEWARE_KEY_PREFIX = ''
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [str(BASE_DIR.joinpath('templates'))],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': env.dj_db_url(
"DATABASE_URL", default="postgres://postgres@db/postgres")
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
# Static file settings
STATIC_URL = '/static/'
STATICFILES_DIRS = (str(BASE_DIR.joinpath('static')),)
STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles'))
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
AUTH_USER_MODEL = 'users.CustomUser'
# Crispy settings
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# django-allauth config
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
# Email settings
DEFAULT_FROM_EMAIL = 'lamgoesbam@gmail.com'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = 'apikey'
EMAIL_HOST_PASSWORD = env("DJANGO_EMAIL_HOST_PASSWORD", default='')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# django-debug-toolbar
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [ip[:-1] + "1" for ip in ips]
# Security settings
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
SECURE_HSTS_SECONDS = env.int("DJANGO_SECURE_HSTS_SECONDS", default=2592000)
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
SESSION_COOKIE_SECURE = env.bool("DJANGO_SESSION_COOKIE_SECURE", default=True)
CSRF_COOKIE_SECURE = env.bool("DJANGO_CSRF_COOKIE_SECURE", default=True)
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
989
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0002_auto_20201007_2310.py
|
# Generated by Django 3.1.2 on 2020-10-08 03:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Item.title optional and widens Person.name from 20 to 30 chars.
    dependencies = [
        ('splitter', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='item',
            name='title',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='name',
            field=models.CharField(max_length=30),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
990
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0012_bill_session.py
|
# Generated by Django 3.1.2 on 2020-10-16 21:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Bill.session so anonymous visitors can own bills via session key.
    dependencies = [
        ('splitter', '0011_bill_tip_percent'),
    ]
    operations = [
        migrations.AddField(
            model_name='bill',
            name='session',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
991
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/utils.py
|
from decimal import Decimal
def _check_tip_tax_then_add(self):
# Checks to see if tip or tax is null before adding them to total else it returns 0
total = 0
tip = self.get_tip_amount()
tax = self.get_tax_amount()
if tip:
total += tip
if tax:
total += tax
return Decimal(total)
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
992
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/apps.py
|
from django.apps import AppConfig
class SplitterConfig(AppConfig):
    """Django app configuration for the bill-splitting app."""
    name = 'splitter'
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
993
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0010_bill_tax_percent.py
|
# Generated by Django 3.1.2 on 2020-10-12 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Bill.tax_percent (percentage alternative to the flat tax amount).
    dependencies = [
        ('splitter', '0009_merge_20201012_2025'),
    ]
    operations = [
        migrations.AddField(
            model_name='bill',
            name='tax_percent',
            field=models.DecimalField(blank=True, decimal_places=5, max_digits=10, null=True),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
994
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0001_initial.py
|
# Generated by Django 3.1.2 on 2020-10-08 02:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Initial schema: Bill, Person, and Item with their FK relationships.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Bill',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.CharField(blank=True, max_length=50, null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('tip', models.DecimalField(blank=True, decimal_places=2, max_digits=15)),
                ('tax', models.DecimalField(blank=True, decimal_places=2, max_digits=15)),
                ('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=20)),
                ('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='people', to='splitter.bill')),
            ],
            options={
                'verbose_name_plural': 'people',
            },
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=50)),
                ('price', models.DecimalField(decimal_places=2, max_digits=15)),
                ('shared', models.BooleanField(default=False)),
                ('bill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='splitter.bill')),
                ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='items', to='splitter.person')),
            ],
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
995
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0006_auto_20201009_1603.py
|
# Generated by Django 3.1.2 on 2020-10-09 16:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the pk index declared in Bill.Meta.
    dependencies = [
        ('splitter', '0005_merge_20201009_1438'),
    ]
    operations = [
        migrations.AddIndex(
            model_name='bill',
            index=models.Index(fields=['id'], name='id_index'),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
996
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/views.py
|
from django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.http import Http404
from decimal import Decimal
from .models import Bill, Person, Item
from .forms import (BillCreateForm,
BillUpdateForm,
BillUpdateTaxPercentForm,
BillUpdateTaxAmountForm,
BillUpdateTipForm,
BillUpdateTipPercentForm)
# from .mixins import BillUpdateViewMixin
# Create your views here.
class BillCreateView(CreateView):
    """Create a bill for the logged-in user, or tie it to an anonymous session."""
    template_name = 'splitter/bill_create.html'
    form_class = BillCreateForm
    def form_valid(self, form):
        if self.request.user.is_authenticated:
            form.instance.owner = self.request.user
        else:
            # No account: mint a session so the visitor can find the bill again.
            self.request.session.create()
            form.instance.session = self.request.session.session_key
        return super().form_valid(form)
class BillDetailView(DetailView):
    """Show one bill with its people, shared items, and display percentages."""
    model = Bill
    template_name = 'splitter/bill_detail.html'
    context_object_name = 'bill'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['people'] = Person.objects.filter(
            bill=self.object.id)
        context['shared_items'] = Item.objects.filter(bill=self.object.id, shared=True)
        if self.object.tax_percent:
            context['tax_percentage'] = Decimal(self.object.tax_percent).quantize(Decimal('0.001'))
        if self.object.tip_percent:
            # Match the tax branch: wrap then quantize. The old code quantized
            # inside the Decimal() call — same value, but it read as a typo.
            context['tip_percentage'] = Decimal(self.object.tip_percent).quantize(Decimal('0'))
        return context
    def get_object(self, queryset=None):
        """Return the bill only to its owner or to the session that made it."""
        pk = self.kwargs.get('pk')
        obj = get_object_or_404(Bill, id=pk)
        if self.request.user.is_authenticated and self.request.user == obj.owner:
            return obj
        elif self.request.session.session_key == obj.session:
            return obj
        else:
            # Hide existence of bills the requester doesn't own.
            raise Http404
class PersonCreateView(CreateView):
    """Add a person to the bill referenced by the URL's pk."""
    model = Person
    template_name = 'splitter/person_create.html'
    fields = ('name',)
    def form_valid(self, form):
        # Attach the new person to the bill from the URL before saving.
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        return super().form_valid(form)
class BillDeleteView(DeleteView):
    """Confirm-and-delete view for a Bill; redirects to the bill list."""
    model = Bill
    template_name = 'splitter/bill_delete.html'
    def get_success_url(self):
        return reverse_lazy('bill-list')
class BillListView(ListView):
    """List bills owned by the user, or by the anonymous session; None otherwise."""
    template_name = 'splitter/bill_list.html'
    context_object_name = 'bills'
    def get_queryset(self):
        user = self.request.user
        session_key = self.request.session.session_key
        if user.is_authenticated:
            return Bill.objects.filter(owner=user).order_by('-date_created')
        if session_key:
            return Bill.objects.filter(session=session_key).order_by('-date_created')
        return None
class PersonDeleteView(DeleteView):
    """Confirm-and-delete view for a Person; returns to the owning bill."""
    model = Person
    template_name = 'splitter/person_delete.html'
    def get_success_url(self):
        return reverse_lazy('bill-detail', args=[self.object.bill.id])
class ItemCreateView(CreateView):
    """Create an item owned by one person on a bill (URL supplies both ids)."""
    model = Item
    template_name = 'splitter/item_create.html'
    fields = ('title', 'price',)
    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        form.instance.person = get_object_or_404(Person, id=self.kwargs['person_id'])
        return super().form_valid(form)
class SharedItemCreateView(CreateView):
    """Create an item shared by everyone on the bill (no owning person)."""
    model = Item
    template_name = "splitter/item_create.html"
    fields = ('title', 'price',)
    def form_valid(self, form):
        form.instance.bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        # Shared items have no owner; the flag marks them for the split logic.
        form.instance.shared = True
        return super().form_valid(form)
class ItemDeleteView(DeleteView):
    """Confirm-and-delete view for an Item; returns to the owning bill."""
    model = Item
    template_name = 'splitter/item_delete.html'
    def get_success_url(self):
        return reverse_lazy('bill-detail', args=[self.object.bill.id])
class BillUpdateView(UpdateView):
    """Edit a Bill's basic fields."""
    model = Bill
    template_name = 'splitter/bill_update.html'
    form_class = BillUpdateForm
    def form_valid(self, form):
        # NOTE(review): form.instance here IS the Bill being edited, so
        # assigning a `bill` attribute onto it looks like a no-op copied from
        # the item views — confirm and consider removing.
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        return super().form_valid(form)
class BillUpdateTaxPercentView(UpdateView):
    """Set the bill's tax as a percentage, clearing any flat tax amount."""
    model = Bill
    form_class = BillUpdateTaxPercentForm
    template_name = 'splitter/bill_update_tax_percent.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive inputs; drop the old one.
        form.instance.tax = None
        return super().form_valid(form)
class BillUpdateTaxAmountView(UpdateView):
    """Set the bill's tax as a flat amount, clearing any tax percentage."""
    model = Bill
    form_class = BillUpdateTaxAmountForm
    template_name = 'splitter/bill_update_tax_amount.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive inputs; drop the old one.
        form.instance.tax_percent = None
        return super().form_valid(form)
class BillUpdateTipAmountView(UpdateView):
    """Set the bill's tip as a flat amount, clearing any tip percentage."""
    model = Bill
    form_class = BillUpdateTipForm
    template_name = 'splitter/bill_update_tip.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive inputs; drop the old one.
        form.instance.tip_percent = None
        return super().form_valid(form)
class BillUpdateTipPercentView(UpdateView):
    """Set the bill's tip as a percentage, clearing any flat tip amount."""
    model = Bill
    form_class = BillUpdateTipPercentForm
    template_name = 'splitter/bill_update_tip_percent.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive inputs; drop the old one.
        form.instance.tip = None
        return super().form_valid(form)
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
997
|
jlamonade/splitteroni
|
refs/heads/master
|
/users/tests.py
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse, resolve
from .forms import CustomUserCreationForm, CustomUserChangeForm
# Create your tests here.
class CustomUserTests(TestCase):
    """Unit tests for the custom user model's manager."""
    def test_create_user(self):
        User = get_user_model()
        # create_user (not objects.create): objects.create stores the raw
        # password string unhashed, which is not how users are really made.
        user = User.objects.create_user(
            username='test',
            email='test@email.com',
            password='test123',
        )
        self.assertEqual(user.username, 'test')
        self.assertEqual(user.email, 'test@email.com')
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)
    def test_create_superuser(self):
        User = get_user_model()
        super_user = User.objects.create_superuser(
            username='superuser',
            email='superuser@email.com',
            password='super123',
        )
        self.assertEqual(super_user.username, 'superuser')
        self.assertEqual(super_user.email, 'superuser@email.com')
        self.assertTrue(super_user.is_active)
        self.assertTrue(super_user.is_staff)
        self.assertTrue(super_user.is_superuser)
class SignupPageTests(TestCase):
    """Tests for the allauth signup page template and user creation."""
    username = 'testuser'
    email = 'testuser@email.com'
    def setUp(self):
        self.response = self.client.get(reverse('account_signup'))
    def test_signup_template(self):
        response = self.response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'account/signup.html')
        self.assertContains(response, 'Sign up')
        self.assertNotContains(response, 'Should not contain this')
    def test_signup_form(self):
        get_user_model().objects.create_user(
            self.username, self.email
        )
        self.assertEqual(get_user_model().objects.all().count(), 1)
        self.assertEqual(
            get_user_model().objects.all()[0].username, 'testuser'
        )
        self.assertEqual(
            get_user_model().objects.all()[0].email, 'testuser@email.com'
        )
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
998
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0004_auto_20201008_2206.py
|
# Generated by Django 3.1.2 on 2020-10-09 02:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Gives Bill.tax and Bill.tip a default of 0 (still nullable).
    dependencies = [
        ('splitter', '0003_auto_20201007_2339'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bill',
            name='tax',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=15, null=True),
        ),
        migrations.AlterField(
            model_name='bill',
            name='tip',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=15, null=True),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
999
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0011_bill_tip_percent.py
|
# Generated by Django 3.1.2 on 2020-10-15 04:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Bill.tip_percent (percentage alternative to the flat tip amount).
    dependencies = [
        ('splitter', '0010_bill_tax_percent'),
    ]
    operations = [
        migrations.AddField(
            model_name='bill',
            name='tip_percent',
            field=models.DecimalField(blank=True, decimal_places=3, max_digits=10, null=True),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
1,000
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/migrations/0003_auto_20201007_2339.py
|
# Generated by Django 3.1.2 on 2020-10-08 03:39
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Bill.tax and Bill.tip optional (blank/null allowed).
    dependencies = [
        ('splitter', '0002_auto_20201007_2310'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bill',
            name='tax',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=15, null=True),
        ),
        migrations.AlterField(
            model_name='bill',
            name='tip',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=15, null=True),
        ),
    ]
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
1,001
|
jlamonade/splitteroni
|
refs/heads/master
|
/splitter/mixins.py
|
class BillUpdateViewMixin(object):
    """Mixin for bill-editing views: attaches the URL's Bill before saving.

    Shared form_valid for the Bill* update views.
    """
    def form_valid(self, form):
        # Function-scope imports: this module defines no top-level imports,
        # so the previously unqualified names raised NameError when the
        # mixin was actually used.
        from django.shortcuts import get_object_or_404
        from .models import Bill
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        return super().form_valid(form)
|
{"/splitter/tests.py": ["/splitter/models.py"], "/splitter/urls.py": ["/splitter/views.py"], "/splitter/forms.py": ["/splitter/models.py"], "/splitter/models.py": ["/splitter/utils.py"], "/splitter/admin.py": ["/splitter/models.py"], "/splitter/views.py": ["/splitter/models.py", "/splitter/forms.py"]}
|
1,007
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/__init__.py
|
# flake8:noqa
try:
    from importlib import metadata
except ImportError:
    # Running on pre-3.8 Python; use importlib-metadata package
    import importlib_metadata as metadata
# Version is read from the installed distribution's metadata.
__version__ = metadata.version("good-smell")
# Re-export the public API at the package level.
from .smell_warning import SmellWarning
from .lint_smell import LintSmell
from .ast_smell import AstSmell, LoggingTransformer
from .smells import implemented_smells
from .main import fix_smell, print_fixed_smell, main, smell_warnings
from . import smells  # Allow importing good_smell.smells
from .flake8_ext import LintingFlake8
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,008
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/smells/range_len_fix.py
|
import ast
from good_smell import AstSmell, LoggingTransformer
from typing import Union, Container
class RangeLenSmell(AstSmell):
    """Smell definition for C-style ``for i in range(len(seq))`` loops."""
    @property
    def transformer_class(self):
        # Fixer that rewrites the loop to use enumerate().
        return EnumerateFixer
    @property
    def symbol(self):
        return "range-len"
    @property
    def warning_message(self) -> str:
        return "Instead of using a c-style for loop, try using enumerate!"
class AssignDeleter(ast.NodeTransformer):
    """Removes ``x = seq[target]`` assignments from a loop body, remembering
    the assigned name so later ``seq[target]`` reads can be replaced by it."""
    def __init__(self, seq: ast.Name, target: ast.Name):
        self.id = target    # the loop index variable node
        self.seq = seq      # the sequence node being indexed
        # Default element name when no ``x = seq[i]`` assignment is found.
        # (``None or expr`` just evaluates to expr here.)
        self.elem_target = None or ast.Name(id="elm", ctx=ast.Store())
        self.uses_seq = False  # set once any seq[target] access is seen
    def visit_Assign(self, node: ast.Assign):
        """Deletes a node if it assigning using the for target"""
        if self.accesses_seq(node.value):
            # Remember the bound name; returning None drops the assignment.
            self.elem_target = node.targets[0]
            return None
        return self.generic_visit(node)
    @staticmethod
    def __get_slice_id(node: ast.Subscript) -> Container[str]:
        """Get slice identifier.
        Needed because in python3.9 ast.Subscript.slice became a ast.Name, instead of a ast.Index."""
        slice = node.slice
        if isinstance(slice, ast.Name):
            return [slice.id]
        if isinstance(slice, ast.Index):
            return [slice.value.id]
        if isinstance(slice, ast.Slice):
            # NOTE(review): these are AST nodes, not identifier strings, so
            # the caller's string membership test can never match — confirm
            # intent. Any other slice kind falls through and returns None,
            # which would make ``in`` raise TypeError in accesses_seq.
            return [slice.upper, slice.lower]
    def accesses_seq(self, node) -> bool:
        """Checks if the node acceses the sequence[target]"""
        if (
            isinstance(node, ast.Subscript)
            and self.id.id in self.__get_slice_id(node)
            and node.value.id == self.seq.id
        ):
            self.uses_seq = True
            return True
        # Implicitly returns None (falsy) when the subscript does not match.
    def visit_Subscript(self, node: ast.Subscript):
        # Replace remaining seq[target] reads with the element variable.
        if self.accesses_seq(node):
            return self.elem_target
        return self.generic_visit(node)
class EnumerateFixer(LoggingTransformer):
    """Rewrites ``for i in range(len(seq)):`` loops to use enumerate()."""
    def visit_For(self, node: ast.For) -> Union[bool, ast.For]:
        enumerate_node = ast.Name(id="enumerate", ctx=ast.Load())
        # node.iter is range(len(seq)) — guaranteed by is_smelly — so this
        # digs out the seq argument of the inner len() call.
        node_iterable = node.iter.args[0].args[0]
        original_target = node.target
        # Strip ``x = seq[i]`` assignments / ``seq[i]`` reads from the body.
        deleter = AssignDeleter(target=original_target, seq=node_iterable)
        new_body = deleter.visit(node).body or [ast.Pass()]
        # Bind the element to "_" when the body never indexes the sequence.
        elm_target = (
            deleter.elem_target
            if deleter.uses_seq
            else ast.Name(id="_", ctx=ast.Store())
        )
        # for (original_target,elm_target) in enumerate(node_iterable):
        new_node = ast.For(
            target=ast.Tuple(elts=[original_target, elm_target], ctx=ast.Store()),
            iter=ast.Call(func=enumerate_node, args=[node_iterable], keywords=[]),
            body=new_body,
            orelse=node.orelse,
        )
        new_node = ast.fix_missing_locations(ast.copy_location(new_node, node))
        new_node = self.generic_visit(new_node)
        return new_node
    @staticmethod
    def is_smelly(node: ast.For):
        """True only for loops shaped ``for _ in range(len(...)):``."""
        try:
            return node.iter.func.id == "range" and node.iter.args[0].func.id == "len"
        except AttributeError:
            # The iterator isn't a plain range(len(...)) call chain.
            return False
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,009
|
Tadaboody/good_smell
|
refs/heads/master
|
/docs/generate_smell_doc.py
|
from tests.test_collection import collect_tests, test_case_files
def generate_smell_docs():
    """Print a markdown doc section for each smell, built from the first
    example test case of every test-case file.
    """
    for file in test_case_files:
        # Take only the first case lazily instead of materializing every
        # collected test in the file (the original built the full list).
        example_test = next(iter(collect_tests(file)))
        desc, symbols, before, after = example_test
        # Sort for a deterministic pick; iterating a set is order-unstable.
        symbol = sorted(symbols)[0]
        print(
            f"""### {desc} ({symbol})
```py
{before}```
Will be fixed to
```py
{after}```"""
        )
if __name__ == "__main__":
    generate_smell_docs()
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,010
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/ast_smell.py
|
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
    """A subclass of transformer that logs the nodes it transforms"""

    def __init__(self, transform):
        # Nodes that matched `is_smelly` during traversal.
        self.transformed_nodes = list()
        # Fixed misspelled attribute name (was `self.transofrm`); it is only
        # read inside this class, so the rename is safe.
        self.transform = transform

    @abc.abstractmethod
    def is_smelly(self, node: ast.AST) -> bool:
        """Checks if the given `node` should be transformed"""

    def visit(self, node: ast.AST):
        """Log smelly nodes; rewrite them only when `transform` is set."""
        if not self.is_smelly(node):
            return self.generic_visit(node)
        self.transformed_nodes.append(node)
        if self.transform:
            # Dispatch to the subclass's visit_<NodeType> handler.
            return super().visit(node)
        return self.generic_visit(node)
T = TypeVar("T")
def unwrap(x: Optional[T]) -> T:
    """Return `x` unchanged, raising ValueError when it is None.

    Used to narrow Optional attributes before use.
    """
    if x is None:
        raise ValueError("Unwrapped None")  # fixed typo: was "Unrwapped None"
    return x
class AstSmell(LintSmell):
    """A LintSmell implemented via an AST NodeTransformer supplied by
    `transformer_class`."""

    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occuring smells of this smell class"""
        transformer = self.transformer_class(self.transform)
        transformer.visit(unwrap(self.tree))
        # Annotation-only declaration for the comprehension variable below.
        node: ast.stmt
        return [
            SmellWarning(
                msg=self.warning_message,
                row=node.lineno,
                col=node.col_offset,
                path=unwrap(self.path),
                symbol=self.symbol,
            )
            for node in transformer.transformed_nodes
        ]

    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""
        # Always pass transform=True here: fixing implies rewriting,
        # regardless of how this instance was configured for checking.
        return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))

    @property
    @abc.abstractmethod
    def transformer_class(self) -> Type[LoggingTransformer]:
        """The LoggingTransformer subclass that detects/rewrites this smell."""
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,011
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/smell_warning.py
|
from typing import NamedTuple
FLAKE8_FORMAT = "{path}:{row}:{col} {symbol} {msg}"
# Fixed format string: the placeholders previously read {line}/{column},
# which are not SmellWarning fields, so warning_string() with the default
# formatter raised KeyError. The rendered output is unchanged.
PYLINT_FORMAT = "{path}:{row}:{col}: {msg} ({symbol})"
def to_dict(namedtuple: NamedTuple) -> dict:
    """Return a plain dict mapping the namedtuple's field names to values."""
    # Use the built-in _asdict() instead of zipping _fields by hand.
    return dict(namedtuple._asdict())
class SmellWarning(NamedTuple):
    """Class to represent a warning message about a smell"""
    row: int  # 1-based line number of the smelly node
    col: int  # 0-based column offset
    path: str  # file the warning refers to
    msg: str  # human-readable description
    symbol: str  # short symbolic name of the smell
    def warning_string(self, formatter: str = PYLINT_FORMAT):
        """Render this warning using `formatter` (pylint-style by default)."""
        return formatter.format(**to_dict(self))
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,012
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/smells/__init__.py
|
from .filter import FilterIterator
from .join_literal import JoinLiteral
from .nested_for import NestedFor
from .range_len_fix import RangeLenSmell
from .yield_from import YieldFrom
implemented_smells = (RangeLenSmell, NestedFor, FilterIterator, YieldFrom, JoinLiteral)
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,013
|
Tadaboody/good_smell
|
refs/heads/master
|
/tests/examples/example.py
|
#: example
# example-symbol,another-one
before = 0
before = 1
# ==>
after = 0
after = 1
# END
#: example
# None
before = 0
before = 1
# ==>
after = 0
after = 1
# END
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,014
|
Tadaboody/good_smell
|
refs/heads/master
|
/tests/examples/range_len.py
|
#: Range len instead of enumerate
# range-len
for i in range(len(sequence)):
a = sequence[i]
print(a)
# ==>
for i, a in enumerate(sequence):
print(a)
# END
#: Replace an empty body with pass
# range-len
for i in range(len(sequence)):
a = sequence[i]
# ==>
for i, a in enumerate(sequence):
pass
# END
#: replaces access
# range-len
for i in range(len(sequence)):
other_thing(sequence[i], i)
# ==>
for i, elm in enumerate(sequence):
other_thing(elm, i)
# END
#: Multiple replaces
# range-len
for i in range(len(sequence)):
x = sequence[i]
do_thing(x, i)
other_thing(sequence[i], i)
# ==>
for i, x in enumerate(sequence):
do_thing(x, i)
other_thing(x, i)
# END
#: Nested for
# range-len
for i in range(len(sequence)):
x = sequence[i]
for j in range(len(sequence)):
do_thing(x, j)
other_thing(sequence[i], i)
# ==>
for i, x in enumerate(sequence):
for j, _ in enumerate(sequence):
do_thing(x, j)
other_thing(x, i)
# END
#: Replace unused var with _
# range-len
for i in range(len(sequence)):
do_thing(i)
# ==>
for i, _ in enumerate(sequence):
do_thing(i)
# END
#: Don't remove an assign to something else
# range-len
for i in range(len(sequence)):
a = 0
print(sequence[j])
# ==>
for i, _ in enumerate(sequence):
a = 0
print(sequence[j])
# END
#: Behave correctly when used in the upper part of a slice
# range-len
for i in range(len(sequence)):
print(sequence[1:i])
# ==>
for i, _ in enumerate(sequence):
print(sequence[1:i])
# END
#: Don't replace access when used in the upper part of a slice
# range-len
for i in range(len(sequence)):
print(sequence[i:1])
# ==>
for i, _ in enumerate(sequence):
print(sequence[i:1])
# END
#: Don't replace access used in the upper part of a slice
# range-len
for i in range(len(sequence)):
print(sequence[2:1])
# ==>
for i, _ in enumerate(sequence):
print(sequence[2:1])
# END
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,015
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/main.py
|
from pathlib import Path
from typing import Iterable, Type
from fire import Fire
from good_smell import LintSmell, SmellWarning, implemented_smells
def print_smell_warnings(path: str):
    """Read the file at `path` and print one line per smell warning found."""
    source = Path(path).read_text()
    messages = [warning.warning_string() for warning in smell_warnings(source, path)]
    print("\n".join(messages))
def smell_warnings(source: str, path: str = "") -> Iterable[SmellWarning]:
    """Lazily yield every SmellWarning that the implemented smells find in `source`."""
    for smell_cls in implemented_smells:
        checker = smell_cls.from_source(
            source_code=source, path=str(path), transform=False
        )
        yield from checker.check_for_smell()
def print_fixed_smell(path: str, starting_line: int = 0, end_line: int = None):
    """Read the file at `path`, fix its smells, and print the result."""
    source_text = Path(path).read_text()
    print(fix_smell(source_text, starting_line, end_line))
def fix_smell(
    source: str, starting_line: int = 0, end_line: int = None, path: str = None
) -> str:
    """Run every implemented smell fixer over `source` in turn and return the result."""
    smell: Type[LintSmell]
    for smell in implemented_smells:
        # Each pass re-parses the (possibly already fixed) source.
        linter = smell.from_source(
            source_code=source,
            start_line=starting_line,
            end_line=end_line,
            path=path,
            transform=True,
        )
        source = linter.fix_smell()
    return source
def main():
    """CLI entry point: exposes the `fix` command via python-fire."""
    Fire({"fix": print_fixed_smell})
if __name__ == "__main__":
    main()
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,016
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/smells/yield_from.py
|
from good_smell import AstSmell, LoggingTransformer
import ast
class YieldFrom(AstSmell):
    """Checks for yields inside for loops"""
    @property
    def transformer_class(self):
        # The transformer that performs the actual `yield from` rewrite.
        return YieldFromTransformer
    @property
    def warning_message(self):
        return "Consider using yield from instead of yield inside of a for loop"
    @property
    def symbol(self):
        # Symbolic name used in lint output.
        return "yield-from"
class YieldFromTransformer(LoggingTransformer):
    """Visits every for-loop whose whole body is a single bare yield and
    collapses the loop into one `yield from` statement."""

    def visit_For(self, node: ast.For):
        # `for x in it: yield x` becomes the single statement `yield from it`.
        replacement = ast.Expr(value=ast.YieldFrom(value=node.iter))
        return ast.fix_missing_locations(replacement)

    @staticmethod
    def is_smelly(node: ast.AST):
        """True when the node is a for-loop containing exactly one bare yield."""
        if not isinstance(node, ast.For) or len(node.body) != 1:
            return False
        only_stmt = node.body[0]
        return isinstance(only_stmt, ast.Expr) and isinstance(only_stmt.value, ast.Yield)
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,017
|
Tadaboody/good_smell
|
refs/heads/master
|
/tests/examples/nested_for.py
|
#: Flatten for-loops using nested comprehensions
# nested-for
for i in seq_a:
for j in seq_b:
print(i, j)
# ==>
for i, j in ((i, j) for i in seq_a for j in seq_b):
print(i, j)
# END
#: Don't work if there's code between the loops (no way to know if it's unsafe)
# None
for i in seq_a:
print(i)
for j in seq_b:
print(i, j)
# ==>
for i in seq_a:
print(i)
for j in seq_b:
print(i, j)
# END
#: Don't work if there's code after the nested for
# None
for i in seq_a:
for j in seq_b:
print(i, j)
print(i)
# ==>
for i in seq_a:
for j in seq_b:
print(i, j)
print(i)
# END
#: Don't flatten a nested for with dependencies (#26)
# None
for num in range(1, 5):
for digits in range(1, 10 ** num):
pass
# ==>
for num in range(1, 5):
for digits in range(1, 10 ** num):
pass
# END
#: Check no errors with unpacking (#61)
# None
for i, num in enumerate(range(1, 5)):
for digits in range(1, 10 ** num):
pass
# ==>
for i, num in enumerate(range(1, 5)):
for digits in range(1, 10 ** num):
pass
# END
#: Check no errors with unpacking (#61), but also flatten
# nested-for
for i, j in enumerate(range(1, 5)):
for digits in range(1, 10 ** num):
pass
# ==>
for (i, j), digits in (
((i, j), digits)
for i, j in enumerate(range(1, 5))
for digits in range(1, 10 ** num)
):
pass
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,018
|
Tadaboody/good_smell
|
refs/heads/master
|
/tests/examples/yield_from.py
|
#: Use "yield from" instead of yield inside of a for loop
# yield-from
seq = range(10)
for x in seq:
yield x
# ==>
seq = range(10)
yield from seq
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,019
|
Tadaboody/good_smell
|
refs/heads/master
|
/tests/test_collection.py
|
import ast
import itertools
from os import PathLike
from pathlib import Path
from typing import Iterator, NamedTuple, Set
import astor
import black
import pytest
from good_smell import fix_smell, smell_warnings
FILE_DIR = Path(__file__).parent
EXAMPLES_DIR = FILE_DIR / "examples"
def normalize_formatting(code: str) -> str:
    """Returns a string of the code with normalized formatting for easier compares"""
    # Round-trip through the AST first to strip comments/layout differences.
    roundtripped = astor.to_source(ast.parse(code))
    try:
        return black.format_file_contents(roundtripped, fast=True, mode=black.Mode())
    except black.NothingChanged:
        return roundtripped
class CollectedTest(NamedTuple):
    """One before/after example parsed from a test-case file."""
    desc: str  # the human-readable title after the "#:" marker
    error_symbols: Set[str]  # smell symbols expected for `before` (empty for "None")
    before: str  # fixed annotation (was `int`): this is source text
    after: str  # expected source after fixing
# Markers of the example-file mini-format.
TITLE_PREFIX = "#:"
BEFORE_AFTER_SPLITTER = "==>"
END_SYMBOL = "END"
SPECIAL_SYMBOLS = (TITLE_PREFIX, BEFORE_AFTER_SPLITTER, END_SYMBOL)
def is_title(line: str) -> bool:
    """True when `line` opens a new example (it starts with the "#:" marker)."""
    return line.startswith(TITLE_PREFIX)
def collect_tests(path: PathLike) -> Iterator[CollectedTest]:
    """Collects all test cases listed in `path`.

    File format per case: a "#:" title line, then a "#"-prefixed line of
    comma-separated expected symbols ("None" for no warnings), the `before`
    code, a "==>" separator, the `after` code, and an "END" line.
    """
    with open(path) as fp:
        lines = fp.readlines()
    lines_iter = iter(lines)  # Create iterator for continued iteration
    # The generator argument and the takewhile calls below all consume the
    # SAME iterator, so each case's sections are read in sequence.
    for line_num, line in enumerate(line for line in lines_iter if is_title(line)):
        desc = line.strip("#:").strip()
        symbols_line = next(lines_iter).strip("#").strip()
        symbols = {symbol for symbol in symbols_line.split(",") if symbol != "None"}
        # takewhile also consumes (and drops) the terminating marker line.
        before = "".join(
            itertools.takewhile(lambda l: BEFORE_AFTER_SPLITTER not in l, lines_iter)
        )
        after = "".join(itertools.takewhile(lambda l: END_SYMBOL not in l, lines_iter))
        collected_test = CollectedTest(
            desc=desc, error_symbols=symbols, before=before, after=after
        )
        # Guard against malformed examples: no collected field may still
        # contain one of the structural marker tokens.
        if any(
            symbol in field
            for field, symbol in itertools.product(collected_test, SPECIAL_SYMBOLS)
        ):
            raise Exception(
                f"""Wrongly formatted example in {path}:{line_num}
                {collected_test}"""
            )
        yield collected_test
def test_collect_tests():
    """Sanity-check the collector against the tests/examples/example.py fixture."""
    example_path = EXAMPLES_DIR / "example.py"
    collected_tests = list(collect_tests(example_path))
    assert len(collected_tests) == 2
    case_with_symbol, case_with_no_symbol = collected_tests
    assert case_with_symbol.desc == "example"
    assert case_with_symbol.error_symbols == {"example-symbol", "another-one"}
    assert case_with_symbol.before == """before = 0\nbefore = 1\n"""
    assert case_with_symbol.after == """after = 0\nafter = 1\n"""
    # A symbols line of "None" must parse to an empty set.
    assert case_with_no_symbol.error_symbols == set()
# All example files except the self-test fixture ("example.py") are real cases.
test_case_files = [f for f in EXAMPLES_DIR.iterdir() if "example" not in f.name]
def params_from_file():
    """Yield a pytest.param (before, after, symbols) for every collected case."""
    for file in test_case_files:
        yield from (
            pytest.param(
                case.before,
                case.after,
                case.error_symbols,
                # e.g. "range_len:Multiple replaces" in pytest output.
                id=f"{file.with_suffix('').name}:{case.desc}",
            )
            for case in collect_tests(file)
        )
@pytest.mark.parametrize(["before", "_", "symbols"], params_from_file())
def test_smell_warning(before, _, symbols):
    """The declared symbols must exactly match the warnings emitted for `before`."""
    assert set(symbols) == {smell.symbol for smell in smell_warnings(before)}
@pytest.mark.parametrize(["before", "after", "_"], list(params_from_file()))
def test_smell_fixing(before, after, _):
    """Fixing `before` must equal `after` modulo formatting."""
    assert normalize_formatting(fix_smell(before)) == normalize_formatting(after)
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,020
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/lint_smell.py
|
import abc
import ast
import os
from typing import List, Optional
from good_smell import SmellWarning
class LintSmell(abc.ABC):
    """Abstract Base class to represent the sniffing instructions for the linter"""

    def __init__(
        self,
        transform: bool,
        path: Optional[str] = None,
        tree: Optional[ast.AST] = None,
    ):
        # transform=False: only report smells; transform=True: rewrite them.
        self.tree = tree
        self.path = path
        self.transform = transform

    @classmethod
    def from_source(
        cls,
        source_code: str,
        transform: bool = True,
        start_line: Optional[int] = 0,
        end_line: Optional[int] = None,
        path: Optional[str] = None,
    ) -> "LintSmell":
        """Parse `source_code` (optionally only lines [start_line, end_line))
        and build a smell instance over the resulting tree."""
        # Default to the end of the file when no end line is given.
        end_line = end_line or len(source_code.splitlines())
        # Join with "\n" (not os.linesep): this is in-memory text headed for
        # ast.parse, so the platform separator is irrelevant.
        # (Also removed the original's no-op `start_line = start_line`.)
        source_code = "\n".join(source_code.splitlines()[start_line:end_line])
        return cls(transform=transform, path=path, tree=ast.parse(source_code))

    @abc.abstractmethod
    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occuring smells of this smell class"""

    @abc.abstractmethod
    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""

    @property
    @abc.abstractmethod
    def symbol(self) -> str:
        """The symbolic name for the smell"""

    @property
    @abc.abstractmethod
    def warning_message(self) -> str:
        """The warning message shown for the smell"""
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,021
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/smells/nested_for.py
|
import ast
import typing
from good_smell import AstSmell, LoggingTransformer
class NameInNode(LoggingTransformer):
    """Visitor that records occurrences of a given name inside a tree.

    Runs with transform=False, so it never rewrites anything — matches are
    collected in `transformed_nodes`.
    """
    def __init__(self, name: ast.Name):
        self.name = name
        super().__init__(transform=False)
    def is_smelly(self, node: ast.AST) -> bool:
        # A "match" here is any Name node with the target identifier.
        return isinstance(node, ast.Name) and node.id == self.name.id
def name_in_node(node: ast.AST, name: ast.Name) -> bool:
    """Checks if the node `name` is in `node`"""
    finder = NameInNode(name)
    finder.visit(node)
    return len(finder.transformed_nodes) > 0
class NestedFor(AstSmell):
    """Checks for directly nested `for`s and flattens them into one loop over
    a nested generator expression (see NestedForTransformer)."""
    @property
    def transformer_class(self):
        return NestedForTransformer
    @property
    def warning_message(self):
        return "Consider using a nested comprehension instead of a nested for"
    @property
    def symbol(self):
        # Symbolic name used in lint output.
        return "nested-for"
class NestedForTransformer(LoggingTransformer):
    """NodeTransformer that visits directly-nested `for`s and flattens them
    into a single loop over a nested generator expression
    (`for (i, j) in ((i, j) for i in a for j in b): ...`)."""
    def visit_For(self, node: ast.For) -> ast.For:
        # is_smelly guarantees the body is exactly one inner for-loop.
        inner_for: ast.For = node.body[0]
        # Combined target unpacks both loop variables.
        new_target = ast.Tuple(elts=[node.target, inner_for.target])
        def create_comprehension(for_node: ast.For) -> ast.comprehension:
            # One `for ... in ...` clause of the generator expression.
            return ast.comprehension(target=for_node.target, iter=for_node.iter, ifs=[])
        gen_exp = ast.GeneratorExp(
            elt=new_target,
            generators=[create_comprehension(node), create_comprehension(inner_for)],
        )
        new_for = ast.For(
            target=new_target, iter=gen_exp, body=inner_for.body, orelse=node.orelse
        )
        new_for = ast.fix_missing_locations(new_for)
        return new_for
    @staticmethod
    def is_smelly(node: ast.AST):
        """Check if the node is only a nested for"""
        return (
            isinstance(node, ast.For)
            and isinstance(node.body[0], ast.For)
            and len(node.body) == 1
            # Check there's no dependancy between nodes: the inner iterable
            # must not reference any of the outer loop's targets (#26).
            and not any(
                name_in_node(node.body[0].iter, target)
                for target in for_target_names(node)
            )
        )
def ast_node(expr: str) -> ast.AST:
    """Helper function to parse a string denoting an expression into an AST node"""
    module = ast.parse(expr)
    # ast.parse wraps everything in Module(body=[...]); unwrap the statement.
    return module.body[0]
def for_target_names(node: ast.For) -> typing.List[ast.Name]:
    """Return the loop-variable nodes that *node* assigns to.

    A tuple target (``for a, b in ...``) yields its elements; a single
    name target (``for a in ...``) yields a one-element list.
    """
    target = typing.cast(typing.Union[ast.Tuple, ast.Name], node.target)
    if isinstance(target, ast.Tuple):
        return target.elts
    return [target]
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,022
|
Tadaboody/good_smell
|
refs/heads/master
|
/good_smell/smells/join_literal.py
|
import ast
from good_smell import AstSmell, LoggingTransformer
# ast.Str was deprecated in py3.8 and is removed in later versions;
# fall back to ast.Constant alone on interpreters where it no longer exists.
if hasattr(ast, "Str"):
    StrConst = (ast.Constant, ast.Str)
else:
    StrConst = (ast.Constant,)
class JoinLiteral(AstSmell):
    """Checks if joining a literal of a sequence."""

    @property
    def transformer_class(self):
        # Transformer that performs the actual rewrite for this smell.
        return Transformer

    @property
    def warning_message(self):
        # Message displayed to the user when this smell is found.
        return (
            "Consider using str.format instead of joining a constant amount of strings."
        )

    @property
    def symbol(self):
        # Short identifier used to enable/disable this smell.
        return "join-literal"
class Transformer(LoggingTransformer):
    """Rewrites ``"sep".join([a, b, ...])`` over a literal list into the
    equivalent ``"{}sep{}...".format(a, b, ...)`` call."""

    @staticmethod
    def normalize_constant(node) -> ast.Constant:
        """Compatibility wrapper for py3.8+, ast, ast.Str and ast.Num are replaced by ast.Constant.
        We don't type annotate `node` so it doesn't break on py3.10+ when these classes will be removed.

        Raises:
            ValueError: if *node* carries none of the known constant attributes.
        """
        # Try the modern attribute first, then the legacy ast.Str / ast.Num ones.
        for attr in ["value", "s", "n"]:
            try:
                return ast.Constant(value=getattr(node, attr))
            except AttributeError:
                pass
        # BUGFIX: corrected typo in the error message ("constat" -> "constant").
        raise ValueError("Not a constant.")

    def visit_Call(self, node: ast.Call) -> ast.Call:
        """Build the replacement ``"...".format(...)`` call node.

        Only reached for nodes that passed ``is_smelly``, so ``node.args[0]``
        is a literal list and ``node.func.value`` is a string constant.
        """
        format_arguments = node.args[0].elts
        format_delimiter = self.normalize_constant(node.func.value).value
        # One "{}" placeholder per joined element, separated by the delimiter.
        format_string = format_delimiter.join(["{}"] * len(format_arguments))
        new_call = ast.Call(
            func=ast.Attribute(
                value=ast.Constant(value=format_string), attr="format", ctx=ast.Load()
            ),
            args=format_arguments,
            keywords=[],
        )
        return ast.fix_missing_locations(new_call)

    @staticmethod
    def is_smelly(node: ast.AST):
        """Check that the node is ``<str literal>.join([...])`` over a list
        literal with a fixed number of elements (no starred unpacking)."""
        # BUGFIX(doc): previous docstring was copy-pasted from the nested-for
        # smell and described the wrong check.
        return (
            isinstance(node, ast.Call)
            and isinstance(node.func, ast.Attribute)
            and isinstance(node.func.value, StrConst)
            and node.func.attr == "join"
            and len(node.args) == 1
            and isinstance(node.args[0], ast.List)
            # A starred element means the argument count isn't constant.
            and not any(isinstance(el, ast.Starred) for el in node.args[0].elts)
        )
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,023
|
Tadaboody/good_smell
|
refs/heads/master
|
/tests/examples/join_literal.py
|
#: Warn when using join on a list of known literals.
# join-literal
a = "foo"
b = "bar"
",".join([a, b])
# ==>
a = "foo"
b = "bar"
"{},{}".format(a, b)
# END
#: Don't warn when joining an iterable
# None
iterable = ["a","b"]
",".join(iterable)
# ==>
iterable = ["a","b"]
",".join(iterable)
# END
#: Don't warn when joining a generator expression
# None
",".join(str(i) for i in range(100))
# ==>
",".join(str(i) for i in range(100))
# END
#: Don't warn when joining a list comprehension
# None
",".join([str(i) for i in range(100)])
# ==>
",".join([str(i) for i in range(100)])
# END
#: Don't warn when the list literal includes an unpacking
# None
",".join([1,2,3,*a])
# ==>
",".join([1,2,3,*a])
# END
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
1,024
|
Tadaboody/good_smell
|
refs/heads/master
|
/tests/test_no_transform.py
|
import itertools
import ast
from good_smell.smells import NestedFor
def compare_ast(node1, node2):
"""Compare two ast, adapted from https://stackoverflow.com/a/30581854 to py3"""
if type(node1) is not type(node2):
return False
if isinstance(node1, ast.AST):
for k, v in vars(node1).items():
if k in ("lineno", "col_offset", "ctx"):
continue
if not compare_ast(v, getattr(node2, k)):
return False
return True
elif isinstance(node1, list):
return all(itertools.starmap(compare_ast, zip(node1, node2)))
else:
return node1 == node2
def test_no_transform():
source = """
seq_a = [0]
seq_b = range(10)
for i in seq_a:
for j in seq_b:
print(i, j)"""
original_tree = ast.parse(source)
tree = ast.parse(source)
assert NestedFor(transform=False, path="test", tree=tree).check_for_smell()
assert compare_ast(original_tree, tree)
|
{"/good_smell/__init__.py": ["/good_smell/smell_warning.py", "/good_smell/lint_smell.py", "/good_smell/ast_smell.py", "/good_smell/smells/__init__.py", "/good_smell/main.py", "/good_smell/flake8_ext.py"], "/good_smell/smells/range_len_fix.py": ["/good_smell/__init__.py"], "/docs/generate_smell_doc.py": ["/tests/test_collection.py"], "/good_smell/ast_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/__init__.py": ["/good_smell/smells/filter.py", "/good_smell/smells/join_literal.py", "/good_smell/smells/nested_for.py", "/good_smell/smells/range_len_fix.py", "/good_smell/smells/yield_from.py"], "/good_smell/main.py": ["/good_smell/__init__.py"], "/good_smell/smells/yield_from.py": ["/good_smell/__init__.py"], "/tests/test_collection.py": ["/good_smell/__init__.py"], "/good_smell/lint_smell.py": ["/good_smell/__init__.py"], "/good_smell/smells/nested_for.py": ["/good_smell/__init__.py"], "/good_smell/smells/join_literal.py": ["/good_smell/__init__.py"], "/tests/test_no_transform.py": ["/good_smell/smells/__init__.py"], "/good_smell/smells/filter.py": ["/good_smell/__init__.py"], "/good_smell/flake8_ext.py": ["/good_smell/__init__.py"], "/tests/test_enumerate_fix.py": ["/good_smell/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.