repo_name
stringclasses 100
values | file_path
stringlengths 5
100
| file_content
stringlengths 27
51.9k
| imported_files_content
stringlengths 45
239k
| import_relationships
dict |
|---|---|---|---|---|
shuishen112/pairwise-rnn
|
/models/__init__.py
|
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN
def setup(opt):
    """Instantiate a model from its configuration dict.

    Args:
        opt: dict of hyper-parameters; opt["model_name"] selects the
            architecture ("cnn", "rnn" or "qcnn").

    Returns:
        The constructed model object.

    Raises:
        ValueError: if opt["model_name"] names no known architecture.
    """
    name = opt["model_name"]
    if name == "cnn":
        return CNN(opt)
    if name == "rnn":
        return RNN(opt)
    if name == "qcnn":
        return QCNN(opt)
    # The original printed "no model" and called exit(0), which reports
    # success to the shell on a configuration error; fail loudly instead.
    raise ValueError("unknown model_name: %s" % name)
|
#coding:utf-8
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import models.blocks as blocks
# model_type :apn or qacnn
class QA_CNN_extend(object):
    """Pairwise CNN for question answering (TensorFlow 1.x graph style).

    Builds a shared word-embedding + wide-convolution encoder for a
    question, a positive answer and a negative answer, pools each into a
    fixed-size vector, and trains with a cosine-similarity hinge loss so
    the positive pair scores higher than the negative pair.

    Key attributes (mostly copied from the `opt` dict in __init__):
        dropout_keep_prob: keep probability fed at train time
        num_filters / filter_sizes: conv configuration
        para: list of variables included in the L2 penalty
        pooling: pooling strategy name (see pooling_graph)
    """
    def __init__(self,opt):
        # Copy every hyper-parameter in `opt` onto the instance
        # (embedding_size, filter_sizes, num_filters, dropout_keep_prob,
        # embeddings, vocab_size, trainable, l2_reg_lambda, learning_rate...).
        for key,value in opt.items():
            self.__setattr__(key,value)
        self.attention_size = 100
        # NOTE(review): this overwrites any `pooling` entry supplied in opt;
        # the strategy is effectively pinned to 'mean' — confirm intended.
        self.pooling = 'mean'
        self.total_num_filter = len(self.filter_sizes) * self.num_filters
        # Variables appended here are summed into the L2 penalty in create_loss.
        self.para = []
        self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
    def create_placeholder(self):
        """Create int32 id-matrix placeholders for question, positive answer
        and negative answer. Sentence length varies per batch, hence the
        [None, None] shapes; max lengths/batch size are derived dynamically."""
        print(('Create placeholders'))
        self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')
        self.max_input_left = tf.shape(self.question)[1]
        self.batch_size = tf.shape(self.question)[0]
        self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')
        self.max_input_right = tf.shape(self.answer)[1]
        self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')
    def add_embeddings(self):
        """Create the word-embedding table (pre-trained when self.embeddings
        is given, random uniform otherwise), look up the three inputs, and
        derive real lengths / 0-1 padding masks via blocks.length."""
        print( 'add embeddings')
        if self.embeddings is not None:
            print( "load embedding")
            W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable)
        else:
            print( "random embedding")
            W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable)
        self.embedding_W = W
        self.para.append(self.embedding_W)
        self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)
        self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)
        self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)
        # Real (unpadded) lengths and masks computed from the id matrices.
        self.q_len,self.q_mask = blocks.length(self.question)
        self.a_len,self.a_mask = blocks.length(self.answer)
        self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)
    def convolution(self):
        """Create one (W, b) kernel pair per filter size, then run the wide
        (SAME-padded) convolution over all three inputs."""
        print( 'convolution:wide_convolution')
        self.kernels = []
        for i,filter_size in enumerate(self.filter_sizes):
            with tf.name_scope('conv-max-pool-%s' % filter_size):
                filter_shape = [filter_size,self.embedding_size,1,self.num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W")
                b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b")
                self.kernels.append((W,b))
                self.para.append(W)
                self.para.append(b)
        embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]
        self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]
    def pooling_graph(self):
        """Reduce the conv feature maps to fixed-size vectors with the
        configured strategy (__init__ pins self.pooling to 'mean')."""
        if self.pooling == 'mean':
            # The question side is identical for the positive and negative
            # pair, so it is pooled twice from the same tensors.
            self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
            self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
            self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)
            self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)
        elif self.pooling == 'attentive':
            self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
            self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
        elif self.pooling == 'position':
            self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
            self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
        elif self.pooling == 'traditional':
            print( self.pooling)
            print(self.q_cnn)
            self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
            self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
    def para_initial(self):
        """Create the attention parameter matrices used by the attentive /
        traditional / position pooling variants."""
        self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))
        self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))
        self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))
        self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))
        self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))
    def mean_pooling(self,conv,mask):
        """Average the conv features over time.

        NOTE(review): the mask multiplication is commented out, so padding
        positions are averaged in, and `self.see` is never assigned on this
        path even though train() fetches it — confirm against callers.
        """
        conv = tf.squeeze(conv,2)
        print( tf.expand_dims(tf.cast(mask,tf.float32),-1))
        return tf.reduce_mean(conv,axis = 1);
    def attentive_pooling(self,input_left,input_right,q_mask,a_mask):
        """Attentive pooling: build G = tanh(Q U A^T), max-pool G row- and
        column-wise into attention weights, mask them, and return the
        attended question / answer vectors."""
        Q = tf.squeeze(input_left,axis = 2)
        A = tf.squeeze(input_right,axis = 2)
        print( Q)
        print( A)
        first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)
        second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])
        result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))
        print( second_step)
        print( tf.transpose(A,perm = [0,2,1]))
        G = tf.tanh(result)
        # Column-wise pooling attends the question, row-wise the answer.
        row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')
        col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')
        self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')
        self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))
        self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')
        self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))
        self.see = G
        R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')
        R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')
        return R_q,R_a
    def traditional_attention(self,input_left,input_right,q_mask,a_mask):
        """Additive attention over answer positions conditioned on the
        masked mean-pooled question vector; returns (Q, attended answer)."""
        input_left = tf.squeeze(input_left,axis = 2)
        input_right = tf.squeeze(input_right,axis = 2)
        input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))
        Q = tf.reduce_mean(input_left_mask,1)
        a_shape = tf.shape(input_right)
        A = tf.reshape(input_right,[-1,self.total_num_filter])
        m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))
        f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))
        # Mask padding positions, then normalize to a distribution over time.
        self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))
        self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))
        self.see = self.f_attention_norm
        a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)
        return Q,a_attention
    def position_attention(self,input_left,input_right,q_mask,a_mask):
        """Position attention: weight answer positions by the argmax channel
        of (Q U) * A, then return (Q, attended answer)."""
        input_left = tf.squeeze(input_left,axis = 2)
        input_right = tf.squeeze(input_right,axis = 2)
        # NOTE(review): uses self.q_mask instead of the q_mask argument, so
        # the caller-supplied question mask is ignored here — confirm intended.
        Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
        QU = tf.matmul(Q,self.U)
        QUA = tf.multiply(tf.expand_dims(QU,1),input_right)
        self.attention_a = tf.cast(tf.argmax(QUA,2),tf.float32)
        self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))
        self.see = self.attention_a
        self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))
        self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])
        return Q ,self.r_a
    def create_loss(self):
        """Pairwise hinge loss with margin 0.05 on the cosine scores, plus an
        L2 penalty over every variable collected in self.para."""
        with tf.name_scope('score'):
            self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)
            self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)
        l2_loss = tf.constant(0.0)
        for p in self.para:
            l2_loss += tf.nn.l2_loss(p)
        with tf.name_scope("loss"):
            self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))
            self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss
            tf.summary.scalar('loss', self.loss)
        # "accuracy" here = fraction of pairs already separated by the margin.
        with tf.name_scope("accuracy"):
            self.correct = tf.equal(0.0, self.losses)
            self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")
            tf.summary.scalar('accuracy', self.accuracy)
    def create_op(self):
        """Create the Adam training op and the global step counter."""
        self.global_step = tf.Variable(0, name = "global_step", trainable = False)
        self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
        self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)
    def max_pooling(self,conv,input_length):
        """Max-pool a conv map over its full time dimension."""
        pooled = tf.nn.max_pool(
            conv,
            ksize = [1, input_length, 1, 1],
            strides = [1, 1, 1, 1],
            padding = 'VALID',
            name="pool")
        return pooled
    def getCosine(self,q,a):
        """Cosine similarity between pooled vectors, with dropout applied
        through self.dropout_keep_prob_holder (1.0 at predict time)."""
        pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)
        pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)
        pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1))
        pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))
        pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1)
        score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores")
        return score
    def wide_convolution(self,embedding):
        """Apply every kernel with SAME padding and concatenate the per-size
        outputs on the channel axis."""
        cnn_outputs = []
        for i,filter_size in enumerate(self.filter_sizes):
            conv = tf.nn.conv2d(
                embedding,
                self.kernels[i][0],
                strides=[1, 1, self.embedding_size, 1],
                padding='SAME',
                name="conv-1"
            )
            h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1")
            cnn_outputs.append(h)
        cnn_reshaped = tf.concat(cnn_outputs,3)
        return cnn_reshaped
    def variable_summaries(self,var):
        """Attach mean/stddev/max/min/histogram summaries to a variable."""
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev)
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
            tf.summary.histogram('histogram', var)
    def build_graph(self):
        """Assemble the full graph; order matters (placeholders before
        embeddings, conv before pooling, loss before the train op)."""
        self.create_placeholder()
        self.add_embeddings()
        self.para_initial()
        self.convolution()
        self.pooling_graph()
        self.create_loss()
        self.create_op()
        self.merged = tf.summary.merge_all()
    def train(self,sess,data):
        """Run one optimization step on a (q, a+, a-) id-matrix triple and
        return the fetched training diagnostics."""
        feed_dict = {
            self.question:data[0],
            self.answer:data[1],
            self.answer_negative:data[2],
            self.dropout_keep_prob_holder:self.dropout_keep_prob
        }
        _, summary, step, loss, accuracy,score12, score13, see = sess.run(
            [self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],
            feed_dict)
        return _, summary, step, loss, accuracy,score12, score13, see
    def predict(self,sess,data):
        """Score (question, answer) pairs with dropout disabled."""
        feed_dict = {
            self.question:data[0],
            self.answer:data[1],
            self.dropout_keep_prob_holder:1.0
        }
        score = sess.run( self.score12, feed_dict)
        return score
if __name__ == '__main__':
    # Smoke test. The original passed keyword arguments, but __init__ takes a
    # single `opt` dict, and it fed placeholders (q_mask/a_mask) that
    # create_placeholder never defines plus an undefined `cnn.dropout_keep`.
    opt = {
        'batch_size': 3,
        'vocab_size': 5000,
        'embedding_size': 100,
        'filter_sizes': [3, 4, 5],
        'num_filters': 64,
        'hidden_size': 100,
        'dropout_keep_prob': 1.0,
        'embeddings': None,
        'l2_reg_lambda': 0.0,
        'learning_rate': 0.001,
        'trainable': True,
        'conv': 'wide',
    }
    cnn = QA_CNN_extend(opt)
    cnn.build_graph()
    # Toy id matrices; all ids are < vocab_size.
    input_x_1 = np.reshape(np.arange(3 * 33),[3,33])
    input_x_2 = np.reshape(np.arange(3 * 40),[3,40])
    input_x_3 = np.reshape(np.arange(3 * 40),[3,40])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Length masks are derived inside the graph (blocks.length), so only
        # the id matrices and the dropout keep-probability are fed.
        feed_dict = {
            cnn.question: input_x_1,
            cnn.answer: input_x_2,
            cnn.answer_negative: input_x_3,
            cnn.dropout_keep_prob_holder: opt['dropout_keep_prob'],
        }
        question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)
        print( question.shape,answer.shape)
        print( score)
|
{
"imported_by": [],
"imports": [
"/models/QA_CNN_pairwise.py"
]
}
|
shuishen112/pairwise-rnn
|
/run.py
|
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
# ---- bootstrap: timestamped log directory and module logger ----
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
# NOTE(review): basename of the literal string 'program', not sys.argv[0].
program = os.path.basename('program')
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
    os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))

from data_helper import log_time_delta,getLogger
# This replaces the logger configured above; getLogger() sets up its own
# timestamped log file.
logger=getLogger()

# Hyper-parameters come from the shared tf.flags singleton.
# NOTE(review): get_qcnn_flag is defined in config.py (not shown in this
# chunk); _parse_flags() and __flags are TF1-internal APIs.
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
    logger.info(("{}={}".format(attr.upper(), value)))
    opts[attr]=value

# Load the dataset splits and derive vocabulary / length statistics.
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))

# Chinese datasets use 200-dim embeddings and the 250-answer batch loader.
# NOTE(review): getBatch48008 does not accept model/sess keyword arguments
# (see the loader call in the training loop) — verify for these datasets.
if args.data=="quora" or args.data=="8008" :
    print("cn embedding")
    embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
    train_data_loader = data_helper.getBatch48008
else:
    embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
    train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")

with tf.Graph().as_default():
    session_conf = tf.ConfigProto()
    session_conf.allow_soft_placement = args.allow_soft_placement
    session_conf.log_device_placement = args.log_device_placement
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(config=session_conf)

    # Build the selected model (cnn/rnn/qcnn) inside this graph.
    model=models.setup(opts)
    model.build_graph()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    @log_time_delta
    def predict(model,sess,batch,test):
        """Score every batch and trim any padding rows back to len(test)."""
        scores = []
        for data in batch:
            score = model.predict(sess,data)
            scores.extend(score)
        return np.array(scores[:len(test)])

    # NOTE(review): best_p1 was only used by a commented-out checkpointing
    # block (left over from a merge); it is currently never updated.
    best_p1=0
    for i in range(args.num_epoches):
        for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
            _, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
            logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
        # Per-epoch evaluation on the test split.
        test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
        predicted_test = predict(model,sess,test_datas,test)
        map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
        logger.info('map_mrr test' +str(map_mrr_test))
        print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
|
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
    """Decorator: print how long each call to *func* took, in seconds."""
    @wraps(func)
    def timed(*args, **kwargs):
        began = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - began
        print( "%s runed %.2f seconds"% (func.__name__,elapsed))
        return result
    return timed
import tqdm
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
    """A token -> integer-id mapping that grows on demand."""
    def __init__(self, start_feature_id = 1):
        # Next id to hand out.
        self.fid = start_feature_id
    def add(self, item):
        """Return the id for `item`, assigning the next free id if unseen."""
        if item in self:
            return self[item]
        assigned = self.fid
        self[item] = assigned
        self.fid += 1
        return assigned
    def dump(self, fname):
        """Write 'token<TAB>id' lines to `fname`, sorted by token."""
        with open(fname, "w") as out:
            out.writelines("{}\t{}\n".format(key, self[key]) for key in sorted(self))
def cut(sentence):
    """Tokenize a sentence: lower-case it and split on whitespace."""
    return [token for token in sentence.lower().split()]
@log_time_delta
def load(dataset, filter = False):
    """Load the train/test/dev splits of a dataset.

    Reads data/<dataset>/{train,test,dev}.txt as tab-separated
    (question, answer, flag) tables, NaNs filled with '0'. When `filter`
    is True, each split is reduced to answerable questions via
    removeUnanswerdQuestion.

    Returns:
        (train_df, test_df, dev_df) tuple of DataFrames.
    """
    data_dir = "data/" + dataset
    datas = []
    for data_name in ['train.txt','test.txt','dev.txt']:
        data_file = os.path.join(data_dir,data_name)
        data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
        if filter == True:
            datas.append(removeUnanswerdQuestion(data))
        else:
            datas.append(data)
    return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
    """Keep only questions that have both a correct and an incorrect answer.

    Args:
        df: DataFrame with 'question', 'answer' and 'flag' columns, flag=1
            marking a correct answer.

    Returns:
        The filtered DataFrame with a fresh RangeIndex (old index kept as
        an 'index' column, matching the original reset_index() behaviour).
    """
    counter = df.groupby("question").apply(lambda group: sum(group["flag"]))
    questions_have_correct = counter[counter > 0].index
    counter = df.groupby("question").apply(lambda group: sum(group["flag"] == 0))
    questions_have_uncorrect = counter[counter > 0].index
    # The original also computed a per-question answer count it never used,
    # and tested isin(questions_have_correct) twice; both removed.
    keep = df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)
    return df[keep].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
    """Build (or load a cached) Alphabet over every token in the corpora.

    The result is cached as temp/<dataset>.alphabet.pkl; delete that file
    to force a rebuild. Ids 0 and 1 are reserved for '[UNK]' and 'END'.
    """
    pkl_name="temp/"+dataset+".alphabet.pkl"
    if os.path.exists(pkl_name):
        return pickle.load(open(pkl_name,"rb"))
    alphabet = Alphabet(start_feature_id = 0)
    alphabet.add('[UNK]')
    alphabet.add('END')
    count = 0
    for corpus in corpuses:
        # Questions are deduplicated; answers are scanned as-is.
        for texts in [corpus["question"].unique(),corpus["answer"]]:
            for sentence in texts:
                tokens = cut(sentence)
                for token in set(tokens):
                    alphabet.add(token)
    print("alphabet size %d" % len(alphabet.keys()) )
    if not os.path.exists("temp"):
        os.mkdir("temp")
    pickle.dump( alphabet,open(pkl_name,"wb"))
    return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
    """Build an embedding matrix for `vocab` from a word->vector dict.

    Args:
        vectors: mapping word -> vector (anything numpy can assign row-wise).
        vocab: mapping word -> row index (e.g. an Alphabet).
        dim: embedding dimensionality.

    Returns:
        (len(vocab), dim) ndarray; words missing from `vectors` get a
        uniform(-0.5, 0.5) random row.
    """
    embedding = np.zeros((len(vocab),dim))
    # Bug fix: the counter started at 1, overstating the hit count by one.
    count = 0
    for word in vocab:
        if word in vectors:
            count += 1
            embedding[vocab[word]] = vectors[word]
        else:
            embedding[vocab[word]] = np.random.uniform(-0.5,+0.5,dim)
    print( 'word in embedding',count)
    return embedding
def encode_to_split(sentence,alphabet):
    """Map a sentence to a list of token ids, using '[UNK]' for OOV tokens.

    Args:
        sentence: raw text; tokenized with cut().
        alphabet: mapping token -> id that contains an '[UNK]' entry.
    """
    # (Dropped an unused `indices = []` local from the original.)
    tokens = cut(sentence)
    return [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
    """Read a word2vec/GloVe-style text file, keeping only words in `alphabet`.

    A two-column first line is treated as the '<vocab_size> <dim>' header.
    NOTE(review): when a header is present, embedding_size is reassigned to
    the *string* items[1]; callers only print it, but verify before using
    it numerically.

    Returns:
        dict word -> list of string components (not converted to float here).
    """
    vectors = {}
    with open(filename,encoding='utf-8') as f:
        i = 0
        for line in f:
            i += 1
            # Progress indicator for large embedding files.
            if i % 100000 == 0:
                print( 'epch %d' % i)
            items = line.strip().split(' ')
            if len(items) == 2:
                vocab_size, embedding_size= items[0],items[1]
                print( ( vocab_size, embedding_size))
            else:
                word = items[0]
                if word in alphabet:
                    vectors[word] = items[1:]
    print( 'embedding_size',embedding_size)
    print( 'done')
    print( 'words found in wor2vec embedding ',len(vectors.keys()))
    return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
    """Build (or load a cached) sub-embedding matrix for `alphabet`.

    Chooses the GloVe file for English or a 200-dim file otherwise, then
    projects it onto the vocabulary with getSubVectorsFromDict. Cached as
    temp/<dataset>.subembedding.pkl.
    """
    pkl_name="temp/"+dataset+".subembedding.pkl"
    if os.path.exists(pkl_name):
        return pickle.load(open(pkl_name,"rb"))
    if language=="en":
        fname = 'embedding/glove.6B/glove.6B.300d.txt'
    else:
        fname= "embedding/embedding.200.header_txt"
    embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
    sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
    pickle.dump( sub_embeddings,open(pkl_name,"wb"))
    return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
    """Yield padded (question, answer) evaluation batches in dataframe order.

    Each yielded tuple matches what model.predict() reads (data[0], data[1]).
    NOTE(review): the question mask and the overlap positions are computed
    but never yielded — dead work unless a caller is extended to use them.
    """
    q = []
    a = []
    pos_overlap = []
    for index,row in df.iterrows():
        question = encode_to_split(row["question"],alphabet)
        answer = encode_to_split(row["answer"],alphabet)
        overlap_pos = overlap_index(row['question'],row['answer'])
        q.append(question)
        a.append(answer)
        pos_overlap.append(overlap_pos)
    m = 0
    n = len(q)
    # Contiguous index windows of at most batch_size rows.
    idx_list = np.arange(m,n,batch_size)
    mini_batches = []
    for idx in idx_list:
        mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
    for mini_batch in mini_batches:
        mb_q = [ q[t] for t in mini_batch]
        mb_a = [ a[t] for t in mini_batch]
        mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
        yield(mb_q,mb_a)
def overlap_index(question,answer,stopwords = []):
    """Mark which answer tokens also occur in the question.

    Returns a 1-based position array over the answer tokens in which every
    token shared with the question is replaced by the constant OVERLAP.
    (`stopwords` is accepted for interface compatibility but unused.)
    """
    answer_tokens = cut(answer)
    shared = set(cut(question)) & set(answer_tokens)
    positions = np.arange(1,len(answer_tokens) + 1)
    for pos,token in enumerate(answer_tokens):
        if token in shared:
            positions[pos] = OVERLAP
    return positions
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
    """Build (q, a+, a-) triples for datasets with a fixed 250-answer pool.

    Assumes the first 250 rows of df["answer"] form the candidate pool and
    that each question's correct answer index modulo 250 identifies its
    positive; a random other candidate is the negative.

    `model`/`sess` are accepted (and ignored) so this loader has the same
    call signature as get_mini_batch, which run.py invokes with
    model=/sess= keywords.
    """
    # `import tqdm` later in this module shadows the earlier
    # `from tqdm import tqdm`, leaving the module-level name uncallable;
    # rebind the callable locally.
    from tqdm import tqdm
    q,a,neg_a = [],[],[]
    answers = df["answer"][:250]
    ground_truth = df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
    for question in tqdm(df['question'].unique()):
        index = ground_truth[question]
        canindates = [i for i in range(250)]
        canindates.remove(index)
        a_neg_index = random.choice(canindates)
        seq_q = encode_to_split(question,alphabet)
        seq_a = encode_to_split(answers[index],alphabet)
        seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
        q.append(seq_q)
        a.append(seq_a)
        neg_a.append(seq_neg_a)
    return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
    """Yield padded mini-batches of (q, a+, a-) id matrices plus masks.

    Args:
        q, a, neg_a: parallel lists of id sequences.
        batch_size: rows per batch (last batch may be smaller).
        sort_by_len: sort triples by descending question length first.
        shuffle: shuffle the batch start offsets.

    Yields:
        (mb_q, mb_a, mb_neg_a, mb_q_mask, mb_a_mask, mb_a_neg_mask).

    The original referenced undefined names (pos_overlap, neg_overlap,
    mb_a_mask, mb_a_neg_mask), raising NameError on the first batch; the
    unused overlap feature is dropped and all masks now come from
    prepare_data. The broken tqdm wrapper (shadowed by `import tqdm`) is
    also removed.
    """
    if sort_by_len:
        order = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
        q = [q[i] for i in order]
        a = [a[i] for i in order]
        neg_a = [neg_a[i] for i in order]
    n = len(q)
    idx_list = np.arange(0,n,batch_size)
    if shuffle:
        np.random.shuffle(idx_list)
    mini_batches = [np.arange(idx,min(idx + batch_size,n)) for idx in idx_list]
    for mini_batch in mini_batches:
        mb_q = [q[t] for t in mini_batch]
        mb_a = [a[t] for t in mini_batch]
        mb_neg_a = [neg_a[t] for t in mini_batch]
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_a_mask = prepare_data(mb_a)
        mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
        yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
    """Build (question, positive answer, negative answer) training triples.

    When model/sess are supplied the hardest negative (highest model score)
    is chosen; otherwise a random negative is sampled. Batching is
    delegated to iteration_batch.
    """
    q = []
    a = []
    neg_a = []
    for question in df['question'].unique():
        group = df[df["question"]==question]
        pos_answers = group[group["flag"] == 1]["answer"]
        neg_answers = group[group["flag"] == 0]["answer"]
        for pos in pos_answers:
            if model is not None and sess is not None:
                # Hard-negative mining: score every negative and keep the max.
                # NOTE(review): feeds the positive *answer* as the question
                # side of predict — confirm this is intended.
                pos_sent= encode_to_split(pos,alphabet)
                q_sent,q_mask= prepare_data([pos_sent])
                neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
                a_sent,a_mask= prepare_data(neg_sents)
                scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
                neg_index = scores.argmax()
            else:
                if len(neg_answers.index) > 0:
                    # NOTE(review): np.random.choice picks an original index
                    # *label*, but the .loc below runs after reset_index()
                    # (positional RangeIndex) — verify these agree.
                    neg_index = np.random.choice(neg_answers.index)
            # NOTE(review): if a question has no negative answers on the
            # random path, neg_index is unbound here and this raises
            # NameError.
            neg = neg_answers.reset_index().loc[neg_index,]["answer"]
            seq_q = encode_to_split(question,alphabet)
            seq_a = encode_to_split(pos,alphabet)
            seq_neg_a = encode_to_split(neg,alphabet)
            q.append(seq_q)
            a.append(seq_a)
            neg_a.append(seq_neg_a)
    return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def prepare_data(seqs,overlap = None):
    """Pad a list of id-sequences into a right-padded int32 matrix.

    Returns (x, overlap_position) when `overlap` is supplied, otherwise
    (x, mask) where mask holds 1.0 over real tokens and 0.0 over padding.
    """
    n_samples = len(seqs)
    lengths = [len(seq) for seq in seqs]
    longest = np.max(lengths)
    x = np.zeros((n_samples,longest)).astype('int32')
    if overlap is None:
        mask = np.zeros((n_samples,longest)).astype('float')
        for row, seq in enumerate(seqs):
            x[row, :lengths[row]] = seq
            mask[row, :lengths[row]] = 1.0
        return x, mask
    overlap_position = np.zeros((n_samples,longest)).astype('float')
    for row, seq in enumerate(seqs):
        x[row, :lengths[row]] = seq
        overlap_position[row, :lengths[row]] = overlap[row]
    return x, overlap_position
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
    """Configure and return the process logger, writing to a per-day
    directory log/<YYYYMMDD>/qa<timestamp>.log.

    NOTE(review): os.mkdir only creates the leaf directory, so this fails
    if the parent 'log/' directory does not already exist.
    """
    import sys
    import logging
    import os
    import time
    now = int(time.time())
    timeArray = time.localtime(now)
    timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
    log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    if not os.path.exists(log_filename):
        os.mkdir(log_filename)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    return logger
--- FILE SEPARATOR ---
class Singleton(object):
__instance=None
    def __init__(self):
        # Instances hold no state of their own; the shared FLAGS object
        # lives on the class attribute (name-mangled __instance).
        pass
    def getInstance(self):
        """Return the process-wide FLAGS object, building it on first use.

        NOTE(review): not thread-safe, and always builds the *test* flag
        set even though other get_*_flag variants exist on this class.
        """
        if Singleton.__instance is None:
            Singleton.__instance=self.get_test_flag()
            print("build FLAGS over")
        return Singleton.__instance
    def get_test_flag(self):
        """Define and return the default tf.app.flags FLAGS object.

        The length guard keeps the flags from being re-defined (TF raises
        on duplicate definitions) when this is called more than once in a
        process.
        """
        import tensorflow as tf
        flags = tf.app.flags
        if len(flags.FLAGS.__dict__.keys())<=2:
            # Model hyper-parameters.
            flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
            flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
            flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
            flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
            flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
            flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
            flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
            flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
            flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
            flags.DEFINE_integer("hidden_size",100,"the default hidden size")
            flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
            # Training parameters.
            flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
            flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
            flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
            flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
            flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
            flags.DEFINE_string('data','wiki','data set')
            flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
            flags.DEFINE_boolean('clean',True,'whether we clean the data')
            flags.DEFINE_string('conv','wide','wide conv or narrow')
            flags.DEFINE_integer('gpu',0,'gpu number')
            # Misc parameters.
            flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
            flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
        return flags.FLAGS
def get_rnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
# flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('data','trec','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_cnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_qcnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_8008_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
if __name__=="__main__":
    # Smoke test: build the default (test) flag preset and echo every flag.
    config_flags = Singleton().get_test_flag()
    for flag_name, flag_value in sorted(config_flags.__flags.items()):
        print(("{}={}".format(flag_name.upper(), flag_value)))
|
{
"imported_by": [],
"imports": [
"/data_helper.py",
"/config.py"
]
}
|
shuishen112/pairwise-rnn
|
/test.py
|
# -*- coding: utf-8 -*-
# Smoke-test / export script: restores the latest checkpoint, re-exports the
# graph as a SavedModel, and runs one hand-written query through the model.
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper

import datetime
import os

import models
import numpy as np
import evaluation

from data_helper import log_time_delta,getLogger

logger=getLogger()

# Build the RNN flag preset and copy every flag into a plain dict so the
# model constructor receives a simple options mapping.
args = Singleton().get_rnn_flag()
#args = Singleton().get_8008_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
    logger.info(("{}={}".format(attr.upper(), value)))
    opts[attr]=value

# Load the dataset splits and derive vocabulary / sequence-length statistics.
train,test,dev = data_helper.load(args.data,filter = args.clean)

q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))

alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))

# Chinese datasets use 200-dim embeddings and the 250-candidate batch loader;
# everything else uses 300-dim GloVe and the generic mini-batch loader.
if args.data=="quora" or args.data=="8008" :
    print("cn embedding")
    embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
    train_data_loader = data_helper.getBatch48008
else:
    embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
    train_data_loader = data_helper.get_mini_batch

opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")

#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
    # with tf.device("/cpu:0"):
    session_conf = tf.ConfigProto()
    session_conf.allow_soft_placement = args.allow_soft_placement
    session_conf.log_device_placement = args.log_device_placement
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(config=session_conf)

    model=models.setup(opts)
    model.build_graph()
    saver = tf.train.Saver()
    # initialise first, then overwrite variables from the checkpoint if present
    sess.run(tf.global_variables_initializer()) # fun first than print or save
    ckpt = tf.train.get_checkpoint_state("checkpoint")
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
    print(sess.run(model.position_embedding)[0])

    # re-export the restored graph as a SavedModel under ./model (clobbering
    # any previous export)
    if os.path.exists("model") :
        import shutil
        shutil.rmtree("model")
    builder = tf.saved_model.builder.SavedModelBuilder("./model")
    builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
    builder.save(True)

    # NOTE(review): ExponentialMovingAverage expects a decay value as its
    # first argument; passing the model object here looks wrong — confirm.
    variable_averages = tf.train.ExponentialMovingAverage( model)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    for name in variables_to_restore:
        print(name)

    @log_time_delta
    def predict(model,sess,batch,test):
        # Score every mini-batch and concatenate, trimming any padding rows
        # contributed by the final partial batch.
        scores = []
        for data in batch:
            score = model.predict(sess,data)
            scores.extend(score)
        return np.array(scores[:len(test)])

    # Single hand-written query (Chinese: "how to withdraw the housing
    # fund?") scored against itself as a smoke test.
    text = "怎么 提取 公积金 ?"
    splited_text=data_helper.encode_to_split(text,alphabet)

    mb_q,mb_q_mask = data_helper.prepare_data([splited_text])
    mb_a,mb_a_mask = data_helper.prepare_data([splited_text])

    data = (mb_q,mb_a,mb_q_mask,mb_a_mask)

    score = model.predict(sess,data)
    print(score)
    feed_dict = {
        model.question:data[0],
        model.answer:data[1],
        model.q_mask:data[2],
        model.a_mask:data[3],
        model.dropout_keep_prob_holder:1.0
    }
    sess.run(model.position_embedding,feed_dict=feed_dict)[0]
|
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
    """Decorator that prints the wall-clock runtime of each call to *func*."""
    @wraps(func)
    def _wrapper(*args, **kwargs):
        began = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - began
        print("%s runed %.2f seconds" % (func.__name__, elapsed))
        return result
    return _wrapper
import tqdm
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
    """A token -> integer-id mapping that hands out fresh ids on first sight."""

    def __init__(self, start_feature_id=1):
        # next id to allocate
        self.fid = start_feature_id

    def add(self, item):
        """Return the id of *item*, allocating the next free id if unseen."""
        existing = self.get(item, None)
        if existing is not None:
            return existing
        new_id = self.fid
        self[item] = new_id
        self.fid += 1
        return new_id

    def dump(self, fname):
        """Write one 'token<TAB>id' line per entry, sorted by token."""
        with open(fname, "w") as out:
            for key in sorted(self.keys()):
                out.write("{}\t{}\n".format(key, self[key]))
def cut(sentence):
    """Tokenise by whitespace after lower-casing (stop-word removal disabled)."""
    return sentence.lower().split()
@log_time_delta
def load(dataset, filter = False):
    """Load the train/test/dev splits of *dataset* from data/<dataset>/.

    Each split is a tab-separated file with (question, answer, flag) columns;
    NaNs are replaced by '0'.  When *filter* is true, questions without both
    a correct and an incorrect answer are dropped.
    """
    base_dir = "data/" + dataset
    frames = []
    for split_name in ['train.txt','test.txt','dev.txt']:
        split_path = os.path.join(base_dir, split_name)
        frame = pd.read_csv(split_path, header=None, sep="\t",
                            names=["question","answer","flag"]).fillna('0')
        if filter == True:
            frame = removeUnanswerdQuestion(frame)
        frames.append(frame)
    return tuple(frames)
@log_time_delta
def removeUnanswerdQuestion(df):
    """Keep only questions that have both a correct and an incorrect answer.

    Rows survive when their question has at least one flag==1 answer and at
    least one flag==0 answer, so pairwise (positive, negative) sampling is
    always possible.  (The original tested the 'has correct' condition twice
    and computed an unused multi-answer index; both removed.)

    Returns:
        The filtered dataframe with a fresh integer index (the old index is
        kept as a column by reset_index()).
    """
    counter = df.groupby("question").apply(lambda group: sum(group["flag"]))
    questions_have_correct = counter[counter > 0].index
    counter = df.groupby("question").apply(lambda group: sum(group["flag"] == 0))
    questions_have_uncorrect = counter[counter > 0].index
    return df[df["question"].isin(questions_have_correct)
              & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
    """Build (or load from the pickle cache) the token Alphabet.

    Scans the unique questions and all answers of every corpus; ids 0 and 1
    are reserved for '[UNK]' and 'END'.  The result is cached under
    temp/<dataset>.alphabet.pkl.
    """
    cache_path = "temp/" + dataset + ".alphabet.pkl"
    if os.path.exists(cache_path):
        return pickle.load(open(cache_path, "rb"))
    alphabet = Alphabet(start_feature_id=0)
    alphabet.add('[UNK]')
    alphabet.add('END')
    for corpus in corpuses:
        for texts in [corpus["question"].unique(), corpus["answer"]]:
            for sentence in texts:
                for token in set(cut(sentence)):
                    alphabet.add(token)
    print("alphabet size %d" % len(alphabet.keys()))
    if not os.path.exists("temp"):
        os.mkdir("temp")
    pickle.dump(alphabet, open(cache_path, "wb"))
    return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
    """Build a (len(vocab), dim) embedding matrix from a word->vector dict.

    Words present in *vectors* receive their pre-trained vector; out-of-
    vocabulary words are initialised uniformly in [-0.5, 0.5).

    Args:
        vectors: mapping word -> vector (any sequence numpy can assign).
        vocab: mapping word -> row index (e.g. an Alphabet).
        dim: embedding dimensionality.
    """
    embedding = np.zeros((len(vocab), dim))
    count = 0  # was initialised to 1, which over-reported the hit count by one
    for word in vocab:
        if word in vectors:
            count += 1
            embedding[vocab[word]] = vectors[word]
        else:
            # random init for words missing from the pre-trained vectors
            embedding[vocab[word]] = np.random.uniform(-0.5, +0.5, dim)
    print( 'word in embedding',count)
    return embedding
def encode_to_split(sentence, alphabet):
    """Map a raw sentence to token ids; unknown tokens get the '[UNK]' id."""
    tokens = cut(sentence)
    return [alphabet[tok] if tok in alphabet else alphabet['[UNK]'] for tok in tokens]
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
    """Read a word2vec/GloVe-style text file, keeping vectors for in-vocab words.

    Args:
        alphabet: mapping of known tokens; only their vectors are retained.
        filename: path to the embedding text file (one word + values per line).
        embedding_size: expected dimensionality; overridden by a two-field
            header line ("<vocab_size> <embedding_size>") if the file has one.

    Returns:
        dict mapping word -> list of vector components (kept as strings;
        numpy converts them on assignment downstream).
    """
    vectors = {}
    with open(filename,encoding='utf-8') as f:
        i = 0
        for line in f:
            i += 1
            if i % 100000 == 0:
                # progress heartbeat for large embedding files
                print( 'epch %d' % i)
            items = line.strip().split(' ')
            if len(items) == 2:
                # word2vec-style header line: "<vocab_size> <embedding_size>"
                vocab_size, embedding_size= items[0],items[1]
                print( ( vocab_size, embedding_size))
            else:
                word = items[0]
                if word in alphabet:
                    vectors[word] = items[1:]
    print( 'embedding_size',embedding_size)
    print( 'done')
    print( 'words found in wor2vec embedding ',len(vectors.keys()))
    return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
    """Build (or load from the pickle cache) the embedding matrix for *alphabet*.

    English vocabularies use 300-dim GloVe vectors; anything else uses the
    200-dim Chinese embedding file.  The result is cached under
    temp/<dataset>.subembedding.pkl.
    """
    cache_path = "temp/" + dataset + ".subembedding.pkl"
    if os.path.exists(cache_path):
        return pickle.load(open(cache_path, "rb"))
    if language == "en":
        vec_file = 'embedding/glove.6B/glove.6B.300d.txt'
    else:
        vec_file = "embedding/embedding.200.header_txt"
    word_vectors = load_text_vec(alphabet, vec_file, embedding_size=dim)
    sub_embeddings = getSubVectorsFromDict(word_vectors, alphabet, dim)
    pickle.dump(sub_embeddings, open(cache_path, "wb"))
    return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
    """Yield padded (question, answer) evaluation batches in dataframe order.

    Batches are neither shuffled nor sorted, so the yielded scores line up
    with the dataframe rows.
    """
    q = []
    a = []
    pos_overlap = []
    # encode every row once up front
    for index,row in df.iterrows():
        question = encode_to_split(row["question"],alphabet)
        answer = encode_to_split(row["answer"],alphabet)
        overlap_pos = overlap_index(row['question'],row['answer'])
        q.append(question)
        a.append(answer)
        pos_overlap.append(overlap_pos)

    # partition [0, n) into batch-sized index windows
    m = 0
    n = len(q)
    idx_list = np.arange(m,n,batch_size)
    mini_batches = []
    for idx in idx_list:
        mini_batches.append(np.arange(idx,min(idx + batch_size,n)))

    for mini_batch in mini_batches:
        mb_q = [ q[t] for t in mini_batch]
        mb_a = [ a[t] for t in mini_batch]
        mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
        # NOTE(review): the overlap features are computed but never yielded —
        # only the padded question/answer matrices are returned; confirm
        # whether the overlap output was meant to be part of the yield.
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
        yield(mb_q,mb_a)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
    """Build (question, positive, random-negative) triples for the 8008 data.

    Assumes the first 250 rows hold the shared candidate-answer pool and that
    each question's correct answer maps into it via ``index % 250`` — TODO
    confirm this layout against the 8008 data files.
    """
    q,a,neg_a=[],[],[]
    answers=df["answer"][:250]
    # question -> position of its flag==1 answer within the candidate pool
    ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()

    # NOTE(review): a later module-level `import tqdm` rebinds the name from
    # the function imported via `from tqdm import tqdm` to the module; if the
    # module binding is active this call fails — verify which one wins.
    for question in tqdm(df['question'].unique()):
        index= ground_truth[question]
        # negative answer: any pool candidate except the ground-truth one
        canindates = [i for i in range(250)]
        canindates.remove(index)
        a_neg_index = random.choice(canindates)

        seq_q = encode_to_split(question,alphabet)
        seq_a = encode_to_split(answers[index],alphabet)
        seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
        q.append(seq_q)
        a.append( seq_a)
        neg_a.append(seq_neg_a )
    return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
    """Yield padded (q, a, neg_a, q_mask, a_mask, a_neg_mask) mini-batches.

    Args:
        q, a, neg_a: parallel lists of id sequences (question, positive
            answer, negative answer).
        batch_size: number of triples per yielded batch.
        sort_by_len: sort triples by question length (longest first) so each
            batch contains similar-length sequences.
        shuffle: randomise the order of the batches (not of the triples).

    Yields:
        Tuples of padded int32 matrices and float 1.0/0.0 padding masks, as
        produced by prepare_data.

    Note:
        The previous body referenced undefined names (pos_overlap,
        neg_overlap, mb_a_mask, mb_a_neg_mask) and raised NameError as soon
        as it ran; this version pads the three sequence lists and yields the
        masks the yield signature always promised.
    """
    if sort_by_len:
        sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
        q = [q[i] for i in sorted_index]
        a = [a[i] for i in sorted_index]
        neg_a = [neg_a[i] for i in sorted_index]

    # partition [0, len(q)) into batch-sized index windows
    n = len(q)
    idx_list = np.arange(0, n, batch_size)
    if shuffle:
        np.random.shuffle(idx_list)
    mini_batches = [np.arange(idx, min(idx + batch_size, n)) for idx in idx_list]

    for mini_batch in tqdm(mini_batches):
        mb_q = [q[t] for t in mini_batch]
        mb_a = [a[t] for t in mini_batch]
        mb_neg_a = [neg_a[t] for t in mini_batch]
        mb_q, mb_q_mask = prepare_data(mb_q)
        mb_a, mb_a_mask = prepare_data(mb_a)
        mb_neg_a, mb_a_neg_mask = prepare_data(mb_neg_a)
        yield (mb_q, mb_a, mb_neg_a, mb_q_mask, mb_a_mask, mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
    """Build (question, positive, negative) training triples and batch them.

    For every positive answer of each question a negative answer from the
    same question group is selected: randomly by default, or by hard-negative
    mining (highest model score) when *model* and *sess* are supplied.

    Fixes over the original:
      * questions without any flag==0 answer are skipped — previously
        ``neg_index`` was left undefined (or stale from an earlier loop
        iteration), producing a NameError or a wrong negative;
      * the random branch now draws a positional index (0..n-1) matching the
        ``reset_index()`` lookup below — previously it drew original frame
        labels, which mis-address rows after the index reset.

    Returns:
        The generator produced by iteration_batch.
    """
    q = []
    a = []
    neg_a = []
    for question in df['question'].unique():
        group = df[df["question"] == question]
        pos_answers = group[group["flag"] == 1]["answer"]
        neg_answers = group[group["flag"] == 0]["answer"]
        if len(neg_answers.index) == 0:
            # no negative candidate: a triple cannot be formed for this question
            continue
        for pos in pos_answers:
            if model is not None and sess is not None:
                # hard-negative mining: score every negative against the
                # positive and keep the highest-scoring one
                pos_sent = encode_to_split(pos, alphabet)
                q_sent, q_mask = prepare_data([pos_sent])
                neg_sents = [encode_to_split(sent, alphabet) for sent in neg_answers]
                a_sent, a_mask = prepare_data(neg_sents)
                scores = model.predict(sess, (np.tile(q_sent, (len(neg_answers), 1)), a_sent,
                                              np.tile(q_mask, (len(neg_answers), 1)), a_mask))
                neg_index = scores.argmax()
            else:
                # positional index into neg_answers, consistent with reset_index()
                neg_index = np.random.choice(len(neg_answers))
            neg = neg_answers.reset_index().loc[neg_index,]["answer"]
            q.append(encode_to_split(question, alphabet))
            a.append(encode_to_split(pos, alphabet))
            neg_a.append(encode_to_split(neg, alphabet))
    return iteration_batch(q, a, neg_a, batch_size, sort_by_len, shuffle)
def prepare_data(seqs, overlap=None):
    """Pad a batch of variable-length id sequences into dense matrices.

    Args:
        seqs: list of lists of token ids.
        overlap: optional list of per-token overlap feature sequences,
            parallel to ``seqs``.

    Returns:
        ``(x, overlap_position)`` when ``overlap`` is given, otherwise
        ``(x, x_mask)``.  ``x`` is an int32 array of shape
        ``(n_samples, max_len)`` padded with zeros; the second array is a
        float matrix holding either the overlap features or a 1.0/0.0
        padding mask.
    """
    lengths = [len(seq) for seq in seqs]
    n_samples = len(seqs)
    # guard: np.max raises on an empty batch; an empty input yields (0, 0) arrays
    max_len = np.max(lengths) if lengths else 0
    x = np.zeros((n_samples, max_len)).astype('int32')
    if overlap is not None:
        overlap_position = np.zeros((n_samples, max_len)).astype('float')
        for idx, seq in enumerate(seqs):
            x[idx, :lengths[idx]] = seq
            overlap_position[idx, :lengths[idx]] = overlap[idx]
        return x, overlap_position
    else:
        x_mask = np.zeros((n_samples, max_len)).astype('float')
        for idx, seq in enumerate(seqs):
            x[idx, :lengths[idx]] = seq
            x_mask[idx, :lengths[idx]] = 1.0
        return x, x_mask
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
    """Create and return a logger writing to log/<YYYYMMDD>/qa<timestamp>.log.

    Configures the root logging machinery (INFO level, timestamped format)
    and logs the command line that launched the process.
    """
    import sys
    import logging
    import os
    import time
    now = int(time.time())
    timeArray = time.localtime(now)
    timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
    log_filename = "log/" + time.strftime("%Y%m%d", timeArray)
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    # makedirs (not mkdir): also creates the missing "log/" parent directory,
    # and exist_ok avoids the check-then-create race between processes
    os.makedirs(log_filename, exist_ok=True)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        filename=log_filename + '/qa' + timeStamp + '.log',
                        filemode='w')
    logging.root.setLevel(level=logging.INFO)
    logger.info("running %s" % ' '.join(sys.argv))
    return logger
--- FILE SEPARATOR ---
class Singleton(object):
    """Accessor for the process-wide tf.app.flags configuration.

    tf.app.flags.FLAGS is a global object shared by the whole process; each
    get_*_flag method defines the flag set only once (guarded by the FLAGS
    __dict__ size check) and returns the same FLAGS object afterwards.  The
    methods differ only in preset values (learning rate, dataset, pooling,
    dropout, batch size, model name).
    """
    # cached FLAGS object shared by all Singleton instances
    __instance=None
    def __init__(self):
        pass
    def getInstance(self):
        """Return the cached FLAGS, building the default (test) preset on first use."""
        if Singleton.__instance is None:
            # Singleton.__instance=object.__new__(cls,*args,**kwd)
            Singleton.__instance=self.get_test_flag()
            print("build FLAGS over")
        return Singleton.__instance
    def get_test_flag(self):
        """CNN preset on wiki data (max pooling, lr 5e-3, keep-prob 1)."""
        import tensorflow as tf
        flags = tf.app.flags
        # define-once guard: an unpopulated FLAGS carries only bookkeeping
        # entries, so <=2 keys means the flags have not been defined yet —
        # NOTE(review): confirm this holds for the TF version in use
        if len(flags.FLAGS.__dict__.keys())<=2:
            flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
            flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
            flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
            flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
            flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
            flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
            flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
            flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
            flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
            flags.DEFINE_integer("hidden_size",100,"the default hidden size")
            flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
            # Training parameters
            flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
            flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
            flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
            flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
            flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
            flags.DEFINE_string('data','wiki','data set')
            flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
            flags.DEFINE_boolean('clean',True,'whether we clean the data')
            flags.DEFINE_string('conv','wide','wide conv or narrow')
            flags.DEFINE_integer('gpu',0,'gpu number')
            # Misc Parameters
            flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
            flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
        return flags.FLAGS
    def get_rnn_flag(self):
        """RNN preset on trec data (lr 1e-3, no data cleaning)."""
        import tensorflow as tf
        flags = tf.app.flags
        # define-once guard (see get_test_flag)
        if len(flags.FLAGS.__dict__.keys())<=2:
            flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
            flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
            flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
            flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
            flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
            flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
            flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
            flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
            flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
            flags.DEFINE_integer("hidden_size",100,"the default hidden size")
            flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
            # Training parameters
            flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
            flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
            flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
            flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
            flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
            # flags.DEFINE_string('data','8008','data set')
            flags.DEFINE_string('data','trec','data set')
            flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
            flags.DEFINE_boolean('clean',False,'whether we clean the data')
            flags.DEFINE_string('conv','wide','wide conv or narrow')
            flags.DEFINE_integer('gpu',0,'gpu number')
            # Misc Parameters
            flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
            flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
        return flags.FLAGS
    def get_cnn_flag(self):
        """CNN preset on wiki data (keep-prob 0.8, lr 5e-3)."""
        import tensorflow as tf
        flags = tf.app.flags
        # define-once guard (see get_test_flag)
        if len(flags.FLAGS.__dict__.keys())<=2:
            flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
            flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
            flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
            flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
            flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
            flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
            flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
            flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
            flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
            flags.DEFINE_integer("hidden_size",100,"the default hidden size")
            flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
            # Training parameters
            flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
            flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
            flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
            flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
            flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
            flags.DEFINE_string('data','wiki','data set')
            flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
            flags.DEFINE_boolean('clean',True,'whether we clean the data')
            flags.DEFINE_string('conv','wide','wide conv or narrow')
            flags.DEFINE_integer('gpu',0,'gpu number')
            # Misc Parameters
            flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
            flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
        return flags.FLAGS
    def get_qcnn_flag(self):
        """Quantum-CNN preset on wiki data (128 filters, mean pooling, lr 1e-3)."""
        import tensorflow as tf
        flags = tf.app.flags
        # define-once guard (see get_test_flag)
        if len(flags.FLAGS.__dict__.keys())<=2:
            flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
            flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
            flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
            flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
            flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
            flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
            flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
            flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
            flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
            flags.DEFINE_integer("hidden_size",100,"the default hidden size")
            flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
            # Training parameters
            flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
            flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
            flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
            flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
            flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
            flags.DEFINE_string('data','wiki','data set')
            flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
            flags.DEFINE_boolean('clean',True,'whether we clean the data')
            flags.DEFINE_string('conv','wide','wide conv or narrow')
            flags.DEFINE_integer('gpu',0,'gpu number')
            # Misc Parameters
            flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
            flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
        return flags.FLAGS
    def get_8008_flag(self):
        """RNN preset for the Chinese 8008 data (200-dim embeddings, batch 250)."""
        import tensorflow as tf
        flags = tf.app.flags
        # define-once guard (see get_test_flag)
        if len(flags.FLAGS.__dict__.keys())<=2:
            flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
            flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
            flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
            flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
            flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
            flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
            flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
            flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
            flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
            flags.DEFINE_integer("hidden_size",100,"the default hidden size")
            flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
            # Training parameters
            flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
            flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
            flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
            flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
            flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
            flags.DEFINE_string('data','8008','data set')
            flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
            flags.DEFINE_boolean('clean',False,'whether we clean the data')
            flags.DEFINE_string('conv','wide','wide conv or narrow')
            flags.DEFINE_integer('gpu',0,'gpu number')
            # Misc Parameters
            flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
            flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
        return flags.FLAGS
if __name__ == "__main__":
    # Smoke test: build the default ("test") flag set and dump every flag.
    cli_flags = Singleton().get_test_flag()
    # __flags is the TF1-internal dict backing tf.app.flags.FLAGS.
    for name, val in sorted(cli_flags.__flags.items()):
        print(f"{name.upper()}={val}")
|
{
"imported_by": [],
"imports": [
"/data_helper.py",
"/config.py"
]
}
|
Sssssbo/SDCNet
|
/SDCNet.py
|
import datetime
import os
import time
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
import pandas as pd
import numpy as np
import joint_transforms
from config import msra10k_path, MTDD_train_path
from datasets import ImageFolder_joint
from misc import AvgMeter, check_mkdir, cal_sc
from model import R3Net, SDCNet
from torch.backends import cudnn
# cuDNN autotuning is safe here: RandomCrop(300) keeps input sizes fixed.
cudnn.benchmark = True
torch.manual_seed(2021)
# NOTE(review): hard-coded GPU index — adjust for the local machine.
torch.cuda.set_device(6)
# CSV listing img_path / gt_path / label for every training sample.
csv_path = './label_DUTS-TR.csv'
ckpt_path = './ckpt'
exp_name ='SDCNet'
# Training hyper-parameters; 'lr_decay' is the poly-decay exponent used in train().
args = {
    'iter_num': 30000,
    'train_batch_size': 16,
    'last_iter': 0,
    'lr': 1e-3,
    'lr_decay': 0.9,
    'weight_decay': 5e-4,
    'momentum': 0.9,
    'snapshot': ''
}
# Joint transforms apply the same random crop/flip/rotation to image and mask.
joint_transform = joint_transforms.Compose([
    joint_transforms.RandomCrop(300),
    joint_transforms.RandomHorizontallyFlip(),
    joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
    transforms.ToTensor(),
    # ImageNet mean/std (backbone is ImageNet-pretrained ResNeXt).
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
all_data = pd.read_csv(csv_path)
train_set = ImageFolder_joint(all_data, joint_transform, img_transform, target_transform)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True, drop_last=True)#
# One log file per run, named by start timestamp.
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
def main():
    """Build SDCNet, optionally resume from a snapshot, and start training."""
    net = SDCNet(num_classes = 5).cuda().train() #
    print('training in ' + exp_name)
    # Common FCN recipe: biases get twice the base LR and no weight decay.
    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
         'lr': 2 * args['lr']},
        {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
         'lr': args['lr'], 'weight_decay': args['weight_decay']}
    ], momentum=args['momentum'])
    if len(args['snapshot']) > 0:
        print('training resumes from ' + args['snapshot'])
        net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
        optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth')))
        # Restore the bias / non-bias LR split after loading optimizer state.
        optimizer.param_groups[0]['lr'] = 2 * args['lr']
        optimizer.param_groups[1]['lr'] = args['lr']
    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    # BUGFIX: close the log file deterministically instead of leaking the handle.
    with open(log_path, 'w') as log_file:
        log_file.write(str(args) + '\n\n')
    train(net, optimizer)
def train(net, optimizer):
    """Optimise ``net`` until args['iter_num'] iterations, logging to ``log_path``.

    Each iteration feeds (image, mask, size-class label) batches through SDCNet,
    sums a CrossEntropy loss over the 2-channel side outputs (p1..p5) and a
    BCEWithLogits loss over the 1-channel saliency maps (predict1..predict11),
    and applies poly learning-rate decay (biases keep 2x the base LR).
    """
    start_time = time.time()
    curr_iter = args['last_iter']
    # NOTE(review): never updated below; printed as-is every iteration.
    num_class = [0, 0, 0, 0, 0]
    # Loss modules are stateless — build them once instead of per iteration.
    criterion = nn.BCEWithLogitsLoss().cuda()
    criterion2 = nn.CrossEntropyLoss().cuda()
    while True:
        total_loss_record, loss0_record, loss1_record = AvgMeter(), AvgMeter(), AvgMeter()
        batch_time = AvgMeter()
        end = time.time()
        print('-----begining the first stage, train_mode==0-----')
        for i, data in enumerate(train_loader):
            # Poly LR decay: lr * (1 - iter/iter_num) ** lr_decay; group 0 = biases (2x).
            optimizer.param_groups[0]['lr'] = 2 * args['lr'] * (1 - float(curr_iter) / args['iter_num']
                                                                ) ** args['lr_decay']
            optimizer.param_groups[1]['lr'] = args['lr'] * (1 - float(curr_iter) / args['iter_num']
                                                            ) ** args['lr_decay']
            inputs, gt, labels = data
            print(labels)
            # Per-sample size-class ids steering SDCNet's branch selection.
            sizec = labels.numpy()
            batch_size = inputs.size(0)
            inputs = Variable(inputs).cuda()
            gt = Variable(gt).cuda()
            labels = Variable(labels).cuda()
            optimizer.zero_grad()
            p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 = net(inputs, sizec)  # mode=1
            # CrossEntropy expects class indices: drop the channel dim of the mask.
            gt2 = gt.long()
            gt2 = gt2.squeeze(1)
            l5 = criterion2(p5, gt2)
            l4 = criterion2(p4, gt2)
            l3 = criterion2(p3, gt2)
            l2 = criterion2(p2, gt2)
            l1 = criterion2(p1, gt2)
            loss0 = criterion(predict11, gt)
            loss10 = criterion(predict10, gt)
            loss9 = criterion(predict9, gt)
            loss8 = criterion(predict8, gt)
            loss7 = criterion(predict7, gt)
            loss6 = criterion(predict6, gt)
            loss5 = criterion(predict5, gt)
            loss4 = criterion(predict4, gt)
            loss3 = criterion(predict3, gt)
            loss2 = criterion(predict2, gt)
            loss1 = criterion(predict1, gt)
            total_loss = l1 + l2 + l3 + l4 + l5 + loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 + loss8 + loss9 + loss10
            total_loss.backward()
            optimizer.step()
            total_loss_record.update(total_loss.item(), batch_size)
            # NOTE(review): loss1_record tracks l5 (deep supervision head), not loss1.
            loss1_record.update(l5.item(), batch_size)
            loss0_record.update(loss0.item(), batch_size)
            # BUGFIX: keep the iteration counter an int (was += 1.0).
            curr_iter += 1
            batch_time.update(time.time() - end)
            end = time.time()
            log = '[iter %d], [R1/Mode0], [total loss %.5f]\n' \
                  '[l5 %.5f], [loss0 %.5f]\n' \
                  '[lr %.13f], [time %.4f]' % \
                  (curr_iter, total_loss_record.avg, loss1_record.avg, loss0_record.avg, optimizer.param_groups[1]['lr'],
                   batch_time.avg)
            print(log)
            print('Num of class:', num_class)
            # BUGFIX: close the log file each append instead of leaking the handle.
            with open(log_path, 'a') as log_file:
                log_file.write(log + '\n')
            if curr_iter == args['iter_num']:
                torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % curr_iter))
                torch.save(optimizer.state_dict(),
                           os.path.join(ckpt_path, exp_name, '%d_optim.pth' % curr_iter))
                total_time = time.time() - start_time
                print(total_time)
                return
if __name__ == '__main__':
    # Script entry point: start SDCNet training.
    main()
|
import torch
import torch.nn.functional as F
from torch import nn
from resnext import ResNeXt101
class R3Net(nn.Module):
    """R3Net-style saliency network over a ResNeXt-101 backbone.

    Low-level (layer0-2) and high-level (layer3-4 + ASPP) feature maps are fused
    once, then six residual refinement heads alternate between the two, each
    adding a correction to the previous saliency logits.
    """

    def __init__(self):
        super(R3Net, self).__init__()
        res50 = ResNeXt101()
        self.layer0 = res50.layer0
        self.layer1 = res50.layer1
        self.layer2 = res50.layer2
        self.layer3 = res50.layer3
        self.layer4 = res50.layer4

        # Fuse layer0-2 (64 + 256 + 512 channels) into a 256-channel low-level map.
        self.reduce_low = nn.Sequential(
            nn.Conv2d(64 + 256 + 512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # Fuse layer3-4 (1024 + 2048 channels) and enlarge receptive field via ASPP.
        self.reduce_high = nn.Sequential(
            nn.Conv2d(1024 + 2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            _ASPP(256)
        )
        # Initial prediction plus six refinement heads; each head sees the previous
        # 1-channel logits concatenated with a 256-channel feature map (257 in).
        self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
        self.predict1 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict2 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict3 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict4 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict5 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict6 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        for m in self.modules():
            if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
                m.inplace = True

    def forward(self, x, label = None):
        """Return 7 logit maps when training, else the sigmoid of the final map.

        ``label`` is accepted for interface parity with SDCNet but unused.
        """
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)

        l0_size = layer0.size()[2:]
        # Upsample everything to layer0 resolution before fusing.
        reduce_low = self.reduce_low(torch.cat((
            layer0,
            F.interpolate(layer1, size=l0_size, mode='bilinear', align_corners=True),
            F.interpolate(layer2, size=l0_size, mode='bilinear', align_corners=True)), 1))
        reduce_high = self.reduce_high(torch.cat((
            layer3,
            F.interpolate(layer4, size=layer3.size()[2:], mode='bilinear', align_corners=True)), 1))
        reduce_high = F.interpolate(reduce_high, size=l0_size, mode='bilinear', align_corners=True)

        # Residual refinement, alternating low-/high-level features.
        predict0 = self.predict0(reduce_high)
        predict1 = self.predict1(torch.cat((predict0, reduce_low), 1)) + predict0
        predict2 = self.predict2(torch.cat((predict1, reduce_high), 1)) + predict1
        predict3 = self.predict3(torch.cat((predict2, reduce_low), 1)) + predict2
        predict4 = self.predict4(torch.cat((predict3, reduce_high), 1)) + predict3
        predict5 = self.predict5(torch.cat((predict4, reduce_low), 1)) + predict4
        predict6 = self.predict6(torch.cat((predict5, reduce_high), 1)) + predict5

        # Upsample every side output to the input resolution.
        predict0 = F.interpolate(predict0, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
        if self.training:
            return predict0, predict1, predict2, predict3, predict4, predict5, predict6
        # BUGFIX: F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
        return torch.sigmoid(predict6)
#--------------------------------------------------------------------------------------------
class SDCNet(nn.Module):
    """Size-divided saliency network over a ResNeXt-101 backbone.

    The decoder carries five parallel copies of each reduction stage
    (reduce{3,2,1,0}_{0..4}); in ``forward`` each sample is routed through the
    copy selected by its size-class id ``c[i]`` (0-4). The ``pre*`` heads emit
    2-channel side outputs (trained with CrossEntropy), the ``predict*`` heads
    1-channel saliency logits (trained with BCEWithLogits).
    """

    def __init__(self, num_classes):
        super(SDCNet, self).__init__()
        res50 = ResNeXt101()
        self.layer0 = res50.layer0
        self.layer1 = res50.layer1
        self.layer2 = res50.layer2
        self.layer3 = res50.layer3
        self.layer4 = res50.layer4
        # Compress layer4 (2048ch) to 256ch with an ASPP tail.
        self.reducex = nn.Sequential(
            nn.Conv2d(2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            _ASPP(256)
        )
        # Shared (non-branched) fusion stages.
        self.reduce5 = nn.Sequential(
            nn.Conv2d(64 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce6 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce7 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce8 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce9 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce10 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # --------------extra module---------------
        # Five size-class-specific copies per decoder stage; forward() picks one
        # per sample via the c argument.
        self.reduce3_0 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_1 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_2 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_3 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_4 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_0 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_1 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_2 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_3 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_4 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_0 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_1 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_2 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_3 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_4 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_0 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_1 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_2 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_3 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_4 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # Saliency heads: 256 -> 128 -> 1 logit channel.
        # self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
        self.predict1 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict2 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict3 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict4 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict5 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict6 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict7 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict8 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict9 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict10 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        # Side-output heads: 256 -> 128 -> 2 channels (CrossEntropy targets).
        self.pre4 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        self.pre3 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        self.pre2 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        self.pre1 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        # Shared top-down fusion with backbone skips (layer1/2/3).
        self.reducex_1 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reducex_2 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reducex_3 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        for m in self.modules():
            if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
                m.inplace = True
        # NOTE(review): avg_pool / fc0 are not used in forward() — possibly a
        # leftover classification head; confirm before removing.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc0 = nn.Sequential(
            nn.BatchNorm1d(256),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes),
        )

    def forward(self, x, c):
        """Run the network; ``c`` holds one size-class id (0-4) per sample.

        NOTE(review): c is indexed per batch element (len(c) == batch size);
        callers pass labels.numpy() — confirm ids are always in 0..4.
        """
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        l0_size = layer0.size()[2:]
        l1_size = layer1.size()[2:]
        l2_size = layer2.size()[2:]
        l3_size = layer3.size()[2:]
        # Top-down pathway producing the 2-channel side outputs p4..p1 (p5 = sum).
        F1 = self.reducex(layer4)
        p4 = self.pre4(F1)
        p4 = F.interpolate(p4, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_4 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
        F0_3 = self.reducex_3(torch.cat((F0_4, layer3), 1))
        p3 = self.pre3(F0_3)
        p3 = F.interpolate(p3, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_3 = F.interpolate(F0_3, size=l2_size, mode='bilinear', align_corners=True)
        F0_2 = self.reducex_2(torch.cat((F0_3, layer2), 1))
        p2 = self.pre2(F0_2)
        p2 = F.interpolate(p2, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_2 = F.interpolate(F0_2, size=l1_size, mode='bilinear', align_corners=True)
        F0_1 = self.reducex_1(torch.cat((F0_2, layer1), 1))
        p1 = self.pre1(F0_1)
        p1 = F.interpolate(p1, size=x.size()[2:], mode='bilinear', align_corners=True)
        p5 = p4 + p3 + p2 + p1
        #saliency detect
        predict1 = self.predict1(F1)
        predict1 = F.interpolate(predict1, size=l3_size, mode='bilinear', align_corners=True)
        F1 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
        # clone().detach() only provides a same-shaped buffer; the per-sample
        # writes below are autograd-tracked assignments through the branch picked
        # by c[i].
        F2 = F1[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F2[i, :, :, :] = self.reduce3_0(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 1:
                F2[i, :, :, :] = self.reduce3_1(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 2:
                F2[i, :, :, :] = self.reduce3_2(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 3:
                F2[i, :, :, :] = self.reduce3_3(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 4:
                F2[i, :, :, :] = self.reduce3_4(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
        # Residual accumulation: each predict adds to the previous logits.
        predict2 = self.predict2(F2) + predict1
        predict2 = F.interpolate(predict2, size=l2_size, mode='bilinear', align_corners=True)
        F2 = F.interpolate(F2, size=l2_size, mode='bilinear', align_corners=True)
        F3 = F2[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F3[i, :, :, :] = self.reduce2_0(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 1:
                F3[i, :, :, :] = self.reduce2_1(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 2:
                F3[i, :, :, :] = self.reduce2_2(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 3:
                F3[i, :, :, :] = self.reduce2_3(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 4:
                F3[i, :, :, :] = self.reduce2_4(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
        predict3 = self.predict3(F3) + predict2
        predict3 = F.interpolate(predict3, size=l1_size, mode='bilinear', align_corners=True)
        F3 = F.interpolate(F3, size=l1_size, mode='bilinear', align_corners=True)
        F4 = F3[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F4[i, :, :, :] = self.reduce1_0(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 1:
                F4[i, :, :, :] = self.reduce1_1(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 2:
                F4[i, :, :, :] = self.reduce1_2(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 3:
                F4[i, :, :, :] = self.reduce1_3(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 4:
                F4[i, :, :, :] = self.reduce1_4(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
        predict4 = self.predict4(F4) + predict3
        F5 = self.reduce5(torch.cat((F4, layer0), 1))
        predict5 = self.predict5(F5) + predict4
        # Class-specific projection of the raw layer0 features.
        F0 = F4[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F0[i, :, :, :] = self.reduce0_0(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 1:
                F0[i, :, :, :] = self.reduce0_1(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 2:
                F0[i, :, :, :] = self.reduce0_2(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 3:
                F0[i, :, :, :] = self.reduce0_3(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 4:
                F0[i, :, :, :] = self.reduce0_4(layer0[i, :, :, :].unsqueeze(0))
        # Pair F0 with every decoder stage and refine five more times.
        F1 = F.interpolate(F1, size=l1_size, mode='bilinear', align_corners=True)
        F2 = F.interpolate(F2, size=l1_size, mode='bilinear', align_corners=True)
        F6 = self.reduce6(torch.cat((F0, F5), 1))
        F7 = self.reduce7(torch.cat((F0, F4), 1))
        F8 = self.reduce8(torch.cat((F0, F3), 1))
        F9 = self.reduce9(torch.cat((F0, F2), 1))
        F10 = self.reduce10(torch.cat((F0, F1), 1))
        predict6 = self.predict6(F6) + predict5
        predict7 = self.predict7(F7) + predict6
        predict8 = self.predict8(F8) + predict7
        predict9 = self.predict9(F9) + predict8
        predict10 = self.predict10(F10) + predict9
        predict11 = predict6 + predict7 + predict8 + predict9 + predict10
        # Upsample all side outputs to the input resolution.
        predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict7 = F.interpolate(predict7, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict8 = F.interpolate(predict8, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict9 = F.interpolate(predict9, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict10 = F.interpolate(predict10, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict11 = F.interpolate(predict11, size=x.size()[2:], mode='bilinear', align_corners=True)
        if self.training:
            return p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11
        # NOTE(review): F.sigmoid is deprecated in modern torch — torch.sigmoid
        # is the drop-in equivalent; confirm before changing trained pipelines.
        return F.sigmoid(predict11)
#----------------------------------------------------------------------------------------
class _ASPP(nn.Module):
def __init__(self, in_dim):
super(_ASPP, self).__init__()
down_dim = in_dim // 2
self.conv1 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=2, padding=2), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=4, padding=4), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv5 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.fuse = nn.Sequential(
nn.Conv2d(5 * down_dim, in_dim, kernel_size=1), nn.BatchNorm2d(in_dim), nn.PReLU()
)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
conv4 = self.conv4(x)
conv5 = F.interpolate(self.conv5(F.adaptive_avg_pool2d(x, 1)), size=x.size()[2:], mode='bilinear',
align_corners=True)
return self.fuse(torch.cat((conv1, conv2, conv3, conv4, conv5), 1))
--- FILE SEPARATOR ---
import os
import os.path
import torch.utils.data as data
from PIL import Image
class ImageFolder_joint(data.Dataset):
    """(image, mask, size-class label) dataset driven by a pandas DataFrame.

    ``label_list`` must provide 'img_path', 'gt_path' and 'label' columns;
    image and ground-truth mask are loaded per item and run through the
    optional joint / image / target transforms.
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.label_list = label_list
        # Materialise (img_path, gt_path, label) triples once up front.
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.label_list)

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        # Joint transform first so random crops/flips stay aligned.
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask, label
class ImageFolder_joint_for_edge(data.Dataset):
    """Dataset of (image, mask, edge-mask, label) tuples.

    ``label_list`` is a DataFrame with columns ``img_path``, ``gt_path`` and
    ``label``; the edge mask sits next to the gt mask with an ``_edge``
    suffix (e.g. ``foo.png`` -> ``foo_edge.png``).
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        # Derive the edge-mask path robustly. The previous str.split('.')
        # construction assumed exactly two dots in the path (e.g. './x/y.png')
        # and raised IndexError for any other form.
        root, ext = os.path.splitext(gt_path)
        edge_path = root + "_edge" + ext
        img = Image.open(img_path).convert('RGB')
        target = Image.open(gt_path).convert('L')
        target_edge = Image.open(edge_path).convert('L')
        if self.joint_transform is not None:
            if img.size != target.size or img.size != target_edge.size:
                print("error path:", img_path, gt_path)
                print("size:", img.size, target.size, target_edge.size)
            img, target, target_edge = self.joint_transform(img, target, target_edge)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
            target_edge = self.target_transform(target_edge)
        return img, target, target_edge, label

    def __len__(self):
        return len(self.imgs)
class TestFolder_joint(data.Dataset):
    """Test-time dataset yielding (image, mask, label, image-path) tuples.

    ``label_list`` is a DataFrame with ``img_path``, ``gt_path`` and
    ``label`` columns; image and gt share a filename (jpg / png).
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask, label, img_path

    def __len__(self):
        return len(self.imgs)
def make_dataset(root):
    """Collect (jpg, png) path pairs for every .jpg image directly in ``root``."""
    pairs = []
    for entry in os.listdir(root):
        if not entry.endswith('.jpg'):
            continue
        stem = os.path.splitext(entry)[0]
        pairs.append((os.path.join(root, stem + '.jpg'),
                      os.path.join(root, stem + '.png')))
    return pairs
class ImageFolder(data.Dataset):
    """Dataset over a directory of jpg images with same-named png masks."""

    def __init__(self, root, joint_transform=None, transform=None, target_transform=None):
        self.root = root
        self.imgs = make_dataset(root)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask

    def __len__(self):
        return len(self.imgs)
--- FILE SEPARATOR ---
import numpy as np
import os
import pylab as pl
#import pydensecrf.densecrf as dcrf
class AvgMeter(object):
    """Running-average tracker for scalar metrics."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def check_mkdir(dir_name):
    """Create ``dir_name`` (including missing parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the previous
    exists()/mkdir() pair, which raced between check and creation and
    failed outright when a parent directory was missing.
    """
    os.makedirs(dir_name, exist_ok=True)
def cal_precision_recall_mae(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
prediction = prediction / 255.
gt = gt / 255.
mae = np.mean(np.abs(prediction - gt))
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt) #t is sum of 1
precision, recall, TPR, FP = [], [], [], []
# calculating precision and recall at 255 different binarizing thresholds
for threshold in range(256):
threshold = threshold / 255.
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
#false_pred = np.zeros(prediction.shape)
#false_prediction[prediction < threshold] = 1
a = prediction.shape
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#for roc
#fp = np.sum(false_pred * hard_gt)
#tpr = (tp + eps)/(a + eps)
fp = p - tp
#TPR.append(tpr)
FP.append(fp)
precision.append((tp + eps) / (p + eps))
recall.append((tp + eps) / (t + eps))
return precision, recall, mae#, TPR, FP
def cal_fmeasure(precision, recall):
    """Return the maximum F-measure (beta^2 = 0.3) over the 256 PR pairs."""
    assert len(precision) == 256
    assert len(recall) == 256
    beta_square = 0.3
    best = float('-inf')
    for p, r in zip(precision, recall):
        f = (1 + beta_square) * p * r / (beta_square * p + r)
        if f > best:
            best = f
    return best
def cal_sizec(prediction, gt):
    """Return (size-class, best-threshold) for a uint8 saliency prediction.

    ``best_threshold`` (0-255 scale) is the binarization threshold that
    maximizes the F-measure (beta^2 = 0.3); the size class 0-4 bins the
    predicted foreground ratio in 10% steps (>= 40% is class 4).
    Both inputs must be uint8 arrays of the same shape.
    """
    # input should be np array with data type uint8
    assert prediction.dtype == np.uint8
    assert gt.dtype == np.uint8
    assert prediction.shape == gt.shape
    eps = 1e-4
    prediction = prediction / 255.
    gt = gt / 255.
    hard_gt = np.zeros(prediction.shape)
    hard_gt[gt > 0.5] = 1
    t = np.sum(hard_gt)  # number of ground-truth foreground pixels
    precision, recall, TPR, FP = [], [], [], []  # NOTE(review): these lists are never used
    # Scan all 256 binarization thresholds for the best F-measure.
    best_threshold = 0
    best_F = 0
    for threshold in range(256):
        threshold = threshold / 255.
        gt_size = np.ones(prediction.shape)
        a = np.sum(gt_size)  # total pixel count (loop-invariant)
        hard_prediction = np.zeros(prediction.shape)
        hard_prediction[prediction > threshold] = 1
        tp = np.sum(hard_prediction * hard_gt)
        p = np.sum(hard_prediction)
        precision = (tp + eps) / (p + eps)
        recall = (tp + eps) / (t + eps)
        beta_square = 0.3
        fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall)
        if fmeasure > best_F:
            best_threshold = threshold*255
            best_F = fmeasure
        sm_size = p / a
    # NOTE(review): sm_size keeps its value from the LAST loop iteration
    # (threshold 255/255), not from best_threshold — confirm this is intended.
    if 0 <= sm_size < 0.1:
        sizec = 0
    elif 0.1 <= sm_size < 0.2:
        sizec = 1
    elif 0.2 <= sm_size < 0.3:
        sizec = 2
    elif 0.3 <= sm_size < 0.4:
        sizec = 3
    elif 0.4 <= sm_size <= 1.0:
        sizec = 4
    return sizec, best_threshold
def cal_sc(gt):
    """Map a uint8 ground-truth mask to a size class 0-4.

    The class is the foreground-pixel ratio binned in 10% steps
    (ratio >= 40% is class 4).
    """
    assert gt.dtype == np.uint8
    gt = gt / 255.
    total = np.sum(np.ones(gt.shape))
    foreground = np.zeros(gt.shape)
    foreground[gt > 0.5] = 1
    ratio = float(np.sum(foreground)) / float(total)
    # 10%-wide bins below 40%; anything larger is class 4.
    for upper, cls in ((0.1, 0), (0.2, 1), (0.3, 2), (0.4, 3)):
        if ratio < upper:
            return cls
    return 4
def pr_cruve(precision, recall):
    """Plot the precision-recall curve over the 256 threshold samples."""
    assert len(precision) == 256
    assert len(recall) == 256
    r = [pair[1] for pair in zip(precision, recall)]
    p = [pair[0] for pair in zip(precision, recall)]
    pl.title('PR curve')
    pl.xlabel('Recall')
    # Bug fix: xlabel was called twice, so the y-axis was never labelled.
    pl.ylabel('Precision')
    pl.plot(r, p)
    pl.show()
# Classifies the size of the salient object by its foreground-pixel ratio.
def size_aware(gt):
    """Return the fraction of pixels the uint8 mask marks as foreground."""
    assert gt.dtype == np.uint8
    scaled = gt / 255.
    mask = np.zeros(scaled.shape)
    mask[scaled > 0.5] = 1
    return np.sum(mask) / np.size(mask)
# # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf
# def crf_refine(img, annos):
# def _sigmoid(x):
# return 1 / (1 + np.exp(-x))
# assert img.dtype == np.uint8
# assert annos.dtype == np.uint8
# assert img.shape[:2] == annos.shape
# # img and annos should be np array with data type uint8
# EPSILON = 1e-8
# M = 2 # salient or not
# tau = 1.05
# # Setup the CRF model
# d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
# anno_norm = annos / 255.
# n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm))
# p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm))
# U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
# U[0, :] = n_energy.flatten()
# U[1, :] = p_energy.flatten()
# d.setUnaryEnergy(U)
# d.addPairwiseGaussian(sxy=3, compat=3)
# d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)
# # Do the inference
# infer = np.array(d.inference(1)).astype('float32')
# res = infer[1, :]
# res = res * 255
# res = res.reshape(img.shape[:2])
# return res.astype('uint8')
|
{
"imported_by": [],
"imports": [
"/model.py",
"/datasets.py",
"/misc.py"
]
}
|
Sssssbo/SDCNet
|
/create_free.py
|
import numpy as np
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
import cv2
import numpy as np
from config import ecssd_path, hkuis_path, pascals_path, sod_path, dutomron_path, MTDD_test_path
from misc import check_mkdir, crf_refine, AvgMeter, cal_precision_recall_mae, cal_fmeasure
from datasets import TestFolder_joint
import joint_transforms
from model import HSNet_single1, HSNet_single1_ASPP, HSNet_single1_NR, HSNet_single2, SDMS_A, SDMS_C
# Fix the RNG seed for reproducibility.
torch.manual_seed(2018)
# set which gpu to use
torch.cuda.set_device(0)
# Checkpoint directory and the test-split csv consumed by this script.
ckpt_path = './ckpt'
test_path = './test_ECSSD.csv'
def main():
    """Write a black 512x512 PNG to './free.png' and preview './0595.PNG'."""
    blank = np.zeros((512, 512), dtype=np.uint8)
    sample = cv2.imread('./0595.PNG', 0)  # grayscale read
    cv2.imshow('img', sample)
    #cv2.waitKey(0)
    print(blank, sample)
    Image.fromarray(blank).save('./free.png')
# Script entry point.
if __name__ == '__main__':
    main()
|
import os
import os.path
import torch.utils.data as data
from PIL import Image
class ImageFolder_joint(data.Dataset):
    """Dataset of (image, ground-truth mask, size-class label) triples.

    ``label_list`` is a DataFrame with columns ``img_path``, ``gt_path`` and
    ``label``; image and gt share a filename (jpg and png respectively).
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.label_list = label_list  # kept for backward compatibility
        # Materialize the (img_path, gt_path, label) triples once up front.
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        # Consistent with the sibling dataset classes: length of the
        # materialized sample list (previously len(self.label_list)).
        return len(self.imgs)

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        target = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            img, target = self.joint_transform(img, target)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, label
class ImageFolder_joint_for_edge(data.Dataset):
    """Dataset of (image, mask, edge-mask, label) tuples.

    ``label_list`` is a DataFrame with columns ``img_path``, ``gt_path`` and
    ``label``; the edge mask sits next to the gt mask with an ``_edge``
    suffix (e.g. ``foo.png`` -> ``foo_edge.png``).
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        # Derive the edge-mask path robustly. The previous str.split('.')
        # construction assumed exactly two dots in the path (e.g. './x/y.png')
        # and raised IndexError for any other form.
        root, ext = os.path.splitext(gt_path)
        edge_path = root + "_edge" + ext
        img = Image.open(img_path).convert('RGB')
        target = Image.open(gt_path).convert('L')
        target_edge = Image.open(edge_path).convert('L')
        if self.joint_transform is not None:
            if img.size != target.size or img.size != target_edge.size:
                print("error path:", img_path, gt_path)
                print("size:", img.size, target.size, target_edge.size)
            img, target, target_edge = self.joint_transform(img, target, target_edge)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
            target_edge = self.target_transform(target_edge)
        return img, target, target_edge, label

    def __len__(self):
        return len(self.imgs)
class TestFolder_joint(data.Dataset):
    """Test-time dataset yielding (image, mask, label, image-path) tuples.

    ``label_list`` is a DataFrame with ``img_path``, ``gt_path`` and
    ``label`` columns; image and gt share a filename (jpg / png).
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask, label, img_path

    def __len__(self):
        return len(self.imgs)
def make_dataset(root):
    """Collect (jpg, png) path pairs for every .jpg image directly in ``root``."""
    pairs = []
    for entry in os.listdir(root):
        if not entry.endswith('.jpg'):
            continue
        stem = os.path.splitext(entry)[0]
        pairs.append((os.path.join(root, stem + '.jpg'),
                      os.path.join(root, stem + '.png')))
    return pairs
class ImageFolder(data.Dataset):
    """Dataset over a directory of jpg images with same-named png masks."""

    def __init__(self, root, joint_transform=None, transform=None, target_transform=None):
        self.root = root
        self.imgs = make_dataset(root)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask

    def __len__(self):
        return len(self.imgs)
--- FILE SEPARATOR ---
import numpy as np
import os
import pylab as pl
#import pydensecrf.densecrf as dcrf
class AvgMeter(object):
    """Running-average tracker for scalar metrics."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def check_mkdir(dir_name):
    """Create ``dir_name`` (including missing parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the previous
    exists()/mkdir() pair, which raced between check and creation and
    failed outright when a parent directory was missing.
    """
    os.makedirs(dir_name, exist_ok=True)
def cal_precision_recall_mae(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
prediction = prediction / 255.
gt = gt / 255.
mae = np.mean(np.abs(prediction - gt))
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt) #t is sum of 1
precision, recall, TPR, FP = [], [], [], []
# calculating precision and recall at 255 different binarizing thresholds
for threshold in range(256):
threshold = threshold / 255.
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
#false_pred = np.zeros(prediction.shape)
#false_prediction[prediction < threshold] = 1
a = prediction.shape
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#for roc
#fp = np.sum(false_pred * hard_gt)
#tpr = (tp + eps)/(a + eps)
fp = p - tp
#TPR.append(tpr)
FP.append(fp)
precision.append((tp + eps) / (p + eps))
recall.append((tp + eps) / (t + eps))
return precision, recall, mae#, TPR, FP
def cal_fmeasure(precision, recall):
    """Return the maximum F-measure (beta^2 = 0.3) over the 256 PR pairs."""
    assert len(precision) == 256
    assert len(recall) == 256
    beta_square = 0.3
    best = float('-inf')
    for p, r in zip(precision, recall):
        f = (1 + beta_square) * p * r / (beta_square * p + r)
        if f > best:
            best = f
    return best
def cal_sizec(prediction, gt):
    """Return (size-class, best-threshold) for a uint8 saliency prediction.

    ``best_threshold`` (0-255 scale) is the binarization threshold that
    maximizes the F-measure (beta^2 = 0.3); the size class 0-4 bins the
    predicted foreground ratio in 10% steps (>= 40% is class 4).
    Both inputs must be uint8 arrays of the same shape.
    """
    # input should be np array with data type uint8
    assert prediction.dtype == np.uint8
    assert gt.dtype == np.uint8
    assert prediction.shape == gt.shape
    eps = 1e-4
    prediction = prediction / 255.
    gt = gt / 255.
    hard_gt = np.zeros(prediction.shape)
    hard_gt[gt > 0.5] = 1
    t = np.sum(hard_gt)  # number of ground-truth foreground pixels
    precision, recall, TPR, FP = [], [], [], []  # NOTE(review): these lists are never used
    # Scan all 256 binarization thresholds for the best F-measure.
    best_threshold = 0
    best_F = 0
    for threshold in range(256):
        threshold = threshold / 255.
        gt_size = np.ones(prediction.shape)
        a = np.sum(gt_size)  # total pixel count (loop-invariant)
        hard_prediction = np.zeros(prediction.shape)
        hard_prediction[prediction > threshold] = 1
        tp = np.sum(hard_prediction * hard_gt)
        p = np.sum(hard_prediction)
        precision = (tp + eps) / (p + eps)
        recall = (tp + eps) / (t + eps)
        beta_square = 0.3
        fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall)
        if fmeasure > best_F:
            best_threshold = threshold*255
            best_F = fmeasure
        sm_size = p / a
    # NOTE(review): sm_size keeps its value from the LAST loop iteration
    # (threshold 255/255), not from best_threshold — confirm this is intended.
    if 0 <= sm_size < 0.1:
        sizec = 0
    elif 0.1 <= sm_size < 0.2:
        sizec = 1
    elif 0.2 <= sm_size < 0.3:
        sizec = 2
    elif 0.3 <= sm_size < 0.4:
        sizec = 3
    elif 0.4 <= sm_size <= 1.0:
        sizec = 4
    return sizec, best_threshold
def cal_sc(gt):
    """Map a uint8 ground-truth mask to a size class 0-4.

    The class is the foreground-pixel ratio binned in 10% steps
    (ratio >= 40% is class 4).
    """
    assert gt.dtype == np.uint8
    gt = gt / 255.
    total = np.sum(np.ones(gt.shape))
    foreground = np.zeros(gt.shape)
    foreground[gt > 0.5] = 1
    ratio = float(np.sum(foreground)) / float(total)
    # 10%-wide bins below 40%; anything larger is class 4.
    for upper, cls in ((0.1, 0), (0.2, 1), (0.3, 2), (0.4, 3)):
        if ratio < upper:
            return cls
    return 4
def pr_cruve(precision, recall):
    """Plot the precision-recall curve over the 256 threshold samples."""
    assert len(precision) == 256
    assert len(recall) == 256
    r = [pair[1] for pair in zip(precision, recall)]
    p = [pair[0] for pair in zip(precision, recall)]
    pl.title('PR curve')
    pl.xlabel('Recall')
    # Bug fix: xlabel was called twice, so the y-axis was never labelled.
    pl.ylabel('Precision')
    pl.plot(r, p)
    pl.show()
# Classifies the size of the salient object by its foreground-pixel ratio.
def size_aware(gt):
    """Return the fraction of pixels the uint8 mask marks as foreground."""
    assert gt.dtype == np.uint8
    scaled = gt / 255.
    mask = np.zeros(scaled.shape)
    mask[scaled > 0.5] = 1
    return np.sum(mask) / np.size(mask)
# # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf
# def crf_refine(img, annos):
# def _sigmoid(x):
# return 1 / (1 + np.exp(-x))
# assert img.dtype == np.uint8
# assert annos.dtype == np.uint8
# assert img.shape[:2] == annos.shape
# # img and annos should be np array with data type uint8
# EPSILON = 1e-8
# M = 2 # salient or not
# tau = 1.05
# # Setup the CRF model
# d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
# anno_norm = annos / 255.
# n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm))
# p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm))
# U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
# U[0, :] = n_energy.flatten()
# U[1, :] = p_energy.flatten()
# d.setUnaryEnergy(U)
# d.addPairwiseGaussian(sxy=3, compat=3)
# d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)
# # Do the inference
# infer = np.array(d.inference(1)).astype('float32')
# res = infer[1, :]
# res = res * 255
# res = res.reshape(img.shape[:2])
# return res.astype('uint8')
|
{
"imported_by": [],
"imports": [
"/datasets.py",
"/misc.py"
]
}
|
Sssssbo/SDCNet
|
/infer_SDCNet.py
|
import numpy as np
import os
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from misc import check_mkdir, AvgMeter, cal_precision_recall_mae, cal_fmeasure, cal_sizec, cal_sc
from datasets import TestFolder_joint
import joint_transforms
from model import R3Net, SDCNet
# Fix the RNG seed for reproducibility.
torch.manual_seed(2021)
# set which gpu to use
torch.cuda.set_device(6)
# the following two args specify the location of the file of trained model (pth extension)
# you should have the pth file in the folder './$ckpt_path$/$exp_name$'
ckpt_path = './ckpt'
exp_name = 'SDCNet'
# Per-benchmark label csv files (columns: img_path, gt_path, label).
msra10k_path = './SOD_label/label_msra10k.csv'
ecssd_path = './SOD_label/label_ECSSD.csv'
dutomrom_path = './SOD_label/label_DUT-OMROM.csv'
dutste_path = './SOD_label/label_DUTS-TE.csv'
hkuis_path = './SOD_label/label_HKU-IS.csv'
pascals_path = './SOD_label/label_PASCAL-S.csv'
sed2_path = './SOD_label/label_SED2.csv'
socval_path = './SOD_label/label_SOC-Val.csv'
sod_path = './SOD_label/label_SOD.csv'
thur15k_path = './SOD_label/label_THUR-15K.csv'
args = {
    'snapshot': '30000',  # your snapshot filename (exclude extension name)
    'save_results': True,  # whether to save the resulting masks
    'test_mode': 1
}
# Joint (image + mask) augmentations are disabled at test time.
joint_transform = joint_transforms.Compose([
    #joint_transforms.RandomCrop(300),
    #joint_transforms.RandomHorizontallyFlip(),
    #joint_transforms.RandomRotate(10)
])
# Input normalization uses ImageNet statistics.
img_transform = transforms.Compose([
    transforms.Resize((300, 300)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
# Datasets actually evaluated by main(); extend with the paths above as needed.
to_test ={'ECSSD': ecssd_path,'SOD': sod_path, 'DUTS-TE': dutste_path} #{'DUTS-TE': dutste_path,'ECSSD': ecssd_path,'SOD': sod_path, 'SED2': sed2_path, 'PASCAL-S': pascals_path, 'HKU-IS': hkuis_path, 'DUT-OMROM': dutomrom_path}
def main():
    """Evaluate an SDCNet snapshot on every dataset listed in ``to_test``.

    For each dataset, F-measure and MAE are accumulated per size class
    (labels 0-4) and over all images; predicted saliency maps are
    optionally written next to the checkpoint.

    Improvements over the previous version: the unused
    precision0/recall0/mae0 accumulators are gone,
    cal_precision_recall_mae is computed once per image instead of twice,
    and the six copy-pasted per-label branches are replaced by indexed
    bucket lists. The printed output format is unchanged.
    """
    net = SDCNet(num_classes = 5).cuda()
    print('load snapshot \'%s\' for testing, mode:\'%s\'' % (args['snapshot'], args['test_mode']))
    print(exp_name)
    net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
    net.eval()
    results = {}
    with torch.no_grad():
        for name, root in to_test.items():
            print('load snapshot \'%s\' for testing %s' % (args['snapshot'], name))
            test_data = pd.read_csv(root)
            test_set = TestFolder_joint(test_data, joint_transform, img_transform, target_transform)
            test_loader = DataLoader(test_set, batch_size=1, num_workers=0, shuffle=False)
            # Buckets 0-4 accumulate per-size-class stats; bucket 5 covers all images.
            precision_records = [[AvgMeter() for _ in range(256)] for _ in range(6)]
            recall_records = [[AvgMeter() for _ in range(256)] for _ in range(6)]
            mae_records = [AvgMeter() for _ in range(6)]
            class_counts = [0] * 5
            total_count = 0
            if args['save_results']:
                check_mkdir(os.path.join(ckpt_path, exp_name, '%s_%s' % (name, args['snapshot'])))
            for inputs, gt, labels, img_path in tqdm(test_loader):
                shape = gt.size()[2:]
                img_var = Variable(inputs).cuda()
                img = np.array(to_pil(img_var.data.squeeze(0).cpu()))
                gt = np.array(to_pil(gt.data.squeeze(0).cpu()))
                sizec = labels.numpy()
                pred2021 = net(img_var, sizec)
                pred2021 = F.interpolate(pred2021, size=shape, mode='bilinear', align_corners=True)
                pred2021 = np.array(to_pil(pred2021.data.squeeze(0).cpu()))
                # One PR/MAE evaluation per image feeds both the class bucket
                # and the overall bucket (previously computed twice per image).
                precision, recall, mae = cal_precision_recall_mae(pred2021, gt)
                cls = int(labels)
                buckets = [5]
                if 0 <= cls < 5:
                    buckets.append(cls)
                    class_counts[cls] += 1
                for bucket in buckets:
                    for pidx, (p, r) in enumerate(zip(precision, recall)):
                        precision_records[bucket][pidx].update(p)
                        recall_records[bucket][pidx].update(r)
                    mae_records[bucket].update(mae)
                total_count += 1
                img_name = os.path.split(str(img_path))[1]
                img_name = os.path.splitext(img_name)[0]
                if args['save_results']:
                    Image.fromarray(pred2021).save(os.path.join(ckpt_path, exp_name, '%s_%s' % (
                        name, args['snapshot']), img_name + '_2021.png'))
            fmeasures = [cal_fmeasure([rec.avg for rec in precision_records[k]],
                                      [rec.avg for rec in recall_records[k]])
                         for k in range(6)]
            results[name] = {'fmeasure1': fmeasures[0], 'mae1': mae_records[0].avg, 'fmeasure2': fmeasures[1],
                             'mae2': mae_records[1].avg, 'fmeasure3': fmeasures[2], 'mae3': mae_records[2].avg,
                             'fmeasure4': fmeasures[3], 'mae4': mae_records[3].avg, 'fmeasure5': fmeasures[4],
                             'mae5': mae_records[4].avg, 'fmeasure6': fmeasures[5], 'mae6': mae_records[5].avg}
            print('test results:')
            print('[fmeasure1 %.3f], [mae1 %.4f], [class1 %.0f]\n'\
                  '[fmeasure2 %.3f], [mae2 %.4f], [class2 %.0f]\n'\
                  '[fmeasure3 %.3f], [mae3 %.4f], [class3 %.0f]\n'\
                  '[fmeasure4 %.3f], [mae4 %.4f], [class4 %.0f]\n'\
                  '[fmeasure5 %.3f], [mae5 %.4f], [class5 %.0f]\n'\
                  '[fmeasure6 %.3f], [mae6 %.4f], [all %.0f]\n'%\
                  (fmeasures[0], mae_records[0].avg, class_counts[0],
                   fmeasures[1], mae_records[1].avg, class_counts[1],
                   fmeasures[2], mae_records[2].avg, class_counts[2],
                   fmeasures[3], mae_records[3].avg, class_counts[3],
                   fmeasures[4], mae_records[4].avg, class_counts[4],
                   fmeasures[5], mae_records[5].avg, total_count))
def accuracy(y_pred, y_actual, topk=(1,)):
    """Computes the top-1 classification accuracy as a percentage.

    Returns ``(accuracy_percent, sample_count)``. ``topk`` only controls
    how many predictions are extracted per sample; the comparison itself
    is against the single highest-probability prediction.
    """
    maxk = max(topk)
    sample_count = y_actual.size(0)
    prob, pred = y_pred.topk(maxk, 1, True, True)
    correct = 0
    for j in range(pred.size(0)):
        if int(y_actual[j]) == int(pred[j]):
            correct += 1
    if sample_count == 0:
        final_acc = 0
    else:
        final_acc = float(correct / sample_count)
    return final_acc * 100, sample_count
# Script entry point.
if __name__ == '__main__':
    main()
|
import torch
import torch.nn.functional as F
from torch import nn
from resnext import ResNeXt101
class R3Net(nn.Module):
    """R3Net saliency detector: ResNeXt backbone + recurrent residual refinement.

    Two shared feature maps are built from the backbone — a low-level fusion
    of layers 0-2 and an ASPP-refined fusion of layers 3-4 — and six
    refinement heads alternately combine the running prediction with one of
    them, each head adding a residual correction to the previous prediction.
    """

    def __init__(self):
        super(R3Net, self).__init__()
        res50 = ResNeXt101()  # NOTE(review): variable named res50 but the class is ResNeXt101
        self.layer0 = res50.layer0
        self.layer1 = res50.layer1
        self.layer2 = res50.layer2
        self.layer3 = res50.layer3
        self.layer4 = res50.layer4
        # Fuse low-level features (layers 0-2, 64+256+512 channels) down to 256.
        self.reduce_low = nn.Sequential(
            nn.Conv2d(64 + 256 + 512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # Fuse high-level features (layers 3-4) and widen the receptive field.
        self.reduce_high = nn.Sequential(
            nn.Conv2d(1024 + 2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            _ASPP(256)  # atrous spatial pyramid pooling module defined elsewhere in this file
        )
        # predict0 seeds the recurrence from the high-level features; the
        # 257-channel heads take [previous 1-channel prediction, 256-ch features].
        self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
        self.predict1 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict2 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict3 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict4 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict5 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict6 = nn.Sequential(
            nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        for m in self.modules():
            # In-place activations/dropout to reduce memory use.
            if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
                m.inplace = True

    def forward(self, x, label = None):
        # ``label`` is accepted for call-signature parity with SDCNet but unused here.
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        l0_size = layer0.size()[2:]
        # Low-level fusion: layers 1-2 upsampled to layer0's resolution.
        reduce_low = self.reduce_low(torch.cat((
            layer0,
            F.interpolate(layer1, size=l0_size, mode='bilinear', align_corners=True),
            F.interpolate(layer2, size=l0_size, mode='bilinear', align_corners=True)), 1))
        # High-level fusion: layer4 upsampled to layer3, then ASPP, then up to l0.
        reduce_high = self.reduce_high(torch.cat((
            layer3,
            F.interpolate(layer4, size=layer3.size()[2:], mode='bilinear', align_corners=True)), 1))
        reduce_high = F.interpolate(reduce_high, size=l0_size, mode='bilinear', align_corners=True)
        # Residual refinement chain alternating low/high feature maps.
        predict0 = self.predict0(reduce_high)
        predict1 = self.predict1(torch.cat((predict0, reduce_low), 1)) + predict0
        predict2 = self.predict2(torch.cat((predict1, reduce_high), 1)) + predict1
        predict3 = self.predict3(torch.cat((predict2, reduce_low), 1)) + predict2
        predict4 = self.predict4(torch.cat((predict3, reduce_high), 1)) + predict3
        predict5 = self.predict5(torch.cat((predict4, reduce_low), 1)) + predict4
        predict6 = self.predict6(torch.cat((predict5, reduce_high), 1)) + predict5
        # Upsample every prediction to the input resolution.
        predict0 = F.interpolate(predict0, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
        if self.training:
            # Deep supervision: return every intermediate prediction (raw logits).
            return predict0, predict1, predict2, predict3, predict4, predict5, predict6
        # NOTE(review): F.sigmoid is deprecated in modern PyTorch; torch.sigmoid is preferred.
        return F.sigmoid(predict6)
#--------------------------------------------------------------------------------------------
class SDCNet(nn.Module):
    """Saliency network whose decoder selects, per sample, one of five
    size-specific reduction branches (`reduce{3,2,1,0}_{0..4}`), chosen by
    the per-sample label list `c` passed to forward().

    NOTE(review): `num_classes` is only used by the `fc0` head, and
    `avg_pool`/`fc0` are never called in this forward() — presumably used by
    other code or a separate training phase; confirm with callers.
    """
    def __init__(self, num_classes):
        super(SDCNet, self).__init__()
        # ResNeXt-101 backbone (variable named res50 historically); the
        # reduce blocks below imply layer0..layer4 emit 64/256/512/1024/2048
        # channels respectively.
        res50 = ResNeXt101()
        self.layer0 = res50.layer0
        self.layer1 = res50.layer1
        self.layer2 = res50.layer2
        self.layer3 = res50.layer3
        self.layer4 = res50.layer4
        # Top-of-backbone reduction: 2048 -> 256 channels, then ASPP context.
        self.reducex = nn.Sequential(
            nn.Conv2d(2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            _ASPP(256)
        )
        # Fuses decoder features with layer0 (64 + 256 channels in).
        self.reduce5 = nn.Sequential(
            nn.Conv2d(64 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # reduce6..reduce10 fuse the shallow F0 features with F5/F4/F3/F2/F1
        # (256 + 256 = 512 channels in each).
        self.reduce6 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce7 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce8 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce9 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce10 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # --------------extra module---------------
        # Five parallel copies of each decoder stage, one per size class in
        # {0..4}; forward() picks reduceN_{c[i]} for sample i.
        # reduce3_*: fuse with layer3 (1024 + 256 channels in).
        self.reduce3_0 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_1 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_2 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_3 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce3_4 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # reduce2_*: fuse with layer2 (512 + 256 channels in).
        self.reduce2_0 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_1 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_2 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_3 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce2_4 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # reduce1_*: fuse with layer1 (256 + 256 channels in).
        self.reduce1_0 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_1 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_2 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_3 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce1_4 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # reduce0_*: lift raw layer0 features (64 channels) to 256.
        self.reduce0_0 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_1 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_2 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_3 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reduce0_4 = nn.Sequential(
            nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # Saliency heads: 256 -> 1-channel logit per decoder stage.
        # self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
        self.predict1 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict2 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict3 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict4 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict5 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict6 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict7 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict8 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict9 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        self.predict10 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 1, kernel_size=1)
        )
        # Auxiliary 2-channel heads on the top-down pathway (pre4 deepest).
        self.pre4 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        self.pre3 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        self.pre2 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        self.pre1 = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
            nn.Conv2d(128, 2, kernel_size=1)
        )
        # Top-down fusions with layer1/layer2/layer3 for the aux pathway.
        self.reducex_1 = nn.Sequential(
            nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reducex_2 = nn.Sequential(
            nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        self.reducex_3 = nn.Sequential(
            nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
            nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
        )
        # Save activation memory by running ReLU/Dropout in place.
        for m in self.modules():
            if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
                m.inplace = True
        # Classifier head (unused in this forward(); see class NOTE above).
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc0 = nn.Sequential(
            nn.BatchNorm1d(256),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes),
        )
    def forward(self, x, c):
        """Forward pass.

        Args:
            x: input image batch (N, C, H, W).
            c: per-sample integer labels, indexed 0..4, that select which
               size-specific reduce branch handles each sample — presumably
               the size class from misc.cal_sc; confirm against callers.

        Returns:
            Training: (p5, p4..p1, predict1..predict11) — aux 2-channel maps
            and the stage-wise saliency logits, all at input resolution.
            Eval: sigmoid of the final fused map predict11.
        """
        # Backbone feature pyramid and the spatial sizes reused below.
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        l0_size = layer0.size()[2:]
        l1_size = layer1.size()[2:]
        l2_size = layer2.size()[2:]
        l3_size = layer3.size()[2:]
        # Auxiliary top-down pathway: shared reducex_* fusions with 2-channel
        # pre* heads at each scale; p5 is the sum of all four maps.
        F1 = self.reducex(layer4)
        p4 = self.pre4(F1)
        p4 = F.interpolate(p4, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_4 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
        F0_3 = self.reducex_3(torch.cat((F0_4, layer3), 1))
        p3 = self.pre3(F0_3)
        p3 = F.interpolate(p3, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_3 = F.interpolate(F0_3, size=l2_size, mode='bilinear', align_corners=True)
        F0_2 = self.reducex_2(torch.cat((F0_3, layer2), 1))
        p2 = self.pre2(F0_2)
        p2 = F.interpolate(p2, size=x.size()[2:], mode='bilinear', align_corners=True)
        F0_2 = F.interpolate(F0_2, size=l1_size, mode='bilinear', align_corners=True)
        F0_1 = self.reducex_1(torch.cat((F0_2, layer1), 1))
        p1 = self.pre1(F0_1)
        p1 = F.interpolate(p1, size=x.size()[2:], mode='bilinear', align_corners=True)
        p5 = p4 + p3 + p2 + p1
        #saliency detect
        predict1 = self.predict1(F1)
        predict1 = F.interpolate(predict1, size=l3_size, mode='bilinear', align_corners=True)
        F1 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
        # Per-sample decoding: start from a detached copy, then overwrite each
        # sample's slice with the output of its size-class branch. Gradients
        # flow only through the branch outputs; a sample whose c[i] is outside
        # 0..4 silently keeps the stale detached copy.
        F2 = F1[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F2[i, :, :, :] = self.reduce3_0(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 1:
                F2[i, :, :, :] = self.reduce3_1(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 2:
                F2[i, :, :, :] = self.reduce3_2(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 3:
                F2[i, :, :, :] = self.reduce3_3(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 4:
                F2[i, :, :, :] = self.reduce3_4(
                    torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
        # Residual saliency refinement at layer2 scale.
        predict2 = self.predict2(F2) + predict1
        predict2 = F.interpolate(predict2, size=l2_size, mode='bilinear', align_corners=True)
        F2 = F.interpolate(F2, size=l2_size, mode='bilinear', align_corners=True)
        F3 = F2[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F3[i, :, :, :] = self.reduce2_0(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 1:
                F3[i, :, :, :] = self.reduce2_1(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 2:
                F3[i, :, :, :] = self.reduce2_2(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 3:
                F3[i, :, :, :] = self.reduce2_3(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 4:
                F3[i, :, :, :] = self.reduce2_4(
                    torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
        # Residual refinement at layer1 scale.
        predict3 = self.predict3(F3) + predict2
        predict3 = F.interpolate(predict3, size=l1_size, mode='bilinear', align_corners=True)
        F3 = F.interpolate(F3, size=l1_size, mode='bilinear', align_corners=True)
        F4 = F3[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F4[i, :, :, :] = self.reduce1_0(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 1:
                F4[i, :, :, :] = self.reduce1_1(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 2:
                F4[i, :, :, :] = self.reduce1_2(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 3:
                F4[i, :, :, :] = self.reduce1_3(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
            elif c[i] == 4:
                F4[i, :, :, :] = self.reduce1_4(
                    torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
        predict4 = self.predict4(F4) + predict3
        F5 = self.reduce5(torch.cat((F4, layer0), 1))
        predict5 = self.predict5(F5) + predict4
        # Size-class-specific lift of the raw layer0 features.
        F0 = F4[:, :, :, :].clone().detach()
        for i in range(len(c)):
            if c[i] == 0:
                F0[i, :, :, :] = self.reduce0_0(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 1:
                F0[i, :, :, :] = self.reduce0_1(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 2:
                F0[i, :, :, :] = self.reduce0_2(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 3:
                F0[i, :, :, :] = self.reduce0_3(layer0[i, :, :, :].unsqueeze(0))
            elif c[i] == 4:
                F0[i, :, :, :] = self.reduce0_4(layer0[i, :, :, :].unsqueeze(0))
        # Fuse F0 with every decoder stage and chain residual predictions;
        # predict11 aggregates the last five heads.
        F1 = F.interpolate(F1, size=l1_size, mode='bilinear', align_corners=True)
        F2 = F.interpolate(F2, size=l1_size, mode='bilinear', align_corners=True)
        F6 = self.reduce6(torch.cat((F0, F5), 1))
        F7 = self.reduce7(torch.cat((F0, F4), 1))
        F8 = self.reduce8(torch.cat((F0, F3), 1))
        F9 = self.reduce9(torch.cat((F0, F2), 1))
        F10 = self.reduce10(torch.cat((F0, F1), 1))
        predict6 = self.predict6(F6) + predict5
        predict7 = self.predict7(F7) + predict6
        predict8 = self.predict8(F8) + predict7
        predict9 = self.predict9(F9) + predict8
        predict10 = self.predict10(F10) + predict9
        predict11 = predict6 + predict7 + predict8 + predict9 + predict10
        # Upsample every output to the input resolution (deep supervision).
        predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict7 = F.interpolate(predict7, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict8 = F.interpolate(predict8, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict9 = F.interpolate(predict9, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict10 = F.interpolate(predict10, size=x.size()[2:], mode='bilinear', align_corners=True)
        predict11 = F.interpolate(predict11, size=x.size()[2:], mode='bilinear', align_corners=True)
        if self.training:
            return p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11
        # NOTE(review): F.sigmoid is deprecated; torch.sigmoid is equivalent.
        return F.sigmoid(predict11)
#----------------------------------------------------------------------------------------
class _ASPP(nn.Module):
def __init__(self, in_dim):
super(_ASPP, self).__init__()
down_dim = in_dim // 2
self.conv1 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=2, padding=2), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=4, padding=4), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv5 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.fuse = nn.Sequential(
nn.Conv2d(5 * down_dim, in_dim, kernel_size=1), nn.BatchNorm2d(in_dim), nn.PReLU()
)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
conv4 = self.conv4(x)
conv5 = F.interpolate(self.conv5(F.adaptive_avg_pool2d(x, 1)), size=x.size()[2:], mode='bilinear',
align_corners=True)
return self.fuse(torch.cat((conv1, conv2, conv3, conv4, conv5), 1))
--- FILE SEPARATOR ---
import os
import os.path
import torch.utils.data as data
from PIL import Image
class ImageFolder_joint(data.Dataset):
    """Paired image/ground-truth dataset driven by a label table.

    `label_list` must support ``iterrows()`` and ``len()`` and yield rows with
    'img_path', 'gt_path' and 'label' entries (e.g. a pandas DataFrame). The
    image and its mask are expected side by side (jpg and png respectively).
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.label_list = label_list
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.label_list)

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        img = Image.open(img_path).convert('RGB')
        target = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            img, target = self.joint_transform(img, target)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, label
class ImageFolder_joint_for_edge(data.Dataset):
    """Dataset yielding (image, mask, edge-mask, label) tuples.

    The edge map is expected to live next to the ground-truth mask, with the
    same extension and an ``_edge`` suffix (``foo.png`` -> ``foo_edge.png``).
    `label_list` must support ``iterrows()`` with 'img_path'/'gt_path'/'label'
    entries (e.g. a pandas DataFrame).
    """

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        imgs = []
        for index, row in label_list.iterrows():
            imgs.append((row['img_path'], row['gt_path'], row['label']))
        self.imgs = imgs
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    @staticmethod
    def _edge_path(gt_path):
        """Derive the edge-map path from the mask path.

        Uses os.path.splitext, which works for any path. The previous
        '.'-splitting required exactly two dots (e.g. './dir/x.png') and
        raised IndexError or built a garbled path otherwise.
        """
        root, ext = os.path.splitext(gt_path)
        return root + "_edge" + ext

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        edge_path = self._edge_path(gt_path)
        img = Image.open(img_path).convert('RGB')
        target = Image.open(gt_path).convert('L')
        target_edge = Image.open(edge_path).convert('L')
        if self.joint_transform is not None:
            # Diagnostic only: flag mismatched sizes before the joint transform.
            if img.size != target.size or img.size != target_edge.size:
                print("error path:", img_path, gt_path)
                print("size:", img.size, target.size, target_edge.size)
            img, target, target_edge = self.joint_transform(img, target, target_edge)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
            target_edge = self.target_transform(target_edge)
        return img, target, target_edge, label

    def __len__(self):
        return len(self.imgs)
class TestFolder_joint(data.Dataset):
    """Evaluation dataset: like ImageFolder_joint, but each item also carries
    the source image path (useful for saving per-image results)."""

    def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
        self.imgs = [(row['img_path'], row['gt_path'], row['label'])
                     for _, row in label_list.iterrows()]
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path, label = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask, label, img_path

    def __len__(self):
        return len(self.imgs)
def make_dataset(root):
    """Pair every '<name>.jpg' directly under `root` with its expected
    '<name>.png' ground-truth path (the png is not checked for existence)."""
    pairs = []
    for entry in os.listdir(root):
        if not entry.endswith('.jpg'):
            continue
        stem = os.path.splitext(entry)[0]
        pairs.append((os.path.join(root, stem + '.jpg'),
                      os.path.join(root, stem + '.png')))
    return pairs
class ImageFolder(data.Dataset):
    """Dataset over a directory of <name>.jpg images with <name>.png masks,
    discovered via make_dataset(root)."""

    def __init__(self, root, joint_transform=None, transform=None, target_transform=None):
        self.root = root
        self.imgs = make_dataset(root)
        self.joint_transform = joint_transform
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        img_path, gt_path = self.imgs[index]
        sample = Image.open(img_path).convert('RGB')
        mask = Image.open(gt_path).convert('L')
        if self.joint_transform is not None:
            sample, mask = self.joint_transform(sample, mask)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return sample, mask

    def __len__(self):
        return len(self.imgs)
--- FILE SEPARATOR ---
import numpy as np
import os
import pylab as pl
#import pydensecrf.densecrf as dcrf
class AvgMeter(object):
    """Running-average tracker: keeps the last value, the sum, the sample
    count and the mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def check_mkdir(dir_name):
    """Create `dir_name` (including missing parents) if it does not exist.

    Uses os.makedirs(..., exist_ok=True): race-free compared to the previous
    exists()-then-mkdir pattern, and also handles nested paths, which
    os.mkdir would reject with FileNotFoundError.
    """
    os.makedirs(dir_name, exist_ok=True)
def cal_precision_recall_mae(prediction, gt):
    """Compute per-threshold precision/recall curves and the MAE.

    Args:
        prediction, gt: uint8 arrays of identical shape (0-255 saliency maps).

    Returns:
        (precision, recall, mae): two 256-entry lists — precision and recall
        of the prediction binarized at each threshold t/255 — and the mean
        absolute error between the normalized maps.

    Cleanup vs. the original: removed the dead commented-out ROC code and the
    unused `TPR`/`FP`/`a` accumulators computed on every iteration.
    """
    # input should be np array with data type uint8
    assert prediction.dtype == np.uint8
    assert gt.dtype == np.uint8
    assert prediction.shape == gt.shape

    eps = 1e-4  # guards the divisions when a map binarizes to all zeros
    prediction = prediction / 255.
    gt = gt / 255.

    mae = np.mean(np.abs(prediction - gt))

    hard_gt = gt > 0.5
    t = np.sum(hard_gt)  # number of positive ground-truth pixels

    precision, recall = [], []
    # Precision and recall at 256 different binarizing thresholds.
    for threshold in range(256):
        hard_prediction = prediction > (threshold / 255.)
        tp = np.sum(hard_prediction & hard_gt)
        p = np.sum(hard_prediction)
        precision.append((tp + eps) / (p + eps))
        recall.append((tp + eps) / (t + eps))
    return precision, recall, mae
def cal_fmeasure(precision, recall):
    """Return the maximum F-beta measure (beta^2 = 0.3) over the 256
    per-threshold (precision, recall) pairs."""
    assert len(precision) == 256
    assert len(recall) == 256
    beta_square = 0.3
    fmeasures = ((1 + beta_square) * p * r / (beta_square * p + r)
                 for p, r in zip(precision, recall))
    return max(fmeasures)
def cal_sizec(prediction, gt):
    """Find the F-measure-optimal binarization threshold and the size class
    of the resulting foreground.

    Args:
        prediction, gt: uint8 arrays of identical shape (0-255 maps).

    Returns:
        (sizec, best_threshold): size class in {0..4} of the prediction
        binarized at the best threshold, and that threshold scaled to [0, 255].

    Bug fix: the original computed `sizec` from whatever the *last* loop
    iteration produced. At threshold 255/255 no uint8 pixel satisfies
    prediction > 1.0, so `p` was always 0 and the function always returned
    size class 0. We now record the foreground fraction at the best
    (F-measure-maximizing) threshold and classify that.
    """
    # input should be np array with data type uint8
    assert prediction.dtype == np.uint8
    assert gt.dtype == np.uint8
    assert prediction.shape == gt.shape

    eps = 1e-4  # guards divisions for empty foregrounds
    prediction = prediction / 255.
    gt = gt / 255.

    hard_gt = gt > 0.5
    t = np.sum(hard_gt)      # number of positive ground-truth pixels
    a = prediction.size      # total pixel count (loop-invariant, hoisted)
    beta_square = 0.3

    best_threshold = 0
    best_F = 0
    best_sm_size = 0.0
    # Scan 256 binarizing thresholds for the best F-measure.
    for threshold in range(256):
        threshold = threshold / 255.
        hard_prediction = prediction > threshold
        tp = np.sum(hard_prediction & hard_gt)
        p = np.sum(hard_prediction)
        precision = (tp + eps) / (p + eps)
        recall = (tp + eps) / (t + eps)
        fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall)
        if fmeasure > best_F:
            best_threshold = threshold * 255
            best_F = fmeasure
            best_sm_size = p / a  # foreground fraction at this threshold

    # Map the foreground fraction at the best threshold to a size class.
    sm_size = best_sm_size
    if 0 <= sm_size < 0.1:
        sizec = 0
    elif 0.1 <= sm_size < 0.2:
        sizec = 1
    elif 0.2 <= sm_size < 0.3:
        sizec = 2
    elif 0.3 <= sm_size < 0.4:
        sizec = 3
    else:
        sizec = 4
    return sizec, best_threshold
def cal_sc(gt):
    """Classify a ground-truth mask into one of five foreground-size bins:
    0 for [0, 0.1), 1 for [0.1, 0.2), 2 for [0.2, 0.3), 3 for [0.3, 0.4),
    4 for [0.4, 1.0]."""
    # input should be np array with data type uint8
    assert gt.dtype == np.uint8
    gt = gt / 255.
    total = float(np.size(gt))
    foreground = float(np.sum(gt > 0.5))
    sm_size = foreground / total
    # Count how many bin boundaries the fraction has crossed.
    sizec = sum(sm_size >= bound for bound in (0.1, 0.2, 0.3, 0.4))
    return sizec
def pr_cruve(precision, recall):
    """Plot the precision-recall curve with pylab.

    Bug fix: the y-axis label was previously set with a second pl.xlabel()
    call, which overwrote the x-axis label and left the y-axis unlabeled.

    NOTE(review): the function name typo ('cruve') is kept for backward
    compatibility with existing callers.
    """
    assert len(precision) == 256
    assert len(recall) == 256
    r = [a[1] for a in zip(precision, recall)]
    p = [a[0] for a in zip(precision, recall)]
    pl.title('PR curve')
    pl.xlabel('Recall')
    pl.ylabel('Precision')
    pl.plot(r, p)
    pl.show()
# for define the size type of the salient object
def size_aware(gt):
    """Return the fraction of pixels in `gt` whose normalized value exceeds
    0.5 (i.e. the foreground area ratio of a uint8 mask)."""
    assert gt.dtype == np.uint8
    normalized = gt / 255.
    foreground = np.sum(normalized > 0.5)
    return foreground / np.size(normalized)
# # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf
# def crf_refine(img, annos):
# def _sigmoid(x):
# return 1 / (1 + np.exp(-x))
# assert img.dtype == np.uint8
# assert annos.dtype == np.uint8
# assert img.shape[:2] == annos.shape
# # img and annos should be np array with data type uint8
# EPSILON = 1e-8
# M = 2 # salient or not
# tau = 1.05
# # Setup the CRF model
# d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
# anno_norm = annos / 255.
# n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm))
# p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm))
# U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
# U[0, :] = n_energy.flatten()
# U[1, :] = p_energy.flatten()
# d.setUnaryEnergy(U)
# d.addPairwiseGaussian(sxy=3, compat=3)
# d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)
# # Do the inference
# infer = np.array(d.inference(1)).astype('float32')
# res = infer[1, :]
# res = res * 255
# res = res.reshape(img.shape[:2])
# return res.astype('uint8')
|
{
"imported_by": [],
"imports": [
"/model.py",
"/datasets.py",
"/misc.py"
]
}
|
Sssssbo/SDCNet
|
/model/make_model.py
|
import torch
import torch.nn as nn
from .backbones.resnet import ResNet, Comb_ResNet, Pure_ResNet, Jointin_ResNet, Jointout_ResNet, BasicBlock, Bottleneck, GDN_Bottleneck, IN_Bottleneck, IN2_Bottleneck, SNR_Bottleneck, SNR2_Bottleneck, SNR3_Bottleneck
from loss.arcface import ArcFace
from .backbones.resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a
from .backbones.se_resnet_ibn_a import se_resnet50_ibn_a, se_resnet101_ibn_a
import torch.nn.functional as F
# Download URLs for the ImageNet-pretrained torchvision ResNet checkpoints,
# keyed by architecture name.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def weights_init_kaiming(m):
    """Kaiming-initialize a module in place, dispatching on its class name.

    Linear: fan-out Kaiming weights + zero bias. Conv: fan-in Kaiming
    weights, zero bias when a bias exists. BatchNorm (affine): unit weight,
    zero bias. Other modules are left untouched.
    """
    name = m.__class__.__name__
    if 'Linear' in name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        nn.init.constant_(m.bias, 0.0)
    elif 'Conv' in name:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif 'BatchNorm' in name:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
    """Initialize a classifier head in place: Linear weights ~ N(0, 0.001),
    bias zeroed when present. Non-Linear modules are left untouched.

    Bug fix: the bias check is now `m.bias is not None`. The previous
    truthiness test `if m.bias:` raises "Boolean value of Tensor with more
    than one element is ambiguous" for any bias vector longer than one.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
class Backbone(nn.Module):
    """Person re-ID model: a configurable ResNet-style trunk followed by
    global average pooling and a BNNeck-style BatchNorm1d bottleneck.

    For the SNR "Jointin"/"Jointout" variants, the trunk also returns
    intermediate feature/residual maps, and this class attaches one
    auxiliary Linear classifier per returned map (widths match each map's
    channel count).
    """
    def __init__(self, num_classes, cfg):
        """Build trunk, pooling and classifier heads from the config.

        Args:
            num_classes: number of identity classes for the classifiers.
            cfg: config node; reads MODEL.{LAST_STRIDE, PRETRAIN_PATH, NAME,
                PRETRAIN_CHOICE, COS_LAYER, NECK, FROZEN} and TEST.NECK_FEAT.
        """
        super(Backbone, self).__init__()
        last_stride = cfg.MODEL.LAST_STRIDE
        model_path = cfg.MODEL.PRETRAIN_PATH
        model_name = cfg.MODEL.NAME
        self.model_name = cfg.MODEL.NAME
        pretrain_choice = cfg.MODEL.PRETRAIN_CHOICE
        #block = cfg.MODEL.BLOCK
        self.cos_layer = cfg.MODEL.COS_LAYER
        self.neck = cfg.MODEL.NECK
        self.neck_feat = cfg.TEST.NECK_FEAT
        # Trunk selection: each name pairs a ResNet skeleton from
        # .backbones.resnet with a bottleneck/normalization variant.
        if model_name == 'Pure_resnet50_GDN':
            self.in_planes = 2048
            self.base = ResNet(last_stride=last_stride,
                               block=GDN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'Comb_resnet50_IN':
            self.in_planes = 2048
            self.base = Comb_ResNet(last_stride=last_stride,
                                    block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                    layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'Pure_resnet50_IN2':
            self.in_planes = 2048
            self.base = Pure_ResNet(last_stride=last_stride,
                                    block=IN2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                    layers=[3, 4, 6, 3])  #
        elif model_name == 'Pure_resnet50_IN':
            self.in_planes = 2048
            self.base = Pure_ResNet(last_stride=last_stride,
                                    block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                    layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'Pure_resnet50_SNR':
            self.in_planes = 2048
            self.base = Pure_ResNet(last_stride=last_stride,
                                    block=SNR_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                    layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'Pure_resnet50_SNR2':
            self.in_planes = 2048
            self.base = Pure_ResNet(last_stride=last_stride,
                                    block=SNR2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                    layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'Jointin_resnet50_SNR3':
            self.in_planes = 2048
            self.base = Jointin_ResNet(last_stride=last_stride,
                                       block=SNR3_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                       layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'Jointout_resnet50_None':
            self.in_planes = 2048
            self.base = Jointout_ResNet(last_stride=last_stride,
                                        block=Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                        layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'Jointout_resnet50_IN':
            self.in_planes = 2048
            self.base = Jointout_ResNet(last_stride=last_stride,
                                        block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                                        layers=[3, 4, 6, 3])  #
            print('using resnet50 as a backbone')
            print(self.base)
        elif model_name == 'resnet18':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[2, 2, 2, 2])
            print('using resnet18 as a backbone')
        elif model_name == 'resnet34':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[3, 4, 6, 3])
            print('using resnet34 as a backbone')
        elif model_name == 'resnet50_ibn_a':
            self.in_planes = 2048
            self.base = resnet50_ibn_a(last_stride)
            print('using se_resnet50_ibn_a as a backbone')
        elif model_name == 'se_resnet50_ibn_a':
            self.in_planes = 2048
            self.base = se_resnet50_ibn_a(
                last_stride, frozen_stages=cfg.MODEL.FROZEN)
            print('using se_resnet50_ibn_a as a backbone')
        elif model_name == 'resnet101_ibn_a':
            self.in_planes = 2048
            self.base = resnet101_ibn_a(
                last_stride, frozen_stages=cfg.MODEL.FROZEN)
            print('using resnet101_ibn_a as a backbone')
        elif model_name == 'se_resnet101_ibn_a':
            self.in_planes = 2048
            self.base = se_resnet101_ibn_a(
                last_stride, frozen_stages=cfg.MODEL.FROZEN)
            print('using se_resnet101_ibn_a as a backbone')
        else:
            # NOTE(review): unknown names only print a warning here;
            # self.base stays unset and the next access will raise.
            print('unsupported backbone! but got {}'.format(model_name))
        if pretrain_choice == 'imagenet':
            self.base.load_param(model_path)
            print('Loading pretrained ImageNet model......from {}'.format(model_path))
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.num_classes = num_classes
        if self.cos_layer:
            print('using cosine layer')
            self.arcface = ArcFace(
                self.in_planes, self.num_classes, s=30.0, m=0.50)
        else:
            self.classifier = nn.Linear(
                self.in_planes, self.num_classes, bias=False)
            self.classifier.apply(weights_init_classifier)
        # Auxiliary classifiers: one Linear head per intermediate map
        # returned by the Jointin/Jointout trunks; the input width of each
        # head matches the channel count of the corresponding map.
        if model_name == 'Jointin_resnet50_SNR3':
            self.classifier = nn.Linear(
                self.in_planes, self.num_classes, bias=False)
            self.classifier.apply(weights_init_classifier)
            self.classifier1 = nn.Linear(512, self.num_classes, bias=False)
            self.classifier1.apply(weights_init_classifier)
            self.classifier2 = nn.Linear(512, self.num_classes, bias=False)
            self.classifier2.apply(weights_init_classifier)
            self.classifier3 = nn.Linear(512, self.num_classes, bias=False)
            self.classifier3.apply(weights_init_classifier)
            self.classifier4 = nn.Linear(512, self.num_classes, bias=False)
            self.classifier4.apply(weights_init_classifier)
            self.classifier5 = nn.Linear(1024, self.num_classes, bias=False)
            self.classifier5.apply(weights_init_classifier)
            self.classifier6 = nn.Linear(256, self.num_classes, bias=False)
            self.classifier6.apply(weights_init_classifier)
            self.classifier7 = nn.Linear(256, self.num_classes, bias=False)
            self.classifier7.apply(weights_init_classifier)
            self.classifier8 = nn.Linear(256, self.num_classes, bias=False)
            self.classifier8.apply(weights_init_classifier)
            self.classifier9 = nn.Linear(256, self.num_classes, bias=False)
            self.classifier9.apply(weights_init_classifier)
            self.classifier10 = nn.Linear(512, self.num_classes, bias=False)
            self.classifier10.apply(weights_init_classifier)
            self.classifier11 = nn.Linear(128, self.num_classes, bias=False)
            self.classifier11.apply(weights_init_classifier)
            self.classifier12 = nn.Linear(128, self.num_classes, bias=False)
            self.classifier12.apply(weights_init_classifier)
            self.classifier13 = nn.Linear(128, self.num_classes, bias=False)
            self.classifier13.apply(weights_init_classifier)
            self.classifier14 = nn.Linear(128, self.num_classes, bias=False)
            self.classifier14.apply(weights_init_classifier)
            self.classifier15 = nn.Linear(256, self.num_classes, bias=False)
            self.classifier15.apply(weights_init_classifier)
            self.classifier16 = nn.Linear(64, self.num_classes, bias=False)
            self.classifier16.apply(weights_init_classifier)
            self.classifier17 = nn.Linear(64, self.num_classes, bias=False)
            self.classifier17.apply(weights_init_classifier)
            self.classifier18 = nn.Linear(64, self.num_classes, bias=False)
            self.classifier18.apply(weights_init_classifier)
            self.classifier19 = nn.Linear(64, self.num_classes, bias=False)
            self.classifier19.apply(weights_init_classifier)
        elif 'Jointout' in model_name:
            # Two heads per stage: one for the stage output, one (_1 suffix)
            # for the stage's residual map.
            self.classifier0 = nn.Linear(64, self.num_classes, bias=False)
            self.classifier0.apply(weights_init_classifier)
            self.classifier0_1 = nn.Linear(64, self.num_classes, bias=False)
            self.classifier0_1.apply(weights_init_classifier)
            self.classifier1 = nn.Linear(256, self.num_classes, bias=False)
            self.classifier1.apply(weights_init_classifier)
            self.classifier1_1 = nn.Linear(256, self.num_classes, bias=False)
            self.classifier1_1.apply(weights_init_classifier)
            self.classifier2 = nn.Linear(512, self.num_classes, bias=False)
            self.classifier2.apply(weights_init_classifier)
            self.classifier2_1 = nn.Linear(512, self.num_classes, bias=False)
            self.classifier2_1.apply(weights_init_classifier)
            self.classifier3 = nn.Linear(1024, self.num_classes, bias=False)
            self.classifier3.apply(weights_init_classifier)
            self.classifier3_1 = nn.Linear(1024, self.num_classes, bias=False)
            self.classifier3_1.apply(weights_init_classifier)
            self.classifier4 = nn.Linear(2048, self.num_classes, bias=False)
            self.classifier4.apply(weights_init_classifier)
            self.classifier4_1 = nn.Linear(2048, self.num_classes, bias=False)
            self.classifier4_1.apply(weights_init_classifier)
        # BNNeck: BatchNorm1d over the pooled feature with a frozen bias.
        self.bottleneck = nn.BatchNorm1d(self.in_planes)
        self.bottleneck.bias.requires_grad_(False)
        self.bottleneck.apply(weights_init_kaiming)
    def forward(self, x, label=None, camid=None):  # label is unused if self.cos_layer == 'no'
        """Run the trunk and heads.

        Training mode returns classification scores plus the global feature
        (and, for Jointin/Jointout variants, one score per auxiliary map).
        Eval mode returns only the feature, before or after the BNNeck
        depending on cfg.TEST.NECK_FEAT.
        """
        if self.training and self.model_name == 'Jointin_resnet50_SNR3':
            # The SNR3 trunk returns the final map plus every intermediate
            # feature (x*) and style-residual (res*) map for deep supervision.
            x, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1 = self.base(x, camid)
            global_feat = nn.functional.avg_pool2d(x, x.shape[2:4])
            global_feat = global_feat.view(global_feat.shape[0], -1)
            feat = self.bottleneck(global_feat)
            cls_score = self.classifier(feat)
            # Each auxiliary map: global-average-pool, flatten, classify.
            fx4_2 = nn.functional.avg_pool2d(x4_2, x4_2.shape[2:4])
            fx4_2 = fx4_2.view(fx4_2.shape[0], -1)
            ax4_2 = self.classifier1(fx4_2)
            fx4_1 = nn.functional.avg_pool2d(x4_1, x4_1.shape[2:4])
            fx4_1 = fx4_1.view(fx4_1.shape[0], -1)
            ax4_1 = self.classifier2(fx4_1)
            fres4_2 = nn.functional.avg_pool2d(res4_2, res4_2.shape[2:4])
            fres4_2 = fres4_2.view(fres4_2.shape[0], -1)
            ares4_2 = self.classifier3(fres4_2)
            fres4_1 = nn.functional.avg_pool2d(res4_1, res4_1.shape[2:4])
            fres4_1 = fres4_1.view(fres4_1.shape[0], -1)
            ares4_1 = self.classifier4(fres4_1)
            fx3_3 = nn.functional.avg_pool2d(x3_3, x3_3.shape[2:4])
            fx3_3 = fx3_3.view(fx3_3.shape[0], -1)
            ax3_3 = self.classifier5(fx3_3)
            fx3_2 = nn.functional.avg_pool2d(x3_2, x3_2.shape[2:4])
            fx3_2 = fx3_2.view(fx3_2.shape[0], -1)
            ax3_2 = self.classifier6(fx3_2)
            fx3_1 = nn.functional.avg_pool2d(x3_1, x3_1.shape[2:4])
            fx3_1 = fx3_1.view(fx3_1.shape[0], -1)
            ax3_1 = self.classifier7(fx3_1)
            fres3_2 = nn.functional.avg_pool2d(res3_2, res3_2.shape[2:4])
            fres3_2 = fres3_2.view(fres3_2.shape[0], -1)
            ares3_2 = self.classifier8(fres3_2)
            fres3_1 = nn.functional.avg_pool2d(res3_1, res3_1.shape[2:4])
            fres3_1 = fres3_1.view(fres3_1.shape[0], -1)
            ares3_1 = self.classifier9(fres3_1)
            fx2_3 = nn.functional.avg_pool2d(x2_3, x2_3.shape[2:4])
            fx2_3 = fx2_3.view(fx2_3.shape[0], -1)
            ax2_3 = self.classifier10(fx2_3)
            fx2_2 = nn.functional.avg_pool2d(x2_2, x2_2.shape[2:4])
            fx2_2 = fx2_2.view(fx2_2.shape[0], -1)
            ax2_2 = self.classifier11(fx2_2)
            fx2_1 = nn.functional.avg_pool2d(x2_1, x2_1.shape[2:4])
            fx2_1 = fx2_1.view(fx2_1.shape[0], -1)
            ax2_1 = self.classifier12(fx2_1)
            fres2_2 = nn.functional.avg_pool2d(res2_2, res2_2.shape[2:4])
            fres2_2 = fres2_2.view(fres2_2.shape[0], -1)
            ares2_2 = self.classifier13(fres2_2)
            fres2_1 = nn.functional.avg_pool2d(res2_1, res2_1.shape[2:4])
            fres2_1 = fres2_1.view(fres2_1.shape[0], -1)
            ares2_1 = self.classifier14(fres2_1)
            fx1_3 = nn.functional.avg_pool2d(x1_3, x1_3.shape[2:4])
            fx1_3 = fx1_3.view(fx1_3.shape[0], -1)
            ax1_3 = self.classifier15(fx1_3)
            fx1_2 = nn.functional.avg_pool2d(x1_2, x1_2.shape[2:4])
            fx1_2 = fx1_2.view(fx1_2.shape[0], -1)
            ax1_2 = self.classifier16(fx1_2)
            fx1_1 = nn.functional.avg_pool2d(x1_1, x1_1.shape[2:4])
            fx1_1 = fx1_1.view(fx1_1.shape[0], -1)
            ax1_1 = self.classifier17(fx1_1)
            fres1_2 = nn.functional.avg_pool2d(res1_2, res1_2.shape[2:4])
            fres1_2 = fres1_2.view(fres1_2.shape[0], -1)
            ares1_2 = self.classifier18(fres1_2)
            fres1_1 = nn.functional.avg_pool2d(res1_1, res1_1.shape[2:4])
            fres1_1 = fres1_1.view(fres1_1.shape[0], -1)
            ares1_1 = self.classifier19(fres1_1)
            return cls_score, global_feat, ax4_2, ax4_1, ares4_2, ares4_1, ax3_3, ax3_2, ax3_1, ares3_2, ares3_1, ax2_3, ax2_2, ax2_1, ares2_2, ares2_1, ax1_3, ax1_2, ax1_1, ares1_2, ares1_1
        elif 'Jointout' in self.model_name and self.training:
            # Jointout trunks return each stage output and its residual map.
            x0, x1, x2, x3, x4, res0, res1, res2, res3, res4 = self.base(x, camid)
            global_feat = nn.functional.avg_pool2d(x4, x4.shape[2:4])
            global_feat = global_feat.view(global_feat.shape[0], -1)
            feat = self.bottleneck(global_feat)
            cls_score = self.classifier4(feat)
            res4 = nn.functional.avg_pool2d(res4, res4.shape[2:4])
            res4 = res4.view(res4.shape[0], -1)
            res4 = self.classifier4_1(res4)
            # NOTE(review): x3 goes through classifier3_1 and res3 through
            # classifier3 (swapped relative to the other stages) -- confirm
            # this matches the loss wiring.
            x3 = nn.functional.avg_pool2d(x3, x3.shape[2:4])
            x3 = x3.view(x3.shape[0], -1)
            x3 = self.classifier3_1(x3)
            res3 = nn.functional.avg_pool2d(res3, res3.shape[2:4])
            res3 = res3.view(res3.shape[0], -1)
            res3 = self.classifier3(res3)
            x2 = nn.functional.avg_pool2d(x2, x2.shape[2:4])
            x2 = x2.view(x2.shape[0], -1)
            x2 = self.classifier2(x2)
            res2 = nn.functional.avg_pool2d(res2, res2.shape[2:4])
            res2 = res2.view(res2.shape[0], -1)
            res2 = self.classifier2_1(res2)
            x1 = nn.functional.avg_pool2d(x1, x1.shape[2:4])
            x1 = x1.view(x1.shape[0], -1)
            x1 = self.classifier1(x1)
            res1 = nn.functional.avg_pool2d(res1, res1.shape[2:4])
            res1 = res1.view(res1.shape[0], -1)
            res1 = self.classifier1_1(res1)
            x0 = nn.functional.avg_pool2d(x0, x0.shape[2:4])
            x0 = x0.view(x0.shape[0], -1)
            x0 = self.classifier0(x0)
            res0 = nn.functional.avg_pool2d(res0, res0.shape[2:4])
            res0 = res0.view(res0.shape[0], -1)
            res0 = self.classifier0_1(res0)
            return global_feat, x0, x1, x2, x3, cls_score, res0, res1, res2, res3, res4
        # Default path (all other variants, and eval for the joint ones).
        x = self.base(x, camid)
        # print(x.shape)
        global_feat = nn.functional.avg_pool2d(x, x.shape[2:4])
        # print(global_feat.shape)
        # print(x.shape)
        # for convert to onnx, kernel size must be from x.shape[2:4] to a constant [20,20]
        #global_feat = nn.functional.avg_pool2d(x, [16, 16])
        # flatten to (bs, 2048), global_feat.shape[0]
        global_feat = global_feat.view(global_feat.shape[0], -1)
        # NOTE(review): this bottleneck call is redundant -- `feat` is
        # unconditionally recomputed (or replaced) by the neck switch below.
        feat = self.bottleneck(global_feat)
        if self.neck == 'no':
            feat = global_feat
        elif self.neck == 'bnneck':
            feat = self.bottleneck(global_feat)
        if self.training:
            if self.cos_layer:
                cls_score = self.arcface(feat, label)
            else:
                cls_score = self.classifier(feat)
            return cls_score, global_feat  # global feature for triplet loss
        else:
            if self.neck_feat == 'after':
                # print("Test with feature after BN")
                return feat
            else:
                # print("Test with feature before BN")
                return global_feat
    def load_param(self, trained_path):
        """Copy weights from a trained checkpoint, skipping classifier and
        arcface heads (their shapes depend on the training class count)."""
        param_dict = torch.load(trained_path)
        for i in param_dict:
            if 'classifier' in i or 'arcface' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model from {}'.format(trained_path))
    def load_param_finetune(self, model_path):
        """Load a checkpoint for finetuning.

        NOTE(review): the copy loop is commented out below, so this method
        currently loads the file and prints a message but copies nothing
        into the model -- confirm whether that is intended.
        """
        param_dict = torch.load(model_path)
        # for i in param_dict:
        # print(i)#change by sb
        # self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model for finetuning from {}'.format(model_path))
def make_model(cfg, num_class):
    """Factory entry point: build the configured re-ID Backbone model."""
    return Backbone(num_class, cfg)
|
import math
import torch
from torch import nn
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with 1-pixel padding."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return conv
class BasicBlock(nn.Module):
    """Two-conv residual block used by ResNet-18/34."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # The first conv carries the (optional) spatial downsampling.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut only when a downsample module was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50 family)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Channel squeeze, spatial conv (with optional stride), expand x4.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class GDN_Bottleneck(nn.Module):
    """Bottleneck whose whitening differs between train and test: during
    training, three fixed batch slices are whitened independently by a
    shared parameter-free BatchNorm; at test time a parameter-free
    InstanceNorm is used instead. An affine BatchNorm then rescales the
    whitened features before the activation.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(GDN_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        # affine=False, track_running_stats=False: pure per-slice whitening.
        self.bn1_0 = nn.BatchNorm2d(
            planes, affine=False, track_running_stats=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2_0 = nn.BatchNorm2d(
            planes, affine=False, track_running_stats=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3_0 = nn.BatchNorm2d(
            planes * 4, affine=False, track_running_stats=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        # Test-time replacements for the per-slice whitening.
        self.in1 = nn.InstanceNorm2d(planes)
        self.in2 = nn.InstanceNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out1 = torch.zeros_like(out)
        # NOTE(review): the fixed [:8] / [8:16] / [16:] slices assume a
        # specific batch layout (apparently 3 groups of 8 samples) --
        # confirm against the training sampler's batch composition.
        if self.training == True:
            #print("training with gdn block")
            out1[:8] = self.bn1_0(out[:8])
            out1[8:16] = self.bn1_0(out[8:16])
            out1[16:] = self.bn1_0(out[16:])
        else:
            #print("test for gdn block")
            out1 = self.in1(out)
        out = self.bn1(out1)
        out = self.relu(out)
        out = self.conv2(out)
        out1 = torch.zeros_like(out)
        if self.training == True:
            out1[:8] = self.bn2_0(out[:8])
            out1[8:16] = self.bn2_0(out[8:16])
            out1[16:] = self.bn2_0(out[16:])
        else:
            # self.in1 is reused here; it has no per-channel parameters, so
            # the num_features it was built with does not constrain the input.
            out1 = self.in1(out)
        out = self.bn2(out1)
        out = self.relu(out)
        out = self.conv3(out)
        out1 = torch.zeros_like(out)
        if self.training == True:
            out1[:8] = self.bn3_0(out[:8])
            out1[8:16] = self.bn3_0(out[8:16])
            out1[16:] = self.bn3_0(out[16:])
        else:
            out1 = self.in2(out)
        out = self.bn3(out1)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class IN_Bottleneck(nn.Module):
    """Bottleneck block in which every conv output is instance-normalized
    and then batch-normalized before its activation."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(IN_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Each stage: conv -> InstanceNorm -> BatchNorm (-> ReLU).
        y = self.relu(self.bn1(self.in1_0(self.conv1(x))))
        y = self.relu(self.bn2(self.in2_0(self.conv2(y))))
        y = self.bn3(self.in3_0(self.conv3(y)))
        y += shortcut
        return self.relu(y)
class IN2_Bottleneck(nn.Module):
    """Bottleneck variant that, at each stage, concatenates the normalized
    map with the raw conv output and fuses them with an extra conv."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(IN2_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        # Fusion conv: [normalized, raw] (2*planes channels) -> planes.
        self.conv1_1 = nn.Sequential(
            nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.conv2_1 = nn.Sequential(
            nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.conv3_1 = nn.Sequential(
            nn.Conv2d(planes * 8, planes * 4, kernel_size=1, bias=False), nn.BatchNorm2d(planes * 4)
        )
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity (or projected) shortcut.
        shortcut = x if self.downsample is None else self.downsample(x)
        raw = self.conv1(x)
        normed = self.relu(self.bn1(self.in1_0(raw)))
        fused = self.conv1_1(torch.cat((normed, raw), 1))
        raw = self.conv2(fused)
        normed = self.relu(self.bn2(self.in2_0(raw)))
        fused = self.conv2_1(torch.cat((normed, raw), 1))
        raw = self.conv3(fused)
        normed = self.relu(self.bn3(self.in3_0(raw)))
        out = self.conv3_1(torch.cat((normed, raw), 1))
        out += shortcut
        return self.relu(out)
class SNR_Bottleneck(nn.Module):
    """Bottleneck with SNR-style feature splitting: each conv output is
    instance-normalized, the component removed by that normalization is
    re-embedded by an extra conv, and the two parts are summed back
    before the activation.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        # Re-embeds the component stripped by the instance norm.
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        x1 = self.conv1(x)
        out1 = self.in1_0(x1)
        # res1 = the component instance normalization removed from x1.
        res1 = x1 - out1
        res1 = self.conv1_1(res1)
        res1 = self.bn1_1(res1)
        res1 = self.relu(res1)
        # NOTE(review): the bn1 result below is immediately overwritten by
        # the next assignment, so self.bn1 never affects the output
        # (contrast SNR2_Bottleneck, which normalizes after the addition).
        # Kept as-is to preserve trained-model behavior; same for bn2 below.
        x1 = self.bn1(x1)
        x1 = out1 + res1
        x1 = self.relu(x1)
        x2 = self.conv2(x1)
        out2 = self.in2_0(x2)
        res2 = x2 - out2
        res2 = self.conv2_1(res2)
        res2 = self.bn2_1(res2)
        res2 = self.relu(res2)
        x2 = self.bn2(x2)
        x2 = out2 + res2
        x2 = self.relu(x2)
        x3 = self.conv3(x2)
        x3 = self.bn3(x3)
        if self.downsample is not None:
            residual = self.downsample(residual)
        x3 += residual
        x3 = self.relu(x3)
        return x3
class SNR2_Bottleneck(nn.Module):
    """SNR bottleneck variant whose second stage also absorbs the first
    stage's re-embedded residual (res2 is built from x2 - norm2 + res1),
    with BatchNorm applied after each recombination."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR2_Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.in1_0 = nn.InstanceNorm2d(planes)
        # Re-embeds the component stripped by the first instance norm.
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.in2_0 = nn.InstanceNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3_0 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Brings res1 down to the strided resolution when stride == 2.
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        # Stage 1: split the conv output into its instance-normalized part
        # and the removed component, re-embed the latter, then recombine.
        y1 = self.conv1(x)
        norm1 = self.in1_0(y1)
        res1 = self.relu(self.bn1_1(self.conv1_1(y1 - norm1)))
        y1 = self.relu(self.bn1(norm1 + res1))

        # Stage 2: same split, but the stage-1 residual is carried forward
        # (pooled first when this block downsamples spatially).
        y2 = self.conv2(y1)
        norm2 = self.in2_0(y2)
        if self.stride == 2:
            res1 = self.maxpool(res1)
        res2 = self.relu(self.bn2_1(self.conv2_1(y2 - norm2 + res1)))
        y2 = self.relu(self.bn2(norm2 + res2))

        y3 = self.bn3(self.conv3(y2))
        y3 += shortcut
        return self.relu(y3)
class SNR3_Bottleneck(nn.Module):
    """SNR bottleneck that additionally *returns* its intermediate maps
    (x2, x1) and re-embedded residuals (res2, res1) so outer layers can
    accumulate and deeply supervise them. Accepts either a plain tensor or
    the 5-tuple produced by a preceding SNR3 block (so these blocks can be
    chained inside nn.Sequential).
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR3_Bottleneck, self).__init__()
        self.in1 = nn.InstanceNorm2d(planes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Re-embeds the component stripped by the instance norm.
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Used to align the stage-1 maps when this block downsamples.
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
    def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
        # When chained in nn.Sequential, the previous block's 5-tuple output
        # arrives as a single argument and is unpacked here.
        if type(x) is tuple:
            # print(len(x))
            x_2 = x[1]
            x_1 = x[2]
            r2 = x[3]
            r1 = x[4]
            x = x[0]
        residual = x
        x1 = self.conv1(x)
        out1 = self.in1(x1)
        # res1 = the component instance normalization removed from x1.
        res1 = x1 - out1
        res1 = self.conv1_1(res1)
        res1 = self.bn1_1(res1)
        res1 = self.relu(res1)
        # print(out1.shape)
        # print(res1.shape)
        # print(x1.shape)
        x1 = out1 + res1
        x1 = self.bn1(x1)
        x1 = self.relu(x1)
        x2 = self.conv2(x1)
        # self.in1 is reused here; it has no per-channel parameters, so the
        # num_features it was built with does not constrain the input.
        out2 = self.in1(x2)
        res2 = x2 - out2
        res2 = self.conv2_1(res2)
        res2 = self.bn2_1(res2)
        res2 = self.relu(res2)
        x2 = out2 + res2
        x2 = self.bn2(x2)
        x2 = self.relu(x2)
        x3 = self.conv3(x2)
        x3 = self.bn3(x3)
        if self.downsample is not None:
            residual = self.downsample(residual)
        x3 += residual
        x3 = self.relu(x3)
        # Accumulate the incoming intermediate maps from the previous block.
        if x_2 is not None: x2 = x2 + x_2
        if x_1 is not None: x1 = x1 + x_1
        if r2 is not None: res2 = res2 + r2
        if r1 is not None: res1 = res1 + r1
        '''
        print(x3.shape)
        print(x2.shape)
        print(x1.shape)
        print(res2.shape)
        print(res1.shape)
        '''
        # Downsample the stage-1 maps so all returned maps share the strided
        # resolution of x2/res2.
        if self.stride == 2:
            x1 = self.maxpool(x1)
            res1 = self.maxpool(res1)
        return x3, x2, x1, res2, res1
class SNR4_Bottleneck(nn.Module):
    """SNR bottleneck returning intermediate maps for deep supervision.

    NOTE(review): this class is line-for-line identical to SNR3_Bottleneck
    above -- presumably kept as a placeholder for a further variant;
    consider deduplicating. See SNR3_Bottleneck for the full commentary.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(SNR4_Bottleneck, self).__init__()
        self.in1 = nn.InstanceNorm2d(planes)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
                                 padding=1, bias=False)
        self.bn2_1 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.in3 = nn.InstanceNorm2d(planes * 4)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
    def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
        # Unpack the 5-tuple produced by a preceding block when chained.
        if type(x) is tuple:
            # print(len(x))
            x_2 = x[1]
            x_1 = x[2]
            r2 = x[3]
            r1 = x[4]
            x = x[0]
        residual = x
        x1 = self.conv1(x)
        out1 = self.in1(x1)
        res1 = x1 - out1
        res1 = self.conv1_1(res1)
        res1 = self.bn1_1(res1)
        res1 = self.relu(res1)
        # print(out1.shape)
        # print(res1.shape)
        # print(x1.shape)
        x1 = out1 + res1
        x1 = self.bn1(x1)
        x1 = self.relu(x1)
        x2 = self.conv2(x1)
        out2 = self.in1(x2)
        res2 = x2 - out2
        res2 = self.conv2_1(res2)
        res2 = self.bn2_1(res2)
        res2 = self.relu(res2)
        x2 = out2 + res2
        x2 = self.bn2(x2)
        x2 = self.relu(x2)
        x3 = self.conv3(x2)
        x3 = self.bn3(x3)
        if self.downsample is not None:
            residual = self.downsample(residual)
        x3 += residual
        x3 = self.relu(x3)
        if x_2 is not None: x2 = x2 + x_2
        if x_1 is not None: x1 = x1 + x_1
        if r2 is not None: res2 = res2 + r2
        if r1 is not None: res1 = res1 + r1
        '''
        print(x3.shape)
        print(x2.shape)
        print(x1.shape)
        print(res2.shape)
        print(res1.shape)
        '''
        if self.stride == 2:
            x1 = self.maxpool(x1)
            res1 = self.maxpool(res1)
        return x3, x2, x1, res2, res1
# --------------------------------- resnet-----------------------------------
class ResNet(nn.Module):
    """Plain ResNet trunk returning only the final stage-4 feature map.

    `last_stride` sets the stride of stage 4 (re-ID setups commonly pass 1
    to keep spatial resolution); `frozen_stages` freezes the stem plus the
    first N stages via _freeze_stages().
    """
    def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
        # NOTE: the mutable default `layers` is never mutated here, so it is
        # harmless in practice.
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # self.relu = nn.ReLU(inplace=True)   # add missed relu
        # NOTE(review): kernel_size=2 differs from torchvision's standard
        # 3x3/stride-2 stem pooling -- kept as trained.
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block`; the first one carries the
        stride and a 1x1 projection when the shapes change."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _freeze_stages(self):
        """Put the stem and the first `frozen_stages` layers in eval mode
        and stop their gradients."""
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, 'layer{}'.format(i))
                print('layer{}'.format(i))
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def forward(self, x, camid=None):
        # `camid` is accepted for interface parity with the joint trunks but
        # is unused here.
        x = self.conv1(x)
        x = self.bn1(x)
        # x = self.relu(x)    # add missed relu
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
    def load_param(self, model_path):
        """Copy matching weights from a checkpoint, skipping the fc head."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
    def random_init(self):
        """He-style random init for convs; identity init for BatchNorm."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
# ---------------------------------Comb resnet-----------------------------------
class Comb_ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.in1 = nn.InstanceNorm2d(64)
self.bn1_1 = nn.BatchNorm2d(64)
self.conv2 = nn.Sequential(
nn.Conv2d(128, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=1)
)
self.in2 = nn.InstanceNorm2d(256)
self.bn2_1 = nn.BatchNorm2d(256)
self.conv3 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=1)
)
self.in3 = nn.InstanceNorm2d(512)
self.bn3_1 = nn.BatchNorm2d(512)
self.conv4 = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=1)
)
self.in4 = nn.InstanceNorm2d(1024)
self.bn4_1 = nn.BatchNorm2d(1024)
self.conv5 = nn.Sequential(
nn.Conv2d(2048, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(),
nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(),
nn.Conv2d(1024, 1024, kernel_size=1)
)
self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
    def forward(self, x, camid=None):
        """Forward pass interleaving an instance-norm side branch with each stage.

        Before every residual stage, an IN -> BN -> ReLU branch of the current
        features is concatenated (channel dim) with the raw features and fused
        by a conv block (conv2..conv5), halving channels back before the stage.
        `camid` is accepted but unused in this variant.

        :param x: input image batch — assumes NCHW with 3 input channels
                  (conv1 outside this view presumably maps 3->64); TODO confirm
        :return: feature map from layer4
        """
        x = self.conv1(x)
        x = self.bn1(x)
        # x = self.relu(x) # add missed relu
        x = self.maxpool(x)
        # Stage 1 prelude: IN-normalized copy, re-scaled by BN, then fused.
        xin = self.in1(x)
        xin = self.bn1_1(xin)
        xin = self.relu(xin)
        x = self.conv2(torch.cat((xin,x),1))
        x = self.layer1(x)
        xin = self.in2(x)
        xin = self.bn2_1(xin)
        xin = self.relu(xin)
        x = self.conv3(torch.cat((xin,x),1))
        x = self.layer2(x)
        xin = self.in3(x)
        xin = self.bn3_1(xin)
        xin = self.relu(xin)
        x = self.conv4(torch.cat((xin,x),1))
        x = self.layer3(x)
        xin = self.in4(x)
        xin = self.bn4_1(xin)
        xin = self.relu(xin)
        x = self.conv5(torch.cat((xin,x),1))
        x = self.layer4(x)
        return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------Pure resnet-----------------------------------
class Pure_ResNet(nn.Module):
    """Plain ResNet backbone (conv stem + 4 bottleneck stages), without the
    instance-normalization side branches used by the other variants in this file.

    :param last_stride: stride of the final stage (ReID models often use 1)
    :param block: residual block class — must expose ``expansion``
    :param frozen_stages: freeze stem + first N stages via ``_freeze_stages``
    :param layers: number of blocks per stage
    """
    def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
        # NOTE(review): mutable default `layers` is shared across calls; it is
        # only read here, but a tuple default would be safer.
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # self.relu = nn.ReLU(inplace=True) # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride)
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one residual stage; a 1x1 conv + BN downsample shortcut is
        # added to the first block when spatial size or channels change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _freeze_stages(self):
        # Put the stem and the first `frozen_stages` stages in eval mode and
        # stop their gradients.
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, 'layer{}'.format(i))
                print('layer{}'.format(i))
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def forward(self, x, camid=None):
        # `camid` is accepted for interface parity with sibling backbones but
        # is unused here.
        x = self.conv1(x)
        x = self.bn1(x)
        #print(camid)
        # x = self.relu(x) # add missed relu
        x = self.maxpool(x)
        # NOTE(review): the `if False:` branch is dead code — apparently kept
        # for block types that return 5-tuples (SNR variants); only the `else`
        # path ever runs. Consider removing.
        if False:
            x,_,_,_,_ = self.layer1(x)
            x,_,_,_,_ = self.layer2(x)
            x,_,_,_,_ = self.layer3(x)
            x,_,_,_,_ = self.layer4(x)
        else:
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
        return x
    def load_param(self, model_path):
        # Copy pretrained tensors in place, skipping the 'fc' classifier head.
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
    def random_init(self):
        # He-normal init for convs; gamma=1 / beta=0 for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
# ---------------------------------jointin resnet-----------------------------------
class Jointin_ResNet(nn.Module):
    """ResNet backbone whose stages are built from blocks that return 5-tuples
    (main output plus intermediate/residual features), presumably SNR-style
    blocks — the default is ``SNR3_Bottleneck``; TODO confirm block contract.

    In training mode ``forward`` returns all 20 intermediate tensors for
    auxiliary losses; in eval mode only the final feature map.
    """
    def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        # conv1_1/bn1_1 belong to a residual-restitution step that is currently
        # commented out in forward().
        self.conv1_1 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1,
                                 bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn1_1 = nn.BatchNorm2d(64)
        self.in1 = nn.InstanceNorm2d(64)
        # self.relu = nn.ReLU(inplace=True) # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=last_stride)
    def _make_layer(self, block, planes, blocks, stride=1):
        # Standard ResNet stage builder; projection shortcut on shape change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _freeze_stages(self):
        # Freeze stem + first `frozen_stages` stages (eval mode, no grads).
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, 'layer{}'.format(i))
                print('layer{}'.format(i))
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def forward(self, x, camid=None):
        # Stem: conv -> instance norm -> batch norm -> maxpool. `camid` unused.
        x = self.conv1(x)
        x0 = self.in1(x)
        '''
        res0 = x - x0
        res0 = self.conv1_1(res0)
        res0 = self.bn1_1(res0)
        x0 = x0 + res0
        '''
        x0 = self.bn1(x0)
        # x = self.relu(x) # add missed relu
        x0 = self.maxpool(x0)
        # Each stage returns (out, mid2, mid1, residual2, residual1) — the
        # first element feeds the next stage.
        x1_3, x1_2, x1_1, res1_2, res1_1 = self.layer1(x0)
        x2_3, x2_2, x2_1, res2_2, res2_1 = self.layer2(x1_3)
        x3_3, x3_2, x3_1, res3_2, res3_1 = self.layer3(x2_3)
        x4_3, x4_2, x4_1, res4_2, res4_1 = self.layer4(x3_3)
        if self.training:
            return x4_3, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1
        else:
            return x4_3
    def load_param(self, model_path):
        # Copy pretrained tensors, skipping the 'fc' classifier head.
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
    def random_init(self):
        # He-normal init for convs; gamma=1 / beta=0 for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
# ---------------------------------jointout resnet-----------------------------------
class Jointout_ResNet(nn.Module):
    """ResNet backbone with an IN-based disentangle/restitution step AFTER each
    stage: features are instance-normalized, the removed component
    (``px - IN(px)``) is refined by a small conv head (convN_res) and added
    back. In training mode all per-stage outputs and residuals are returned
    for auxiliary losses; in eval mode only the final features.
    """
    def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
        self.inplanes = 64
        super().__init__()
        print(block)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        # Refinement head for the stem's IN residual.
        self.conv1_res = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace = True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace = True),
            nn.Conv2d(64, 64, kernel_size=1)
        )
        # Per-stage normalization layers: inN (instance norm), bnN_0 (on the
        # IN output) and bnN_1 (after the residual is added back).
        self.in1 = nn.InstanceNorm2d(64)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn1_1 = nn.BatchNorm2d(64)
        self.in2 = nn.InstanceNorm2d(256)
        self.bn2_1 = nn.BatchNorm2d(256)
        self.bn2_0 = nn.BatchNorm2d(256)
        self.in3 = nn.InstanceNorm2d(512)
        self.bn3_1 = nn.BatchNorm2d(512)
        self.bn3_0 = nn.BatchNorm2d(512)
        self.in4 = nn.InstanceNorm2d(1024)
        self.bn4_1 = nn.BatchNorm2d(1024)
        self.bn4_0 = nn.BatchNorm2d(1024)
        self.in5 = nn.InstanceNorm2d(2048)
        self.bn5_1 = nn.BatchNorm2d(2048)
        self.bn5_0 = nn.BatchNorm2d(2048)
        self.relu = nn.ReLU(inplace=True) # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.frozen_stages = frozen_stages
        self.layer1 = self._make_layer(block, 64, layers[0])
        # convN: 3x3 projection applied to each stage's output before IN split;
        # convN_res: refinement head for that stage's removed component.
        self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv2_res = nn.Sequential(
            nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace = True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace = True),
            nn.Conv2d(128, 256, kernel_size=1)
        )
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.conv3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv3_res = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace = True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace = True),
            nn.Conv2d(256, 512, kernel_size=1)
        )
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.conv4 = nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv4_res = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace = True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace = True),
            nn.Conv2d(512, 1024, kernel_size=1)
        )
        self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)
        self.conv5 = nn.Conv2d(2048, 2048, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.conv5_res = nn.Sequential(
            nn.Conv2d(2048, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(inplace = True),
            nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(inplace = True),
            nn.Conv2d(1024, 2048, kernel_size=1)
        )
    def _make_layer(self, block, planes, blocks, stride=1):
        # Standard ResNet stage builder; projection shortcut on shape change.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def _freeze_stages(self):
        # Freeze stem + first `frozen_stages` stages (eval mode, no grads).
        if self.frozen_stages >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, 'layer{}'.format(i))
                print('layer{}'.format(i))
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def forward(self, x, camid=None):
        # Stem split: x0 = IN(x), res0 = removed component; the refined
        # residual is added back before pooling. `camid` is unused.
        x = self.conv1(x)
        x0 = self.in1(x)
        res0 = x - x0
        x0 = self.bn1(x0)
        x0 = self.relu(x0)
        res0 = self.conv1_res(res0)
        x0 = x0 + res0
        x0 = self.bn1_1(x0)
        # x = self.relu(x) # add missed relu
        x0 = self.maxpool(x0)
        # Each stage repeats the same pattern: project (convN), split via IN,
        # refine the removed part (convN_res), restitute, renormalize.
        x1 = self.layer1(x0)
        px1 = self.conv2(x1)
        x1 = self.in2(px1)
        res1 = px1 - x1
        x1 = self.bn2_0(x1)
        x1 = self.relu(x1)
        res1 = self.conv2_res(res1)
        x1 = x1 + res1
        x1 = self.bn2_1(x1)
        x1 = self.relu(x1)
        x2 = self.layer2(x1)
        px2 = self.conv3(x2)
        x2 = self.in3(px2)
        res2 = px2 - x2
        x2 = self.bn3_0(x2)
        x2 = self.relu(x2)
        res2 = self.conv3_res(res2)
        x2 = x2 + res2
        x2 = self.bn3_1(x2)
        x2 = self.relu(x2)
        x3 = self.layer3(x2)
        px3 = self.conv4(x3)
        x3 = self.in4(px3)
        res3 = px3 - x3
        x3 = self.bn4_0(x3)
        x3 = self.relu(x3)
        res3 = self.conv4_res(res3)
        x3 = x3 + res3
        x3 = self.bn4_1(x3)
        x3 = self.relu(x3)
        x4 = self.layer4(x3)
        px4 = self.conv5(x4)
        x4 = self.in5(px4)
        res4 = px4 - x4
        x4 = self.bn5_0(x4)
        x4 = self.relu(x4)
        res4 = self.conv5_res(res4)
        x4 = x4 + res4
        x4 = self.bn5_1(x4)
        x4 = self.relu(x4)
        if self.training:
            return x0, x1, x2, x3, x4, res0, res1, res2, res3, res4
        else:
            return x4
    def load_param(self, model_path):
        # Copy pretrained tensors, skipping the 'fc' classifier head.
        param_dict = torch.load(model_path)
        for i in param_dict:
            if 'fc' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
    def random_init(self):
        # He-normal init for convs; gamma=1 / beta=0 for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
|
{
"imported_by": [],
"imports": [
"/model/backbones/resnet.py"
]
}
|
Sssssbo/SDCNet
|
/resnet/__init__.py
|
from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
|
from .resnet import ResNet, BasicBlock, Bottleneck
import torch
from torch import nn
from .config import resnet50_path
# Download URLs for torchvision's official ImageNet-pretrained ResNet
# checkpoints. NOTE(review): this table is not referenced anywhere in this
# file — weights are loaded from `resnet50_path` instead; confirm whether a
# caller uses it before pruning.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class ResNet50(nn.Module):
    """Wrapper exposing a pretrained ResNet-50 backbone as five stages.

    Weights are loaded from ``resnet50_path`` at construction time; the
    stem (``layer0``) and the four residual stages are re-exposed as
    attributes of this module.
    """

    def __init__(self):
        super(ResNet50, self).__init__()
        backbone = ResNet(last_stride=2,
                          block=Bottleneck, frozen_stages=False,
                          layers=[3, 4, 6, 3])
        backbone.load_param(resnet50_path)
        self.layer0 = backbone.layer0
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4

    def forward(self, x):
        """Run the input through all five stages; return the last feature map."""
        features = x
        for stage in (self.layer0, self.layer1, self.layer2,
                      self.layer3, self.layer4):
            features = stage(features)
        return features

    def load_param(self, trained_path):
        """Copy weights from a fine-tuned checkpoint, skipping head layers."""
        state = torch.load(trained_path)
        for key in state:
            if 'classifier' in key or 'arcface' in key:
                continue
            self.state_dict()[key].copy_(state[key])
        print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_BIN(nn.Module):
    """ResNet-50 wrapper built with IN_Bottleneck blocks.

    NOTE(review): `IN_Bottleneck` is NOT imported in this file (only ResNet,
    BasicBlock and Bottleneck are) — instantiating this class raises
    NameError as written; confirm and add the missing import.
    """
    def __init__(self):
        super(ResNet50_BIN, self).__init__()
        net = ResNet(last_stride=2,
                     block=IN_Bottleneck, frozen_stages=False,
                     layers=[3, 4, 6, 3])
        net.load_param(resnet50_path)
        # Re-expose the pretrained backbone's stages on this wrapper.
        self.layer0 = net.layer0
        self.layer1 = net.layer1
        self.layer2 = net.layer2
        self.layer3 = net.layer3
        self.layer4 = net.layer4
    def forward(self, x):
        # Sequentially apply stem (layer0) and the four residual stages.
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        return layer4
    def load_param(self, trained_path):
        # Copy every checkpoint tensor except classification-head entries.
        param_dict = torch.load(trained_path)
        for i in param_dict:
            if 'classifier' in i or 'arcface' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_LowIN(nn.Module):
    """ResNet-50 wrapper around a `ResNet_LowIN` backbone (instance norm in
    the lower stages, presumably).

    NOTE(review): `ResNet_LowIN` is NOT imported in this file — instantiating
    this class raises NameError as written; confirm and add the missing
    import.
    """
    def __init__(self):
        super(ResNet50_LowIN, self).__init__()
        net = ResNet_LowIN(last_stride=2,
                           block=Bottleneck, frozen_stages=False,
                           layers=[3, 4, 6, 3])
        net.load_param(resnet50_path)
        # Re-expose the pretrained backbone's stages on this wrapper.
        self.layer0 = net.layer0
        self.layer1 = net.layer1
        self.layer2 = net.layer2
        self.layer3 = net.layer3
        self.layer4 = net.layer4
    def forward(self, x):
        # Sequentially apply stem (layer0) and the four residual stages.
        layer0 = self.layer0(x)
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        return layer4
    def load_param(self, trained_path):
        # Copy every checkpoint tensor except classification-head entries.
        param_dict = torch.load(trained_path)
        for i in param_dict:
            if 'classifier' in i or 'arcface' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model from {}'.format(trained_path))
|
{
"imported_by": [],
"imports": [
"/resnet/make_model.py"
]
}
|
riadghorra/whiteboard-oop-project
|
/src/client.py
|
import socket
import json
import sys
import math
from white_board import WhiteBoard, binary_to_dict
'''
Ouverture de la configuration initiale stockée dans config.json qui contient le mode d'écriture, la couleur et
la taille d'écriture.
Ces Paramètres sont ensuite à modifier par l'utisateur dans l'interface pygame
'''
# Load the initial whiteboard configuration (drawing mode, colour, pen size);
# the user can change these later from the pygame toolbar.
with open('config.json') as json_file:
    start_config = json.load(json_file)
'''
définition de l'adresse IP du serveur. Ici le serveur est en local.
'''
# Server address: IP read from config.json ("ip_serveur"); the port is fixed
# and must match the one the server listens on.
hote = start_config["ip_serveur"]
port = 5001
def main():
    """Connect to the whiteboard server over TCP, receive the client id and
    the shared drawing history, then open a WhiteBoard session bound to the
    socket. Returns early (None) if the server is unreachable.
    """
    connexion_avec_serveur = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Connect to the server; abort with a message if it does not answer.
    try:
        connexion_avec_serveur.connect((hote, port))
    except (TimeoutError, ConnectionRefusedError, ConnectionResetError, ConnectionAbortedError) as e:
        return print("Le serveur n'a pas répondu, vérifiez les paramètres de connexion")
    print("Connexion réussie avec le serveur")
    # First get the client id
    username = binary_to_dict(connexion_avec_serveur.recv(2 ** 16))["client_id"]
    # Second get the message size
    msg_recu = connexion_avec_serveur.recv(2 ** 8)
    message_size = binary_to_dict(msg_recu)["message_size"]
    # Then get the first chunk of history using a number of bytes equal to the
    # power of 2 just above its size
    msg_recu = connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1))
    total_size_received = sys.getsizeof(msg_recu)
    # Once we get the first chunk, we loop until we get the whole history.
    # NOTE(review): sys.getsizeof includes the bytes-object overhead, so the
    # comparison with message_size is approximate — confirm the server
    # computes message_size the same way.
    while total_size_received < message_size:
        msg_recu += connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1))
        total_size_received = sys.getsizeof(msg_recu)
    msg_decode = binary_to_dict(msg_recu)
    hist = msg_decode
    # Once the whiteboard state (figures and textboxes drawn by previous
    # users) is received, start a whiteboard bound to this socket.
    whiteboard = WhiteBoard(username, start_config, hist)
    whiteboard.start(connexion_avec_serveur)
# Entry point: only connect to the server when run as a script, not on import.
if __name__ == '__main__':
    main()
|
import pygame
import pygame.draw
import json
import sys
from functools import reduce
import operator
from figures import TextBox, draw_line, draw_point, draw_textbox, draw_rect, draw_circle
from tools import Mode, ColorBox, Auth, Save, FontSizeBox, HandlePoint, HandleLine, HandleText, HandleRect, HandleCircle
import copy
'''
Ouverture de la configuration initiale
'''
def dict_to_binary(the_dict):
    """Serialize a dict to UTF-8 encoded JSON bytes for socket transmission.

    :param the_dict: JSON-serializable dictionary
    :return: bytes of the JSON representation
    """
    # Renamed the local from `str`, which shadowed the builtin `str` type.
    payload = json.dumps(the_dict)
    return bytes(payload, 'utf-8')
def binary_to_dict(binary):
    """Decode UTF-8 JSON bytes back into a dict.

    On failure (wrong type or truncated/corrupt JSON) prints a diagnostic and
    returns an empty whiteboard-history dict instead of raising.

    :param binary: UTF-8 encoded JSON bytes
    :return: decoded dict, or the empty history on error
    """
    try:
        jsn = ''.join(binary.decode("utf-8"))
        d = json.loads(jsn)
    except (TypeError, json.decoder.JSONDecodeError) as e:
        # Bug fix: the original tested `e == TypeError`, comparing an exception
        # INSTANCE to a class — always False — so the TypeError branch was
        # unreachable and fell through to `return d` with `d` unbound
        # (UnboundLocalError). Use isinstance and return the default in both
        # cases.
        if isinstance(e, TypeError):
            print("Le message reçu n'est pas du format attendu")
        else:
            print('Un paquet a été perdu')
        return {"actions": [], "message": [], "auth": []}
    return d
class WhiteBoard:
    """Pygame whiteboard window: toolbar (modes, colours, pen sizes, auth and
    save boxes), drawing surface, local action history, and the network loop
    (`start`) that synchronizes that history with the server. `start_local`
    runs the same GUI without a server, for debugging."""
    def __init__(self, client_name, start_config, start_hist=None):
        """
        Whiteboard initialization : we build the GUI using the config file and the potential history of actions made by
        other users. Returns a Whiteboard window ready to use.
        :param client_name: Name of the client who just opened a new whiteboard window (str)
        :param start_config: Whiteboard configuration stored in config.json and loaded as a dict (dict)
        :param start_hist: History of actions by other users (dict)
        """
        pygame.init()
        if not isinstance(client_name, str):
            raise TypeError("Client name must be a string")
        if not isinstance(start_config, dict):
            raise TypeError("Starting configuration file must be a dictionary")
        if start_hist is None:
            start_hist = {"actions": [], "message": [], "auth": []}
        elif not isinstance(start_hist, dict):
            raise TypeError("Starting history file must be a dictionary")
        self._done = False
        self._config = start_config
        self._name = client_name
        self._hist = start_hist
        self.__screen = pygame.display.set_mode([self._config["width"], self._config["length"]])
        self.__screen.fill(self._config["board_background_color"])
        # One event handler per drawing mode; keys match config["mode"].
        self.__handler = {"line": HandleLine(self),
                          "point": HandlePoint(self),
                          "text": HandleText(self),
                          "rect": HandleRect(self),
                          "circle": HandleCircle(self)}
        pygame.draw.line(self.__screen, self._config["active_color"], [0, self._config["toolbar_y"]],
                         [self._config["width"], self._config["toolbar_y"]], 1)
        # We create a global variable to keep track of the position of the last mode box we create in order to make
        # sure that there is no overlapping between left and right boxes on the toolbar on the toolbar
        """
        Tracé de la box auth, qui permet de donner l'autorisation de modification des textbox
        """
        last_left_position = 0
        last_right_position = self._config["width"] - self._config["mode_box_size"][0]
        self._erasing_auth = False
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__auth_box = Auth((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__auth_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Tracé de la boite save qui permet d'enregistrer l'image
        """
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__save_box = Save((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__save_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        # Mode-selection boxes on the left of the toolbar.
        self.__modes = [Mode("point", (2 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("line", (3 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("text", (4 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("rect", (5 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("circle", (6 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"]))
                        ]
        # If right and left boxes overlap, raise an error and close pygame
        try:
            for mod in self.__modes:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                mod.add(self.__screen)
                last_left_position += self._config["mode_box_size"][0]
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Choix des couleurs
        """
        self.__colors = []
        try:
            for key, value in self._config["color_palette"].items():
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                color_box = ColorBox(value, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__colors.append(color_box)
                color_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Choix des épaisseurs
        """
        self.__font_sizes = []
        try:
            for size in self._config["pen_sizes"]:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                font_size_box = FontSizeBox(size, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__font_sizes.append(font_size_box)
                font_size_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        initialisation des variables de dessin
        """
        pygame.display.flip()
        self._draw = False
        self._last_pos = None
        self._mouse_position = (0, 0)
        """
        Initialisation des paramètres des text boxes
        """
        self._text_boxes = [] # will hold the TextBox objects
        self.active_box = None
        self.load_actions(self._hist)
        self.__modification_allowed = copy.deepcopy(self._hist["auth"])
        # if some client names are in this list, you will have the authorisation to edit their textboxes
        for action in self._hist["actions"]:
            if action["type"] == "Text_box":
                self.append_text_box(TextBox(**action["params"]))
    """
    Encapsulation
    """
    def is_done(self):
        # True once the user has quit the whiteboard.
        return self._done
    def end(self):
        # Mark the whiteboard loop for termination.
        self._done = True
    def get_config(self, maplist):
        """
        Getter of config file. Uses a list of keys to traverse the config dict
        :param maplist: list of keys from parent to child to get the wanted value (list)
        :return: value of a key in the config file (object)
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            return reduce(operator.getitem, maplist, self._config)
        except (KeyError, TypeError):
            return None
    def set_config(self, maplist, value):
        """
        Setter of config file. Uses the getter and assigns value to a key
        :param maplist: list of keys from parent to child to get the wanted value (list)
        :param value: value to set (object)
        :return: None if failed
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            self.get_config(maplist[:-1])[maplist[-1]] = value
        except (KeyError, TypeError):
            return None
    def get_hist(self, key=None):
        # Return the whole history dict, or one of its entries when `key` given.
        if key is None:
            return self._hist
        else:
            return self._hist[key]
    def add_to_hist(self, value):
        # Append one action dict to the local history.
        self._hist["actions"].append(value)
    @property
    def screen(self):
        return self.__screen
    def clear_screen(self):
        """
        Clear the screen by coloring it to background color. Does not color the toolbar
        :return:
        """
        self.__screen.fill(self.get_config(["board_background_color"]), (0, self.get_config(["toolbar_y"]) + 1,
                                                                         self.get_config(["width"]),
                                                                         self.get_config(["length"]) - self.get_config(
                                                                             ["toolbar_y"]) + 1))
    def is_drawing(self):
        return self._draw
    def pen_up(self):
        # Stop drawing (mouse button released).
        self._draw = False
    def pen_down(self):
        # Start drawing (mouse button pressed).
        self._draw = True
    @property
    def name(self):
        return self._name
    @property
    def modification_allowed(self):
        return self.__modification_allowed
    @property
    def last_pos(self):
        return self._last_pos
    def reset_last_pos(self):
        self._last_pos = None
    def update_last_pos(self):
        # Remember the current mouse position (used to draw line segments).
        self._last_pos = self._mouse_position
    def __get_mouse_position(self):
        return self._mouse_position
    def __set_mouse_position(self, value):
        self._mouse_position = value
    mouse_position = property(__get_mouse_position, __set_mouse_position)
    def get_text_boxes(self):
        return self._text_boxes
    def append_text_box(self, textbox):
        self._text_boxes.append(textbox)
    def del_text_box(self, textbox):
        self._text_boxes.remove(textbox)
    def draw(self, obj, timestamp):
        """
        Method to draw figures defined in figures.py. Also adds drawn objects to history.
        :param obj: class of figure to draw
        :param timestamp: timestamp at which the drawing happens
        :return: None
        """
        # Draw object on screen
        obj.draw(self.__screen)
        # Create dict containing object parameters and right timestamp to add to history
        hist_obj = {"type": obj.type, "timestamp": timestamp, "params": obj.fetch_params(), "client": self._name}
        # Special case if it's a Text_box object, we need to get the correct box id
        if hist_obj["type"] == "Text_box":
            hist_obj["id"] = obj.id_counter
            hist_obj["owner"] = self._name
        self.add_to_hist(hist_obj)
    def switch_config(self, event):
        """
        Switch between different modes
        :param event: Action by the user : a mouse click on either modes, colors or font sizes
        :return: None
        """
        if event == "quit":
            self.set_config(["mode"], "quit")
        # We go through each mode, color and font size to see if that mode should be triggered by the event
        else:
            for mod in self.__modes:
                if mod.is_triggered(event):
                    self.set_config(["mode"], mod.name)
            for col in self.__colors:
                if col.is_triggered(event):
                    self.set_config(["text_box", "text_color"], col.color)
                    self.set_config(["active_color"], col.color)
            for font_size_ in self.__font_sizes:
                if font_size_.is_triggered(event):
                    self.set_config(["font_size"], font_size_.font_size)
            if self.__auth_box.is_triggered(event):
                # Toggle whether other clients may edit this client's boxes.
                self._erasing_auth = not self._erasing_auth
                self.__auth_box.switch(self.__screen, self._erasing_auth, self.__modification_allowed, self._name)
                self._hist["auth"] = [self._name, self._erasing_auth]
            if self.__save_box.is_triggered(event):
                self.__save_box.save(self.__screen, self)
                print("Le dessin a été sauvegardé dans le dossier")
    def set_active_box(self, box, new=True):
        """
        A method specific to text boxes : select an existing box or one that has just been created to edit. This box is
        thus said to be "active"
        :param box: instance of the TextBox class
        :param new: boolean to specify if the box was just created or already existed
        :return:
        """
        # If the selected box is already the active one, do nothing
        if box == self.active_box:
            return
        # If there is a box that is active we must turn it into "inactive"
        if self.active_box is not None:
            # Change its color to the "inactive color"
            self.active_box.set_textbox_color(self.get_config(["text_box", "inactive_color"]))
            # Select the id of previous active box
            id_counter = self.active_box.id_counter
            # Find the previous active box and change its color in history
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action["params"]["text"] = self.active_box.get_textbox_text()
                    action['params']["box_color"] = self.get_config(["text_box", "inactive_color"])
            # Render it
            self.active_box.draw(self.__screen)
        # If selected box already exists on the whiteboard we must turn it into "active"
        if not new:
            id_counter = box.id_counter
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action['params']["box_color"] = self.get_config(["text_box", "active_color"])
        # Draw the newly activated box
        self.active_box = box
        self.active_box.draw(self.__screen)
        pygame.display.flip()
    def draw_action(self, action):
        """
        Draw the result of an action by the user on the whiteboard
        :param action: usually a mouse action by the user
        :return:
        """
        if action["type"] == "Point":
            draw_point(action["params"], self.__screen)
        if action["type"] == "Line":
            draw_line(action["params"], self.__screen)
        if action["type"] == "Text_box":
            draw_textbox(action["params"], self.__screen)
        if action["type"] == "rect":
            draw_rect(action["params"], self.__screen)
        if action["type"] == "circle":
            draw_circle(action["params"], self.__screen)
    def load_actions(self, hist):
        """
        Load actions from history
        :param hist: list of dict representing the history of actions in the whiteboard session
        :return:
        """
        # Sort actions chronologically
        sred = sorted(hist["actions"],
                      key=lambda value: value["timestamp"])
        # Go through each action and draw it
        for action in sred:
            self.draw_action(action)
        pygame.display.flip()
    def start(self, connexion_avec_serveur):
        """
        Start and run a whiteboard window
        :param connexion_avec_serveur: socket to connect with server (socket.socket)
        :return:
        """
        # Initialize timestamp
        last_timestamp_sent = 0
        while not self.is_done():
            # Browse all events done by user
            for event in pygame.event.get():
                # If user closes the window, quit the whiteboard
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                # Use specific handling method for current drawing mode
                self.__handler[self.get_config(["mode"])].handle_all(event)
            # msg_a_envoyer["message"] = "CARRY ON"
            # Send dict history to server
            if self._hist["auth"] != [self._name, self._erasing_auth]:
                self._hist["auth"] = []
            new_modifs = [modif for modif in self.get_hist()["actions"] if
                          (modif["timestamp"] > last_timestamp_sent and self._name == modif["client"])]
            message_a_envoyer = {"message": "", 'actions': new_modifs, "auth": self._hist["auth"]}
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
            self._hist["auth"] = []
            # Update last timestamp sent
            if new_modifs:
                last_timestamp_sent = max([modif["timestamp"] for modif in new_modifs])
            # Dict received from server
            try:
                new_hist = binary_to_dict(connexion_avec_serveur.recv(2 ** 24))
            except (ConnectionResetError, ConnectionAbortedError) as e:
                print("Le serveur a été éteint, veuillez le relancer")
                self._done = True
                # NOTE(review): execution falls through after the recv failure,
                # so `new_hist` may be unbound below (NameError) — a `break`
                # here looks intended; confirm.
                pass
            # Consider actions made by another client after new_last_timestamp
            new_actions = [action for action in new_hist["actions"] if action["client"] != self._name]
            for action in new_actions:
                # Here there are two cases, a new figure (point, line, rect, circle, new text box) is created or an
                # existing text box is modified. For this second case, we use the variable "matched" as indicator
                matched = False
                if action["type"] == "Text_box":
                    # Find the text box id
                    for textbox in [x for x in self._hist["actions"] if x["type"] == "Text_box"]:
                        if action["id"] == textbox["id"]:
                            # Modify it with the newly acquired parameters from server
                            textbox["params"]["text"], textbox["params"]["w"] = action["params"]["text"], \
                                                                                action["params"]["w"]
                            action_to_update_textbox = action
                            for element in self.get_text_boxes():
                                if element.id_counter == action["id"]:
                                    self.del_text_box(element)
                                    self.append_text_box(TextBox(**action_to_update_textbox["params"]))
                            # Draw the modified text box with updated parameters
                            self.clear_screen()
                            self.load_actions(self._hist)
                            matched = True
                # If we are in the first case, we add the new actions to history and draw them
                if not matched:
                    self.add_to_hist(action)
                    if action["type"] == "Text_box":
                        self.append_text_box(TextBox(**action["params"]))
                    self.draw_action(action)
            if self._name in new_hist["auth"]:
                new_hist["auth"].remove(self._name)
            if new_hist["auth"] != self.__modification_allowed:
                self.__modification_allowed = copy.deepcopy(new_hist["auth"])
            pygame.display.flip()
        # Once we are done, we quit pygame and send end message
        pygame.quit()
        print("Fermeture de la connexion")
        # NOTE(review): if the loop body never ran, `message_a_envoyer` is
        # unbound here (NameError) — confirm intended lifecycle.
        message_a_envoyer["message"] = "END"
        try:
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
        except (ConnectionResetError, BrokenPipeError) as e:
            print("Il n'y a pas de message à envoyer au serveur")
        connexion_avec_serveur.close()
    def start_local(self):
        """
        Starts Whiteboard locally. Used to test stuff and debug.
        :return:
        """
        while not self.is_done():
            for event in pygame.event.get():
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                self.__handler[self.get_config(["mode"])].handle_all(event)
            pygame.display.flip()
        pygame.quit()
|
{
"imported_by": [],
"imports": [
"/src/white_board.py"
]
}
|
riadghorra/whiteboard-oop-project
|
/src/main.py
|
from white_board import WhiteBoard
import json
'''
This file is used to run locally or to debug
'''
# Load the whiteboard's start-up configuration once, at import time.
with open('config.json') as json_file:
    start_config = json.load(json_file)
def main():
    """Entry point: open a client whiteboard with the start-up config and run it locally."""
    whiteboard = WhiteBoard("client", start_config)
    whiteboard.start_local()
if __name__ == '__main__':
    main()
|
import pygame
import pygame.draw
import json
import sys
from functools import reduce
import operator
from figures import TextBox, draw_line, draw_point, draw_textbox, draw_rect, draw_circle
from tools import Mode, ColorBox, Auth, Save, FontSizeBox, HandlePoint, HandleLine, HandleText, HandleRect, HandleCircle
import copy
'''
Ouverture de la configuration initiale
'''
def dict_to_binary(the_dict):
    """Serialize a dict to UTF-8 encoded JSON bytes for sending over the socket.

    :param the_dict: JSON-serializable dict
    :return: the JSON document encoded as UTF-8 bytes
    """
    # Fix: the local was named `str`, shadowing the builtin.
    payload = json.dumps(the_dict)
    return bytes(payload, 'utf-8')
def binary_to_dict(binary):
    """Decode UTF-8 encoded JSON bytes into a dict.

    Falls back to an empty history dict when the payload is malformed, so a
    corrupted or truncated network packet never crashes the client.

    :param binary: JSON document encoded as UTF-8 bytes
    :return: the decoded dict, or {"actions": [], "message": [], "auth": []} on error
    """
    try:
        # Fix: dropped the no-op ''.join() around the decoded string.
        jsn = binary.decode("utf-8")
        d = json.loads(jsn)
    except (TypeError, json.decoder.JSONDecodeError) as e:
        # Fix: the original tested `e == TypeError`, comparing an exception
        # *instance* to a *class* — always False, so the wrong message printed.
        if isinstance(e, TypeError):
            print("Le message reçu n'est pas du format attendu")
        else:
            print('Un paquet a été perdu')
        return {"actions": [], "message": [], "auth": []}
    return d
class WhiteBoard:
    """Client-side shared-whiteboard window.

    Builds a pygame GUI (toolbar + drawing area) from a config dict, replays a
    shared history of drawing actions, and — in ``start`` — keeps that history
    in sync with a server over a socket.
    """
    def __init__(self, client_name, start_config, start_hist=None):
        """
        Whiteboard initialization : we build the GUI using the config file and the potential history of actions made by
        other users. Returns a Whiteboard window ready to use.
        :param client_name: Name of the client who just opened a new whiteboard window (str)
        :param start_config: Whiteboard configuration stored in config.json and loaded as a dict (dict)
        :param start_hist: History of actions by other users (dict)
        """
        pygame.init()
        if not isinstance(client_name, str):
            raise TypeError("Client name must be a string")
        if not isinstance(start_config, dict):
            raise TypeError("Starting configuration file must be a dictionary")
        if start_hist is None:
            start_hist = {"actions": [], "message": [], "auth": []}
        elif not isinstance(start_hist, dict):
            raise TypeError("Starting history file must be a dictionary")
        self._done = False
        self._config = start_config
        self._name = client_name
        self._hist = start_hist
        self.__screen = pygame.display.set_mode([self._config["width"], self._config["length"]])
        self.__screen.fill(self._config["board_background_color"])
        # One event handler per drawing mode; keys must match the Mode box names created below.
        self.__handler = {"line": HandleLine(self),
                          "point": HandlePoint(self),
                          "text": HandleText(self),
                          "rect": HandleRect(self),
                          "circle": HandleCircle(self)}
        # Horizontal separator between the toolbar strip and the drawing area.
        pygame.draw.line(self.__screen, self._config["active_color"], [0, self._config["toolbar_y"]],
                         [self._config["width"], self._config["toolbar_y"]], 1)
        # We create a global variable to keep track of the position of the last mode box we create in order to make
        # sure that there is no overlapping between left and right boxes on the toolbar on the toolbar
        """
        Tracé de la box auth, qui permet de donner l'autorisation de modification des textbox
        """
        # (auth box: toggles the permission for others to edit this client's text boxes)
        last_left_position = 0
        last_right_position = self._config["width"] - self._config["mode_box_size"][0]
        self._erasing_auth = False
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__auth_box = Auth((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__auth_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Tracé de la boite save qui permet d'enregistrer l'image
        """
        # (save box: exports the drawing area as a PNG)
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__save_box = Save((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__save_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        self.__modes = [Mode("point", (2 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("line", (3 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("text", (4 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("rect", (5 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("circle", (6 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"]))
                        ]
        # If right and left boxes overlap, raise an error and close pygame
        try:
            for mod in self.__modes:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                mod.add(self.__screen)
                last_left_position += self._config["mode_box_size"][0]
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Choix des couleurs
        """
        # (colour-palette boxes, laid out from the right edge of the toolbar)
        self.__colors = []
        try:
            for key, value in self._config["color_palette"].items():
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                color_box = ColorBox(value, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__colors.append(color_box)
                color_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Choix des épaisseurs
        """
        # (pen-thickness boxes, also laid out from the right edge)
        self.__font_sizes = []
        try:
            for size in self._config["pen_sizes"]:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                font_size_box = FontSizeBox(size, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__font_sizes.append(font_size_box)
                font_size_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        initialisation des variables de dessin
        """
        pygame.display.flip()
        self._draw = False
        self._last_pos = None
        self._mouse_position = (0, 0)
        """
        Initialisation des paramètres des text boxes
        """
        self._text_boxes = []  # This list will hold the TextBox objects
        self.active_box = None
        self.load_actions(self._hist)
        self.__modification_allowed = copy.deepcopy(self._hist["auth"])
        # if some client names are in this list, you will have the authorisation to edit their textboxes
        for action in self._hist["actions"]:
            if action["type"] == "Text_box":
                self.append_text_box(TextBox(**action["params"]))
    """
    Encapsulation
    """
    def is_done(self):
        # True once the user has quit the window; the main loops test this.
        return self._done
    def end(self):
        # Mark the whiteboard as finished.
        self._done = True
    def get_config(self, maplist):
        """
        Getter of config file. Uses a list of keys to traverse the config dict
        :param maplist: list of keys from parent to child to get the wanted value (list)
        :return: value of a key in the config file (object)
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            return reduce(operator.getitem, maplist, self._config)
        except (KeyError, TypeError):
            return None
    def set_config(self, maplist, value):
        """
        Setter of config file. Uses the getter and assigns value to a key
        :param maplist: list of keys from parent to child to get the wanted value (list)
        :param value: value to set (object)
        :return: None if failed
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            # Fetch the parent dict, then assign to the final key in place.
            self.get_config(maplist[:-1])[maplist[-1]] = value
        except (KeyError, TypeError):
            return None
    def get_hist(self, key=None):
        # Return the whole history dict, or one of its top-level entries.
        if key is None:
            return self._hist
        else:
            return self._hist[key]
    def add_to_hist(self, value):
        # Append one action dict to the shared action history.
        self._hist["actions"].append(value)
    @property
    def screen(self):
        return self.__screen
    def clear_screen(self):
        """
        Clear the screen by coloring it to background color. Does not color the toolbar
        :return:
        """
        self.__screen.fill(self.get_config(["board_background_color"]), (0, self.get_config(["toolbar_y"]) + 1,
                                                                         self.get_config(["width"]),
                                                                         self.get_config(["length"]) - self.get_config(
                                                                             ["toolbar_y"]) + 1))
    def is_drawing(self):
        # True while the mouse button is held down in a drawing mode.
        return self._draw
    def pen_up(self):
        self._draw = False
    def pen_down(self):
        self._draw = True
    @property
    def name(self):
        return self._name
    @property
    def modification_allowed(self):
        # Names of clients whose text boxes this client may edit.
        return self.__modification_allowed
    @property
    def last_pos(self):
        # Previous mouse position, used to draw continuous line segments.
        return self._last_pos
    def reset_last_pos(self):
        self._last_pos = None
    def update_last_pos(self):
        self._last_pos = self._mouse_position
    def __get_mouse_position(self):
        return self._mouse_position
    def __set_mouse_position(self, value):
        self._mouse_position = value
    mouse_position = property(__get_mouse_position, __set_mouse_position)
    def get_text_boxes(self):
        return self._text_boxes
    def append_text_box(self, textbox):
        self._text_boxes.append(textbox)
    def del_text_box(self, textbox):
        self._text_boxes.remove(textbox)
    def draw(self, obj, timestamp):
        """
        Method to draw figures defined in figures.py. Also adds drawn objects to history.
        :param obj: class of figure to draw
        :param timestamp: timestamp at which the drawing happens
        :return: None
        """
        # Draw object on screen
        obj.draw(self.__screen)
        # Create dict containing object parameters and right timestamp to add to history
        hist_obj = {"type": obj.type, "timestamp": timestamp, "params": obj.fetch_params(), "client": self._name}
        # Special case if it's a Text_box object, we need to get the correct box id
        if hist_obj["type"] == "Text_box":
            hist_obj["id"] = obj.id_counter
            hist_obj["owner"] = self._name
        self.add_to_hist(hist_obj)
    def switch_config(self, event):
        """
        Switch between different modes
        :param event: Action by the user : a mouse click on either modes, colors or font sizes
        :return: None
        """
        if event == "quit":
            self.set_config(["mode"], "quit")
        # We go through each mode, color and font size to see if that mode should be triggered by the event
        else:
            for mod in self.__modes:
                if mod.is_triggered(event):
                    self.set_config(["mode"], mod.name)
            for col in self.__colors:
                if col.is_triggered(event):
                    self.set_config(["text_box", "text_color"], col.color)
                    self.set_config(["active_color"], col.color)
            for font_size_ in self.__font_sizes:
                if font_size_.is_triggered(event):
                    self.set_config(["font_size"], font_size_.font_size)
            # Toggle the modification-permission flag and publish it via history.
            if self.__auth_box.is_triggered(event):
                self._erasing_auth = not self._erasing_auth
                self.__auth_box.switch(self.__screen, self._erasing_auth, self.__modification_allowed, self._name)
                self._hist["auth"] = [self._name, self._erasing_auth]
            if self.__save_box.is_triggered(event):
                self.__save_box.save(self.__screen, self)
                print("Le dessin a été sauvegardé dans le dossier")
    def set_active_box(self, box, new=True):
        """
        A method specific to text boxes : select an existing box or one that has just been created to edit. This box is
        thus said to be "active"
        :param box: instance of the TextBox class
        :param new: boolean to specify if the box was just created or already existed
        :return:
        """
        # If the selected box is already the active one, do nothing
        if box == self.active_box:
            return
        # If there is a box that is active we must turn it into "inactive"
        if self.active_box is not None:
            # Change its color to the "inactive color"
            self.active_box.set_textbox_color(self.get_config(["text_box", "inactive_color"]))
            # Select the id of previous active box
            id_counter = self.active_box.id_counter
            # Find the previous active box and change its color in history
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action["params"]["text"] = self.active_box.get_textbox_text()
                    action['params']["box_color"] = self.get_config(["text_box", "inactive_color"])
            # Render it
            self.active_box.draw(self.__screen)
        # If selected box already exists on the whiteboard we must turn it into "active"
        if not new:
            id_counter = box.id_counter
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action['params']["box_color"] = self.get_config(["text_box", "active_color"])
        # Draw the newly activated box
        self.active_box = box
        self.active_box.draw(self.__screen)
        pygame.display.flip()
    def draw_action(self, action):
        """
        Draw the result of an action by the user on the whiteboard
        :param action: usually a mouse action by the user
        :return:
        """
        if action["type"] == "Point":
            draw_point(action["params"], self.__screen)
        if action["type"] == "Line":
            draw_line(action["params"], self.__screen)
        if action["type"] == "Text_box":
            draw_textbox(action["params"], self.__screen)
        if action["type"] == "rect":
            draw_rect(action["params"], self.__screen)
        if action["type"] == "circle":
            draw_circle(action["params"], self.__screen)
    def load_actions(self, hist):
        """
        Load actions from history
        :param hist: list of dict representing the history of actions in the whiteboard session
        :return:
        """
        # Sort actions chronologically
        sred = sorted(hist["actions"],
                      key=lambda value: value["timestamp"])
        # Go through each action and draw it
        for action in sred:
            self.draw_action(action)
        pygame.display.flip()
    def start(self, connexion_avec_serveur):
        """
        Start and run a whiteboard window
        :param connexion_avec_serveur: socket to connect with server (socket.socket)
        :return:
        """
        # Initialize timestamp
        last_timestamp_sent = 0
        while not self.is_done():
            # Browse all events done by user
            for event in pygame.event.get():
                # If user closes the window, quit the whiteboard
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                # Use specific handling method for current drawing mode
                self.__handler[self.get_config(["mode"])].handle_all(event)
            # msg_a_envoyer["message"] = "CARRY ON"
            # Send dict history to server
            if self._hist["auth"] != [self._name, self._erasing_auth]:
                self._hist["auth"] = []
            new_modifs = [modif for modif in self.get_hist()["actions"] if
                          (modif["timestamp"] > last_timestamp_sent and self._name == modif["client"])]
            message_a_envoyer = {"message": "", 'actions': new_modifs, "auth": self._hist["auth"]}
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
            self._hist["auth"] = []
            # Update last timestamp sent
            if new_modifs:
                last_timestamp_sent = max([modif["timestamp"] for modif in new_modifs])
            # Dict received from server
            try:
                new_hist = binary_to_dict(connexion_avec_serveur.recv(2 ** 24))
            except (ConnectionResetError, ConnectionAbortedError) as e:
                print("Le serveur a été éteint, veuillez le relancer")
                self._done = True
                pass
            # NOTE(review): if recv fails on the very first iteration, new_hist is
            # never bound and the next line raises NameError — confirm intended.
            # Consider actions made by another client after new_last_timestamp
            new_actions = [action for action in new_hist["actions"] if action["client"] != self._name]
            for action in new_actions:
                # Here there are two cases, a new figure (point, line, rect, circle, new text box) is created or an
                # existing text box is modified. For this second case, we use the variable "matched" as indicator
                matched = False
                if action["type"] == "Text_box":
                    # Find the text box id
                    for textbox in [x for x in self._hist["actions"] if x["type"] == "Text_box"]:
                        if action["id"] == textbox["id"]:
                            # Modify it with the newly acquired parameters from server
                            textbox["params"]["text"], textbox["params"]["w"] = action["params"]["text"], \
                                                                               action["params"]["w"]
                            action_to_update_textbox = action
                            for element in self.get_text_boxes():
                                if element.id_counter == action["id"]:
                                    self.del_text_box(element)
                            self.append_text_box(TextBox(**action_to_update_textbox["params"]))
                            # Draw the modified text box with updated parameters
                            self.clear_screen()
                            self.load_actions(self._hist)
                            matched = True
                # If we are in the first case, we add the new actions to history and draw them
                if not matched:
                    self.add_to_hist(action)
                    if action["type"] == "Text_box":
                        self.append_text_box(TextBox(**action["params"]))
                    self.draw_action(action)
            if self._name in new_hist["auth"]:
                new_hist["auth"].remove(self._name)
            if new_hist["auth"] != self.__modification_allowed:
                self.__modification_allowed = copy.deepcopy(new_hist["auth"])
            pygame.display.flip()
        # Once we are done, we quit pygame and send end message
        pygame.quit()
        print("Fermeture de la connexion")
        # NOTE(review): message_a_envoyer is only bound inside the loop above; if the
        # loop body never ran, this raises NameError — confirm intended.
        message_a_envoyer["message"] = "END"
        try:
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
        except (ConnectionResetError, BrokenPipeError) as e:
            print("Il n'y a pas de message à envoyer au serveur")
        connexion_avec_serveur.close()
    def start_local(self):
        """
        Starts Whiteboard locally. Used to test stuff and debug.
        :return:
        """
        while not self.is_done():
            for event in pygame.event.get():
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                self.__handler[self.get_config(["mode"])].handle_all(event)
            pygame.display.flip()
        pygame.quit()
|
{
"imported_by": [],
"imports": [
"/src/white_board.py"
]
}
|
riadghorra/whiteboard-oop-project
|
/src/tools.py
|
"""
Module contenant les differents outils de gestion du tableau
"""
import pygame
import pygame.draw
from datetime import datetime
from figures import Point, Line, TextBox, Rectangle, Circle
import time
# =============================================================================
# classes de gestion des changements de parametres utilisateur
# =============================================================================
class TriggerBox:
    """
    Abstract base class for a square clickable zone of the screen.

    top_left (list): pixel coordinates of the top-left corner
    size (int): side length of the square, in pixels
    """
    def __init__(self, top_left, size):
        self.coords = top_left
        self.rect = pygame.Rect(top_left, size)
    def is_triggered(self, event):
        """
        Return True when the user's mouse click lands inside this box.

        event (pygame event): a mouse click from the user
        """
        return self.rect.collidepoint(event.pos)
class Auth(TriggerBox):
    """
    Toolbar button toggling the permission for others to modify one's text boxes.
    """
    def __init__(self, top_left, size):
        TriggerBox.__init__(self, top_left, size)
        self._size = size
    def _indicator_geometry(self):
        # Centre and radius of the coloured indicator circle inside the box.
        centre = [int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + self._size[1] / 2)]
        radius = int(min(self._size[0], self._size[1] / 3))
        return centre, radius
    def add(self, screen):
        """
        Draw the auth box: outline, red indicator circle and its caption.
        """
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        centre, radius = self._indicator_geometry()
        pygame.draw.circle(screen, [255, 0, 0], centre, radius)
        font = pygame.font.Font(None, 18)
        legend = {"text": font.render("auth", True, [0, 0, 0]), "coords": self.coords}
        screen.blit(legend["text"], legend["coords"])
    def switch(self, screen, erasing_auth, modification_allowed, name):
        # Green = permission granted, red = permission withdrawn.
        centre, radius = self._indicator_geometry()
        if erasing_auth:
            pygame.draw.circle(screen, [0, 255, 0], centre, radius)
            print("{} a donné son autorisation de modifications".format(name))
        else:
            pygame.draw.circle(screen, [255, 0, 0], centre, radius)
            print("{} a retiré son autorisation de modifications".format(name))
        return [name, erasing_auth]
class Save(TriggerBox):
    """
    Toolbar button that saves the whiteboard drawing area as a PNG file.
    """
    def __init__(self, top_left, size):
        TriggerBox.__init__(self, top_left, size)
        self._size = size
    def add(self, screen):
        """
        Draw the save box: outline plus its caption.
        """
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        font = pygame.font.Font(None, 18)
        screen.blit(font.render("save", True, [0, 0, 0]), self.coords)
    def save(self, screen, whiteboard):
        # Export only the drawing area below the toolbar, never the toolbar itself.
        top = whiteboard.get_config(["toolbar_y"]) + 1
        area = (0, top,
                whiteboard.get_config(["width"]),
                whiteboard.get_config(["length"]) - whiteboard.get_config(["toolbar_y"]) - 1)
        pygame.image.save(screen.subsurface(area), "mygreatdrawing.png")
class Mode(TriggerBox):
    """
    A drawing mode of the board, entered by clicking its trigger box.

    name (string): mode label rendered inside the box on screen
    """
    def __init__(self, name, top_left, size):
        super(Mode, self).__init__(top_left, size)
        self.name = name
    def add(self, screen):
        """
        Draw the mode's trigger box and make it active on screen.
        """
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        font = pygame.font.Font(None, 18)
        caption = font.render(self.name, True, [0, 0, 0])
        screen.blit(caption, self.coords)
class ColorBox(TriggerBox):
    """
    Trigger box used to pick a drawing colour.

    color (list): colour of the box
    """
    def __init__(self, color, top_left, size):
        super(ColorBox, self).__init__(top_left, size)
        self.color = color
    def add(self, screen):
        """
        Draw the colour box, filled with its own colour.
        """
        pygame.draw.rect(screen, self.color, self.rect)
class FontSizeBox(TriggerBox):
    """
    Trigger box used to pick the pen thickness.

    font_size (int): stroke thickness in pixels
    """
    def __init__(self, font_size, top_left, size):
        super(FontSizeBox, self).__init__(top_left, size)
        self.font_size = font_size
        # Centre of the box, where a sample dot of the chosen thickness is drawn.
        self.center = [top_left[0] + size[0] // 2,
                       top_left[1] + size[1] // 2]
    def add(self, screen):
        """
        Draw the font-size box: outline plus a filled dot showing the thickness.
        """
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        pygame.draw.circle(screen, [0, 0, 0], self.center, self.font_size)
# =============================================================================
# classes de gestion des evenements utilisateur
# =============================================================================
class EventHandler:
    """
    Base class of the per-mode user-event handlers.

    whiteboard: the whiteboard instance whose events this handler manages
    """
    def __init__(self, whiteboard):
        self.whiteboard = whiteboard
    def handle(self, event):
        """
        Common pre-check for every mode: did the user quit, or click the toolbar
        (changing mode/colour/size)? Returns True when the event was consumed.
        """
        if event.type == pygame.QUIT:
            self.whiteboard.end()
            self.whiteboard.switch_config("quit")
            return True
        if event.type == pygame.MOUSEBUTTONDOWN:
            coord = event.dict['pos']
            # A click on the toolbar strip is a configuration change, not drawing.
            if coord[1] <= self.whiteboard.get_config(["toolbar_y"]):
                self.whiteboard.switch_config(event)
                return True
        return False
class HandlePoint(EventHandler):
    """
    Event handler for point mode.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
    def handle_all(self, event):
        """
        In point mode only left mouse clicks matter: each one draws a point.
        """
        # Common pre-check: quitting and toolbar clicks are handled upstream.
        if self.handle(event):
            return
        if event.type != pygame.MOUSEBUTTONDOWN:
            return
        if event.dict["button"] != 1:
            return
        to_draw = Point(event.dict["pos"],
                        self.whiteboard.get_config(["active_color"]),
                        self.whiteboard.get_config(["font_size"]),
                        self.whiteboard.get_config(["toolbar_y"]))
        self.whiteboard.draw(to_draw, datetime.timestamp(datetime.now()))
class HandleLine(EventHandler):
    """
    Event handler for line mode.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
    def handle_mouse_motion(self):
        """
        Mouse moved while the button is held down: render the stroke live.
        """
        if not self.whiteboard.is_drawing():
            return
        self.whiteboard.mouse_position = pygame.mouse.get_pos()
        if self.whiteboard.mouse_position[1] <= self.whiteboard.get_config(["toolbar_y"]):
            # Moving onto the toolbar lifts the pen.
            self.whiteboard.pen_up()
        elif self.whiteboard.last_pos is not None:
            segment = Line(self.whiteboard.get_config(["active_color"]),
                           self.whiteboard.last_pos,
                           self.whiteboard.mouse_position,
                           self.whiteboard.get_config(["font_size"]))
            self.whiteboard.draw(segment, datetime.timestamp(datetime.now()))
        self.whiteboard.update_last_pos()
    def handle_mouse_button_up(self):
        """
        Button released: pen up and forget the last position.
        """
        self.whiteboard.mouse_position = (0, 0)
        self.whiteboard.pen_up()
        self.whiteboard.reset_last_pos()
    def handle_mouse_button_down(self):
        """
        Button pressed: pen down.
        """
        self.whiteboard.pen_down()
    def handle_all(self, event):
        """
        Dispatch the event to its dedicated handler.
        """
        if self.handle(event):
            return
        if event.type == pygame.MOUSEMOTION:
            self.handle_mouse_motion()
        elif event.type == pygame.MOUSEBUTTONUP:
            self.handle_mouse_button_up()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.handle_mouse_button_down()
        pygame.display.flip()
class HandleText(EventHandler):
    """
    Event handler for text-box mode.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
    def box_selection(self, event):
        """
        Handles user clicks.
        A right click creates a new text box.
        A left click checks whether it selects the area of an existing box,
        which then becomes the active box.
        """
        # Right click: create a fresh (empty) text box at the click position.
        if event.dict["button"] == 3:
            coord = event.dict['pos']
            text_box = TextBox(*coord, self.whiteboard.get_config(["text_box", "textbox_width"]),
                               self.whiteboard.get_config(["text_box", "textbox_length"]),
                               self.whiteboard.get_config(["text_box", "active_color"]),
                               self.whiteboard.get_config(["text_box", "font"]),
                               self.whiteboard.get_config(["text_box", "font_size"]), "",
                               self.whiteboard.get_config(["active_color"]))
            self.whiteboard.append_text_box(text_box)
            now = datetime.now()
            timestamp = datetime.timestamp(now)
            self.whiteboard.draw(text_box, timestamp)
            self.whiteboard.set_active_box(text_box)
        # Left click: activate the first existing box hit by the click, if any.
        elif event.dict["button"] == 1:
            for box in self.whiteboard.get_text_boxes():
                if box.rect.collidepoint(event.pos):
                    self.whiteboard.set_active_box(box, new=False)
    def write_in_box(self, event):
        """
        Handles the user's keyboard input.
        If a box is selected, its text is modified accordingly.
        """
        if self.whiteboard.active_box is not None:
            # Delete one character
            if event.key == pygame.K_BACKSPACE:
                self.whiteboard.active_box.delete_char_from_text(self.whiteboard)
                # To modify the box we unfortunately have to re-render the whole board
                self.whiteboard.clear_screen()
                self.whiteboard.load_actions(self.whiteboard.get_hist())
            elif event.key == pygame.K_TAB or event.key == pygame.K_RETURN:
                pass
            else:
                self.whiteboard.active_box.add_character_to_text(event.unicode, self.whiteboard)
                # Re-render everything here as well, to avoid overlapping glyphs
                self.whiteboard.clear_screen()
                self.whiteboard.load_actions(self.whiteboard.get_hist())
        if self.whiteboard.active_box is not None:
            # Re-render the text.
            self.whiteboard.active_box.set_txt_surface(self.whiteboard.active_box.render_font(
                self.whiteboard.active_box.get_textbox_text(),
                self.whiteboard.active_box.get_textbox_color()))
    def handle_all(self, event):
        """
        Dispatches every event to the matching method via an if tree.
        """
        handled = self.handle(event)
        if handled:
            return
        if event.type == pygame.MOUSEBUTTONDOWN:
            self.box_selection(event)
        if event.type == pygame.KEYDOWN:
            self.write_in_box(event)
        pygame.display.flip()
class HandleRect(EventHandler):
    """
    Event handler for rectangle mode.
    Rectangles are drawn with a click-and-drag gesture.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
        self.c1 = None
    def handle_mouse_button_up(self, coord):
        """
        Button released: the second corner is known, so draw the rectangle.
        """
        if self.c1 is None:
            return
        corner = list(coord)
        # Clamp so the rectangle never spills onto the toolbar.
        corner[1] = max(self.whiteboard.get_config(["toolbar_y"]), corner[1])
        shape = Rectangle(self.c1, corner, self.whiteboard.get_config(["active_color"]))
        self.whiteboard.draw(shape, datetime.timestamp(datetime.now()))
        self.c1 = None
    def handle_mouse_button_down(self, event):
        """
        Button pressed: remember the first corner of the rectangle.
        """
        if event.dict["button"] == 1:
            self.c1 = event.dict['pos']
    def handle_all(self, event):
        """
        Dispatch the event to its dedicated handler.
        """
        if self.handle(event):
            return
        if event.type == pygame.MOUSEBUTTONUP:
            self.handle_mouse_button_up(coord=event.dict['pos'])
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.handle_mouse_button_down(event)
        pygame.display.flip()
class HandleCircle(EventHandler):
    """
    Event handler for circle mode.
    Circles are also drawn with a click-and-drag gesture.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
        self.center = None
    def handle_mouse_button_up(self, coord):
        """
        Button released: a point on the circle is known, so draw it.
        """
        if self.center is None:
            return
        shape = Circle(self.center, list(coord),
                       self.whiteboard.get_config(["active_color"]),
                       self.whiteboard.get_config(["toolbar_y"]))
        self.whiteboard.draw(shape, datetime.timestamp(datetime.now()))
        self.center = None
    def handle_mouse_button_down(self, event):
        """
        Button pressed: remember the centre of the circle.
        """
        if event.dict["button"] == 1:
            self.center = event.dict['pos']
    def handle_all(self, event):
        """
        Dispatch the event to its dedicated handler.
        """
        if self.handle(event):
            return
        if event.type == pygame.MOUSEBUTTONUP:
            self.handle_mouse_button_up(coord=event.dict['pos'])
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.handle_mouse_button_down(event)
        pygame.display.flip()
|
"""
Module contenant toutes les figures et opérations de base
"""
import pygame
import pygame.draw
from datetime import datetime
def distance(v1, v2):
    """
    Return the Euclidean distance between two 2-D vectors.

    Falls back to an explanatory string when the arguments are not
    indexable numeric vectors (original behaviour, kept for compatibility).
    """
    try:
        dx = v1[0] - v2[0]
        dy = v1[1] - v2[1]
    except TypeError:
        return "Ce ne sont pas des vecteurs"
    return (dx * dx + dy * dy) ** 0.5
class Figure:
    """Abstract base class of all drawable figures; default methods are no-ops."""
    def __init__(self):
        pass
    def draw(self):
        """Render the figure on screen; overridden by subclasses."""
        pass
    def fetch_params(self):
        """Return the figure's parameters as a dict; overridden by subclasses."""
        pass
class Point(Figure):
    """
    A point ready to be drawn on the board.

    coord (list): coordinates
    point_color (list): RGB colour
    font_size (int): thickness in pixels
    toolbar_size (int): height of the toolbar at the top of the board, which
        the point must not overlap
    """
    def __init__(self, coord, point_color, font_size, toolbar_size=0):
        Figure.__init__(self)
        self.type = "Point"
        self.point_color = point_color
        self.font_size = font_size
        # Clamp y so a large pen size never paints over the toolbar.
        self.coord = [coord[0], max(coord[1], toolbar_size + font_size + 1)]
    def draw(self, screen):
        """
        Render the point on screen.
        """
        pygame.draw.circle(screen, self.point_color, self.coord, self.font_size)
        pygame.display.flip()
        return
    def fetch_params(self):
        """
        Return the point's parameters as a dict.
        """
        return {"coord": self.coord, "point_color": self.point_color, "font_size": self.font_size}
class Line(Figure):
    """
    A straight line segment.

    line_color (list): RGB colour of the line
    start_pos (list): coordinates of the segment start
    end_pos (list): coordinates of the segment end
    font_size (int): thickness
    """
    def __init__(self, line_color, start_pos, end_pos, font_size):
        Figure.__init__(self)
        self.type = "Line"
        self.line_color = line_color
        self.font_size = font_size
        self.start_pos = start_pos
        self.end_pos = end_pos
    def draw(self, screen):
        """
        Render the segment on screen.
        """
        pygame.draw.line(screen, self.line_color, self.start_pos, self.end_pos, self.font_size)
        return
    def fetch_params(self):
        """
        Return the segment's parameters as a dict.
        """
        return {"line_color": self.line_color, "start_pos": self.start_pos, "end_pos": self.end_pos,
                "font_size": self.font_size}
class Rectangle(Figure):
    """
    A rectangle.

    color (list): rectangle colour
    left, right (int): x coordinates of the left/right edges
    top, bottom (int): y coordinates of the top/bottom edges
    """
    def __init__(self, c1, c2, color):
        """
        Build the rectangle from two opposite corners.

        c1, c2 (lists): coordinates of two opposite corners of the rectangle
        """
        Figure.__init__(self)
        self.c1 = c1
        self.c2 = c2
        self.color = color
        # Normalise the two corners into left/top/right/bottom bounds.
        self.left, self.right = min(c1[0], c2[0]), max(c1[0], c2[0])
        self.top, self.bottom = min(c1[1], c2[1]), max(c1[1], c2[1])
        self.width = self.right - self.left
        self.length = self.bottom - self.top
        self.rect = pygame.Rect(self.left, self.top, self.width, self.length)
        self.type = "rect"
    def draw(self, screen):
        """
        Render the rectangle on screen.
        """
        pygame.draw.rect(screen, self.color, self.rect, 0)
    def fetch_params(self):
        """
        Return the rectangle's parameters as a dict.
        """
        return {"c1": self.c1, "c2": self.c2, "color": self.color}
class Circle(Figure):
    """A filled circle.

    center (list): coordinates of the center
    extremity (list): a point on the circle (recomputed after radius clamping)
    color (list): RGB color
    toolbar_size (int): toolbar height in pixels the circle must not cover
    radius (int): radius in pixels
    """

    def __init__(self, center, extremity, color, toolbar_size=0):
        super().__init__()
        self.center = center
        # Clamp the radius so the circle never spills over the toolbar.
        wanted = int(distance(center, extremity))
        self.radius = min(wanted, center[1] - toolbar_size - 1)
        self.extremity = [center[0] + self.radius, center[1]]
        self.color = color
        self.type = "circle"

    def draw(self, screen):
        """Render the circle on the given surface."""
        pygame.draw.circle(screen, self.color, self.center, self.radius)

    def fetch_params(self):
        """Return the constructor arguments as a dict."""
        return {"center": self.center, "extremity": self.extremity, "color": self.color}
class TextBox(Figure):
    """
    A text box drawn on the whiteboard.

    x, y (int): top-left corner of the box (so (x, y) is the topleft)
    w (int): width of the box
    h (int): height of the box
    box_color (list): RGB color of the box outline
    font (string): font name of the text
    font_size (int): character size
    text (string): content of the text box
    text_color (list): RGB color of the text
    """
    def __init__(self, x, y, w, h, box_color, font, font_size, text, text_color):
        Figure.__init__(self)
        self.__rect = pygame.Rect(x, y, w, h)
        self._color = box_color
        self._text = text
        self._font = font
        self._font_size = font_size
        self._sysfont = pygame.font.SysFont(font, font_size)
        self._text_color = text_color
        self._txt_surface = self._sysfont.render(text, True, self._text_color)
        # Identifier built from the creation coordinates; used to match this box
        # against its action entry in the shared whiteboard history.
        self.id_counter = str(x) + "_" + str(y)
        self.type = "Text_box"
    """
    Encapsulation
    """
    def fetch_params(self):
        """
        Return a dict of the constructor parameters.
        """
        return {"x": self.__rect.x, "y": self.__rect.y, "w": self.__rect.w, "h": self.__rect.h,
                "box_color": self._color, "font": self._font, "font_size": self._font_size, "text": self._text,
                "text_color": self._text_color}
    def get_textbox_color(self):
        return self._color
    def set_textbox_color(self, new_color):
        self._color = new_color
    def get_textbox_text(self):
        return self._text
    def add_character_to_text(self, char, whiteboard):
        """
        Append one character to the text, when the local client is allowed to
        edit this box, and propagate the change into the whiteboard history.
        """
        id_counter = whiteboard.active_box.id_counter
        for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']:
            if action['id'] == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    self._text += char
                    action['params']["text"] = whiteboard.active_box.get_textbox_text()
                    action['params']["w"] = whiteboard.active_box.update()
                    now = datetime.now()
                    timestamp = datetime.timestamp(now)
                    action['timestamp'] = timestamp
                    action['client'] = whiteboard.name
                    action_to_update_textbox = action
        # NOTE(review): `action` below is the variable leaked from the loop
        # above; if the history holds no Text_box action this raises NameError.
        for textbox in whiteboard.get_text_boxes():
            if textbox.id_counter == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    whiteboard.del_text_box(textbox)
                    try:
                        whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"]))
                    except UnboundLocalError:
                        # No matching, editable action was found above, so
                        # there is nothing to rebuild.
                        print('Something unexpected happened. A textbox update may have failed')
    def delete_char_from_text(self, whiteboard):
        """
        Remove the last character of the text, when the local client is allowed
        to edit this box, and propagate the change into the whiteboard history.
        """
        id_counter = whiteboard.active_box.id_counter
        for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']:
            if action['id'] == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    self._text = self._text[:-1]
                    action['params']["text"] = whiteboard.active_box.get_textbox_text()
                    now = datetime.now()
                    timestamp = datetime.timestamp(now)
                    action['timestamp'] = timestamp
                    action['client'] = whiteboard.name
                    action_to_update_textbox = action
        for textbox in whiteboard.get_text_boxes():
            if textbox.id_counter == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    whiteboard.del_text_box(textbox)
                    try:
                        whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"]))
                    except UnboundLocalError:
                        print('Something unexpected happened. A textbox update may have failed')
    def render_font(self, text, color, antialias=True):
        """
        Render the given text with this box's font and return the surface.
        """
        return self._sysfont.render(text, antialias, color)
    def set_txt_surface(self, value):
        self._txt_surface = value
    @property
    def rect(self):
        return self.__rect
    def update(self):
        """
        Grow the outline rectangle when the text gets too long; return the new
        width (at least 140 px).
        """
        width = max(140, self._txt_surface.get_width() + 20)
        self.__rect.w = width
        return width
    def draw(self, screen):
        """
        Render the text box.
        """
        # Blit the text
        screen.blit(self._txt_surface, (self.__rect.x + 5, self.__rect.y + 5))
        # Blit the outline rectangle
        pygame.draw.rect(screen, self._color, self.__rect, 2)
# =============================================================================
# fonction de dessins instantanees
# =============================================================================
def draw_point(params, screen):
    """Instantiate a Point from a parameter dict and render it.

    params (dict): keyword arguments for the Point constructor
    screen (pygame screen): target surface
    """
    try:
        figure = Point(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_line(params, screen):
    """Instantiate a Line from a parameter dict and render it.

    params (dict): keyword arguments for the Line constructor
    screen (pygame screen): target surface
    """
    try:
        figure = Line(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_textbox(params, screen):
    """Instantiate a TextBox from a parameter dict and render it.

    params (dict): keyword arguments for the TextBox constructor
    screen (pygame screen): target surface
    """
    try:
        figure = TextBox(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_rect(params, screen):
    """Instantiate a Rectangle from a parameter dict and render it.

    params (dict): keyword arguments for the Rectangle constructor
    screen (pygame screen): target surface
    """
    try:
        figure = Rectangle(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_circle(params, screen):
    """Instantiate a Circle from a parameter dict and render it.

    params (dict): keyword arguments for the Circle constructor
    screen (pygame screen): target surface
    """
    try:
        figure = Circle(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
|
{
"imported_by": [
"/src/white_board.py"
],
"imports": [
"/src/figures.py"
]
}
|
riadghorra/whiteboard-oop-project
|
/src/white_board.py
|
import pygame
import pygame.draw
import json
import sys
from functools import reduce
import operator
from figures import TextBox, draw_line, draw_point, draw_textbox, draw_rect, draw_circle
from tools import Mode, ColorBox, Auth, Save, FontSizeBox, HandlePoint, HandleLine, HandleText, HandleRect, HandleCircle
import copy
'''
Ouverture de la configuration initiale
'''
def dict_to_binary(the_dict):
    """Serialize a dict to UTF-8 encoded JSON bytes.

    :param the_dict: JSON-serializable dictionary
    :return: the JSON text of the dict, encoded as bytes
    """
    # Fix: the original bound the JSON text to a local named `str`, shadowing
    # the builtin; encode directly instead.
    return json.dumps(the_dict).encode("utf-8")
def binary_to_dict(binary):
    """Decode UTF-8 encoded JSON bytes into a dict.

    :param binary: bytes payload received from the network
    :return: the decoded dict, or the empty history dict
             {"actions": [], "message": [], "auth": []} when the payload is
             malformed or of the wrong type
    """
    try:
        jsn = binary.decode("utf-8")
        d = json.loads(jsn)
    except (TypeError, json.decoder.JSONDecodeError) as e:
        # Fix: the original tested `e == TypeError`, comparing an exception
        # *instance* to the exception *class* — always False, so the wrong
        # diagnostic was printed. Use isinstance instead.
        if isinstance(e, TypeError):
            print("Le message reçu n'est pas du format attendu")
        else:
            print('Un paquet a été perdu')
        return {"actions": [], "message": [], "auth": []}
    return d
class WhiteBoard:
    """Main whiteboard window: builds the pygame GUI, replays history, and
    synchronizes drawing actions with the server."""
    def __init__(self, client_name, start_config, start_hist=None):
        """
        Whiteboard initialization : we build the GUI using the config file and the potential history of actions made by
        other users. Returns a Whiteboard window ready to use.
        :param client_name: Name of the client who just opened a new whiteboard window (str)
        :param start_config: Whiteboard configuration stored in config.json and loaded as a dict (dict)
        :param start_hist: History of actions by other users (dict)
        """
        pygame.init()
        if not isinstance(client_name, str):
            raise TypeError("Client name must be a string")
        if not isinstance(start_config, dict):
            raise TypeError("Starting configuration file must be a dictionary")
        if start_hist is None:
            start_hist = {"actions": [], "message": [], "auth": []}
        elif not isinstance(start_hist, dict):
            raise TypeError("Starting history file must be a dictionary")
        self._done = False
        self._config = start_config
        self._name = client_name
        self._hist = start_hist
        self.__screen = pygame.display.set_mode([self._config["width"], self._config["length"]])
        self.__screen.fill(self._config["board_background_color"])
        # One event handler per drawing mode; start() dispatches to the active one.
        self.__handler = {"line": HandleLine(self),
                          "point": HandlePoint(self),
                          "text": HandleText(self),
                          "rect": HandleRect(self),
                          "circle": HandleCircle(self)}
        # Horizontal separator between the toolbar and the drawing area.
        pygame.draw.line(self.__screen, self._config["active_color"], [0, self._config["toolbar_y"]],
                         [self._config["width"], self._config["toolbar_y"]], 1)
        # We create a global variable to keep track of the position of the last mode box we create in order to make
        # sure that there is no overlapping between left and right boxes on the toolbar on the toolbar
        """
        Tracé de la box auth, qui permet de donner l'autorisation de modification des textbox
        """
        # Auth box: toggles permission for other clients to edit our textboxes.
        last_left_position = 0
        last_right_position = self._config["width"] - self._config["mode_box_size"][0]
        self._erasing_auth = False
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__auth_box = Auth((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__auth_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Tracé de la boite save qui permet d'enregistrer l'image
        """
        # Save box: saves the drawing as an image.
        try:
            assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                 "toolbar, please increase width in config.json"
            self.__save_box = Save((last_left_position, 0), tuple(self._config["auth_box_size"]))
            last_left_position += self._config["mode_box_size"][0]
            self.__save_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        # Drawing-mode selector boxes, laid out left-to-right on the toolbar.
        self.__modes = [Mode("point", (2 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("line", (3 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("text", (4 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("rect", (5 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
                        Mode("circle", (6 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"]))
                        ]
        # If right and left boxes overlap, raise an error and close pygame
        try:
            for mod in self.__modes:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                mod.add(self.__screen)
                last_left_position += self._config["mode_box_size"][0]
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Choix des couleurs
        """
        # Color palette boxes, laid out right-to-left on the toolbar.
        self.__colors = []
        try:
            for key, value in self._config["color_palette"].items():
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                color_box = ColorBox(value, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__colors.append(color_box)
                color_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        Choix des épaisseurs
        """
        # Pen-size boxes, also right-to-left.
        self.__font_sizes = []
        try:
            for size in self._config["pen_sizes"]:
                assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
                                                                     "toolbar, please increase width in config.json"
                font_size_box = FontSizeBox(size, (last_right_position, 0), tuple(self._config["mode_box_size"]))
                last_right_position -= self._config["mode_box_size"][0]
                self.__font_sizes.append(font_size_box)
                font_size_box.add(self.__screen)
        except AssertionError as e:
            print(e)
            pygame.quit()
            sys.exit()
        """
        initialisation des variables de dessin
        """
        pygame.display.flip()
        self._draw = False
        self._last_pos = None
        self._mouse_position = (0, 0)
        """
        Initialisation des paramètres des text boxes
        """
        self._text_boxes = []  # This list will hold the TextBox objects
        self.active_box = None
        self.load_actions(self._hist)
        self.__modification_allowed = copy.deepcopy(self._hist["auth"])
        # if some client names are in this list, you will have the authorisation to edit their textboxes
        for action in self._hist["actions"]:
            if action["type"] == "Text_box":
                self.append_text_box(TextBox(**action["params"]))
    """
    Encapsulation
    """
    def is_done(self):
        # True once the user has quit the whiteboard.
        return self._done
    def end(self):
        self._done = True
    def get_config(self, maplist):
        """
        Getter of config file. Uses a list of keys to traverse the config dict
        :param maplist: list of keys from parent to child to get the wanted value (list)
        :return: value of a key in the config file (object)
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            return reduce(operator.getitem, maplist, self._config)
        except (KeyError, TypeError):
            return None
    def set_config(self, maplist, value):
        """
        Setter of config file. Uses the getter and assigns value to a key
        :param maplist: list of keys from parent to child to get the wanted value (list)
        :param value: value to set (object)
        :return: None if failed
        """
        if not type(maplist) == list:
            maplist = list(maplist)
        try:
            self.get_config(maplist[:-1])[maplist[-1]] = value
        except (KeyError, TypeError):
            return None
    def get_hist(self, key=None):
        # Return the whole history dict, or one of its entries when key is given.
        if key is None:
            return self._hist
        else:
            return self._hist[key]
    def add_to_hist(self, value):
        self._hist["actions"].append(value)
    @property
    def screen(self):
        return self.__screen
    def clear_screen(self):
        """
        Clear the screen by coloring it to background color. Does not color the toolbar
        :return:
        """
        self.__screen.fill(self.get_config(["board_background_color"]), (0, self.get_config(["toolbar_y"]) + 1,
                                                                         self.get_config(["width"]),
                                                                         self.get_config(["length"]) - self.get_config(
                                                                             ["toolbar_y"]) + 1))
    def is_drawing(self):
        return self._draw
    def pen_up(self):
        self._draw = False
    def pen_down(self):
        self._draw = True
    @property
    def name(self):
        return self._name
    @property
    def modification_allowed(self):
        return self.__modification_allowed
    @property
    def last_pos(self):
        return self._last_pos
    def reset_last_pos(self):
        self._last_pos = None
    def update_last_pos(self):
        self._last_pos = self._mouse_position
    def __get_mouse_position(self):
        return self._mouse_position
    def __set_mouse_position(self, value):
        self._mouse_position = value
    mouse_position = property(__get_mouse_position, __set_mouse_position)
    def get_text_boxes(self):
        return self._text_boxes
    def append_text_box(self, textbox):
        self._text_boxes.append(textbox)
    def del_text_box(self, textbox):
        self._text_boxes.remove(textbox)
    def draw(self, obj, timestamp):
        """
        Method to draw figures defined in figures.py. Also adds drawn objects to history.
        :param obj: class of figure to draw
        :param timestamp: timestamp at which the drawing happens
        :return: None
        """
        # Draw object on screen
        obj.draw(self.__screen)
        # Create dict containing object parameters and right timestamp to add to history
        hist_obj = {"type": obj.type, "timestamp": timestamp, "params": obj.fetch_params(), "client": self._name}
        # Special case if it's a Text_box object, we need to get the correct box id
        if hist_obj["type"] == "Text_box":
            hist_obj["id"] = obj.id_counter
            hist_obj["owner"] = self._name
        self.add_to_hist(hist_obj)
    def switch_config(self, event):
        """
        Switch between different modes
        :param event: Action by the user : a mouse click on either modes, colors or font sizes
        :return: None
        """
        if event == "quit":
            self.set_config(["mode"], "quit")
        # We go through each mode, color and font size to see if that mode should be triggered by the event
        else:
            for mod in self.__modes:
                if mod.is_triggered(event):
                    self.set_config(["mode"], mod.name)
            for col in self.__colors:
                if col.is_triggered(event):
                    self.set_config(["text_box", "text_color"], col.color)
                    self.set_config(["active_color"], col.color)
            for font_size_ in self.__font_sizes:
                if font_size_.is_triggered(event):
                    self.set_config(["font_size"], font_size_.font_size)
            if self.__auth_box.is_triggered(event):
                # Toggle whether other clients may edit our textboxes; the new
                # state is pushed to the server through self._hist["auth"].
                self._erasing_auth = not self._erasing_auth
                self.__auth_box.switch(self.__screen, self._erasing_auth, self.__modification_allowed, self._name)
                self._hist["auth"] = [self._name, self._erasing_auth]
            if self.__save_box.is_triggered(event):
                self.__save_box.save(self.__screen, self)
                print("Le dessin a été sauvegardé dans le dossier")
    def set_active_box(self, box, new=True):
        """
        A method specific to text boxes : select an existing box or one that has just been created to edit. This box is
        thus said to be "active"
        :param box: instance of the TextBox class
        :param new: boolean to specify if the box was just created or already existed
        :return:
        """
        # If the selected box is already the active one, do nothing
        if box == self.active_box:
            return
        # If there is a box that is active we must turn it into "inactive"
        if self.active_box is not None:
            # Change its color to the "inactive color"
            self.active_box.set_textbox_color(self.get_config(["text_box", "inactive_color"]))
            # Select the id of previous active box
            id_counter = self.active_box.id_counter
            # Find the previous active box and change its color in history
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action["params"]["text"] = self.active_box.get_textbox_text()
                    action['params']["box_color"] = self.get_config(["text_box", "inactive_color"])
            # Render it
            self.active_box.draw(self.__screen)
        # If selected box already exists on the whiteboard we must turn it into "active"
        if not new:
            id_counter = box.id_counter
            for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
                if action['id'] == id_counter:
                    action['params']["box_color"] = self.get_config(["text_box", "active_color"])
        # Draw the newly activated box
        self.active_box = box
        self.active_box.draw(self.__screen)
        pygame.display.flip()
    def draw_action(self, action):
        """
        Draw the result of an action by the user on the whiteboard
        :param action: usually a mouse action by the user
        :return:
        """
        if action["type"] == "Point":
            draw_point(action["params"], self.__screen)
        if action["type"] == "Line":
            draw_line(action["params"], self.__screen)
        if action["type"] == "Text_box":
            draw_textbox(action["params"], self.__screen)
        if action["type"] == "rect":
            draw_rect(action["params"], self.__screen)
        if action["type"] == "circle":
            draw_circle(action["params"], self.__screen)
    def load_actions(self, hist):
        """
        Load actions from history
        :param hist: list of dict representing the history of actions in the whiteboard session
        :return:
        """
        # Sort actions chronologically
        sred = sorted(hist["actions"],
                      key=lambda value: value["timestamp"])
        # Go through each action and draw it
        for action in sred:
            self.draw_action(action)
        pygame.display.flip()
    def start(self, connexion_avec_serveur):
        """
        Start and run a whiteboard window
        :param connexion_avec_serveur: socket to connect with server (socket.socket)
        :return:
        """
        # Initialize timestamp
        last_timestamp_sent = 0
        while not self.is_done():
            # Browse all events done by user
            for event in pygame.event.get():
                # If user closes the window, quit the whiteboard
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                # Use specific handling method for current drawing mode
                self.__handler[self.get_config(["mode"])].handle_all(event)
            # msg_a_envoyer["message"] = "CARRY ON"
            # Send dict history to server
            if self._hist["auth"] != [self._name, self._erasing_auth]:
                self._hist["auth"] = []
            new_modifs = [modif for modif in self.get_hist()["actions"] if
                          (modif["timestamp"] > last_timestamp_sent and self._name == modif["client"])]
            message_a_envoyer = {"message": "", 'actions': new_modifs, "auth": self._hist["auth"]}
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
            self._hist["auth"] = []
            # Update last timestamp sent
            if new_modifs:
                last_timestamp_sent = max([modif["timestamp"] for modif in new_modifs])
            # Dict received from server
            try:
                new_hist = binary_to_dict(connexion_avec_serveur.recv(2 ** 24))
            except (ConnectionResetError, ConnectionAbortedError) as e:
                print("Le serveur a été éteint, veuillez le relancer")
                self._done = True
                # NOTE(review): if recv fails on the very first iteration,
                # `new_hist` is never bound and the code below raises
                # NameError (`pass` does not skip it) — confirm and guard.
                pass
            # Consider actions made by another client after new_last_timestamp
            new_actions = [action for action in new_hist["actions"] if action["client"] != self._name]
            for action in new_actions:
                # Here there are two cases, a new figure (point, line, rect, circle, new text box) is created or an
                # existing text box is modified. For this second case, we use the variable "matched" as indicator
                matched = False
                if action["type"] == "Text_box":
                    # Find the text box id
                    for textbox in [x for x in self._hist["actions"] if x["type"] == "Text_box"]:
                        if action["id"] == textbox["id"]:
                            # Modify it with the newly acquired parameters from server
                            textbox["params"]["text"], textbox["params"]["w"] = action["params"]["text"], \
                                                                               action["params"]["w"]
                            action_to_update_textbox = action
                            for element in self.get_text_boxes():
                                if element.id_counter == action["id"]:
                                    self.del_text_box(element)
                            self.append_text_box(TextBox(**action_to_update_textbox["params"]))
                            # Draw the modified text box with updated parameters
                            self.clear_screen()
                            self.load_actions(self._hist)
                            matched = True
                # If we are in the first case, we add the new actions to history and draw them
                if not matched:
                    self.add_to_hist(action)
                    if action["type"] == "Text_box":
                        self.append_text_box(TextBox(**action["params"]))
                    self.draw_action(action)
            if self._name in new_hist["auth"]:
                new_hist["auth"].remove(self._name)
            if new_hist["auth"] != self.__modification_allowed:
                self.__modification_allowed = copy.deepcopy(new_hist["auth"])
            pygame.display.flip()
        # Once we are done, we quit pygame and send end message
        pygame.quit()
        print("Fermeture de la connexion")
        # NOTE(review): `message_a_envoyer` is only bound inside the loop; if
        # the whiteboard closes before a first iteration completes this raises
        # NameError — confirm.
        message_a_envoyer["message"] = "END"
        try:
            connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
        except (ConnectionResetError, BrokenPipeError) as e:
            print("Il n'y a pas de message à envoyer au serveur")
        connexion_avec_serveur.close()
    def start_local(self):
        """
        Starts Whiteboard locally. Used to test stuff and debug.
        :return:
        """
        while not self.is_done():
            for event in pygame.event.get():
                if self.get_config(["mode"]) == "quit":
                    self.end()
                    break
                self.__handler[self.get_config(["mode"])].handle_all(event)
            pygame.display.flip()
        pygame.quit()
|
"""
Module contenant toutes les figures et opérations de base
"""
import pygame
import pygame.draw
from datetime import datetime
def distance(v1, v2):
    """Return the Euclidean distance between two 2-D vectors.

    Returns the string "Ce ne sont pas des vecteurs" when the arguments do not
    behave like numeric 2-D vectors.
    """
    try:
        dx = v1[0] - v2[0]
        dy = v1[1] - v2[1]
        return (dx ** 2 + dy ** 2) ** 0.5
    except TypeError:
        return "Ce ne sont pas des vecteurs"
class Figure:
    """Abstract base for every drawable shape on the whiteboard."""

    def __init__(self):
        pass

    def draw(self):
        """Render the figure; concrete subclasses override this."""
        pass

    def fetch_params(self):
        """Return constructor parameters; concrete subclasses override this."""
        pass
class Point(Figure):
    """A dot ready to be drawn on the board.

    coord (list): position on screen
    point_color (list): RGB color
    font_size (int): radius in pixels
    toolbar_size (int): height of the toolbar band the dot must not overlap
    """

    def __init__(self, coord, point_color, font_size, toolbar_size=0):
        super().__init__()
        self.point_color = point_color
        self.font_size = font_size
        # Clamp the y coordinate so a large dot never covers the toolbar.
        min_y = toolbar_size + font_size + 1
        self.coord = [coord[0], max(coord[1], min_y)]
        self.type = "Point"

    def draw(self, screen):
        """Render the point as a filled circle and refresh the display."""
        pygame.draw.circle(screen, self.point_color, self.coord, self.font_size)
        pygame.display.flip()

    def fetch_params(self):
        """Return the parameters needed to rebuild this point, as a dict."""
        return {
            "coord": self.coord,
            "point_color": self.point_color,
            "font_size": self.font_size,
        }
class Line(Figure):
    """A straight segment drawn on the board.

    line_color (list): RGB color of the segment
    start_pos (list): coordinates of the first endpoint
    end_pos (list): coordinates of the second endpoint
    font_size (int): stroke width in pixels
    """

    def __init__(self, line_color, start_pos, end_pos, font_size):
        super().__init__()
        self.line_color = line_color
        self.start_pos = start_pos
        self.end_pos = end_pos
        self.font_size = font_size
        self.type = "Line"

    def draw(self, screen):
        """Render the segment on the given surface."""
        pygame.draw.line(screen, self.line_color, self.start_pos, self.end_pos, self.font_size)

    def fetch_params(self):
        """Return the constructor arguments as a dict."""
        return {
            "line_color": self.line_color,
            "start_pos": self.start_pos,
            "end_pos": self.end_pos,
            "font_size": self.font_size,
        }
class Rectangle(Figure):
    """An axis-aligned filled rectangle.

    color (list): RGB fill color
    left, right (int): x coordinates of the vertical edges
    top, bottom (int): y coordinates of the horizontal edges
    """

    def __init__(self, c1, c2, color):
        """Build the rectangle from two opposite corners.

        c1, c2 (lists): coordinates of two opposite corners
        """
        super().__init__()
        self.c1 = c1
        self.c2 = c2
        self.color = color
        # Normalize the two corners into left/top/right/bottom bounds.
        self.left, self.right = min(c1[0], c2[0]), max(c1[0], c2[0])
        self.top, self.bottom = min(c1[1], c2[1]), max(c1[1], c2[1])
        self.width = self.right - self.left
        self.length = self.bottom - self.top
        self.rect = pygame.Rect(self.left, self.top, self.width, self.length)
        self.type = "rect"

    def draw(self, screen):
        """Render the filled rectangle."""
        pygame.draw.rect(screen, self.color, self.rect, 0)

    def fetch_params(self):
        """Return the constructor arguments as a dict."""
        return {"c1": self.c1, "c2": self.c2, "color": self.color}
class Circle(Figure):
    """A filled circle.

    center (list): coordinates of the center
    extremity (list): a point on the circle (recomputed after radius clamping)
    color (list): RGB color
    toolbar_size (int): toolbar height in pixels the circle must not cover
    radius (int): radius in pixels
    """

    def __init__(self, center, extremity, color, toolbar_size=0):
        super().__init__()
        self.center = center
        # Clamp the radius so the circle never spills over the toolbar.
        wanted = int(distance(center, extremity))
        self.radius = min(wanted, center[1] - toolbar_size - 1)
        self.extremity = [center[0] + self.radius, center[1]]
        self.color = color
        self.type = "circle"

    def draw(self, screen):
        """Render the circle on the given surface."""
        pygame.draw.circle(screen, self.color, self.center, self.radius)

    def fetch_params(self):
        """Return the constructor arguments as a dict."""
        return {"center": self.center, "extremity": self.extremity, "color": self.color}
class TextBox(Figure):
    """
    A text box drawn on the whiteboard.

    x, y (int): top-left corner of the box (so (x, y) is the topleft)
    w (int): width of the box
    h (int): height of the box
    box_color (list): RGB color of the box outline
    font (string): font name of the text
    font_size (int): character size
    text (string): content of the text box
    text_color (list): RGB color of the text
    """
    def __init__(self, x, y, w, h, box_color, font, font_size, text, text_color):
        Figure.__init__(self)
        self.__rect = pygame.Rect(x, y, w, h)
        self._color = box_color
        self._text = text
        self._font = font
        self._font_size = font_size
        self._sysfont = pygame.font.SysFont(font, font_size)
        self._text_color = text_color
        self._txt_surface = self._sysfont.render(text, True, self._text_color)
        # Identifier built from the creation coordinates; used to match this box
        # against its action entry in the shared whiteboard history.
        self.id_counter = str(x) + "_" + str(y)
        self.type = "Text_box"
    """
    Encapsulation
    """
    def fetch_params(self):
        """
        Return a dict of the constructor parameters.
        """
        return {"x": self.__rect.x, "y": self.__rect.y, "w": self.__rect.w, "h": self.__rect.h,
                "box_color": self._color, "font": self._font, "font_size": self._font_size, "text": self._text,
                "text_color": self._text_color}
    def get_textbox_color(self):
        return self._color
    def set_textbox_color(self, new_color):
        self._color = new_color
    def get_textbox_text(self):
        return self._text
    def add_character_to_text(self, char, whiteboard):
        """
        Append one character to the text, when the local client is allowed to
        edit this box, and propagate the change into the whiteboard history.
        """
        id_counter = whiteboard.active_box.id_counter
        for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']:
            if action['id'] == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    self._text += char
                    action['params']["text"] = whiteboard.active_box.get_textbox_text()
                    action['params']["w"] = whiteboard.active_box.update()
                    now = datetime.now()
                    timestamp = datetime.timestamp(now)
                    action['timestamp'] = timestamp
                    action['client'] = whiteboard.name
                    action_to_update_textbox = action
        # NOTE(review): `action` below is the variable leaked from the loop
        # above; if the history holds no Text_box action this raises NameError.
        for textbox in whiteboard.get_text_boxes():
            if textbox.id_counter == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    whiteboard.del_text_box(textbox)
                    try:
                        whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"]))
                    except UnboundLocalError:
                        # No matching, editable action was found above, so
                        # there is nothing to rebuild.
                        print('Something unexpected happened. A textbox update may have failed')
    def delete_char_from_text(self, whiteboard):
        """
        Remove the last character of the text, when the local client is allowed
        to edit this box, and propagate the change into the whiteboard history.
        """
        id_counter = whiteboard.active_box.id_counter
        for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']:
            if action['id'] == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    self._text = self._text[:-1]
                    action['params']["text"] = whiteboard.active_box.get_textbox_text()
                    now = datetime.now()
                    timestamp = datetime.timestamp(now)
                    action['timestamp'] = timestamp
                    action['client'] = whiteboard.name
                    action_to_update_textbox = action
        for textbox in whiteboard.get_text_boxes():
            if textbox.id_counter == id_counter:
                if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
                    whiteboard.del_text_box(textbox)
                    try:
                        whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"]))
                    except UnboundLocalError:
                        print('Something unexpected happened. A textbox update may have failed')
    def render_font(self, text, color, antialias=True):
        """
        Render the given text with this box's font and return the surface.
        """
        return self._sysfont.render(text, antialias, color)
    def set_txt_surface(self, value):
        self._txt_surface = value
    @property
    def rect(self):
        return self.__rect
    def update(self):
        """
        Grow the outline rectangle when the text gets too long; return the new
        width (at least 140 px).
        """
        width = max(140, self._txt_surface.get_width() + 20)
        self.__rect.w = width
        return width
    def draw(self, screen):
        """
        Render the text box.
        """
        # Blit the text
        screen.blit(self._txt_surface, (self.__rect.x + 5, self.__rect.y + 5))
        # Blit the outline rectangle
        pygame.draw.rect(screen, self._color, self.__rect, 2)
# =============================================================================
# fonction de dessins instantanees
# =============================================================================
def draw_point(params, screen):
    """Instantiate a Point from a parameter dict and render it.

    params (dict): keyword arguments for the Point constructor
    screen (pygame screen): target surface
    """
    try:
        figure = Point(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_line(params, screen):
    """Instantiate a Line from a parameter dict and render it.

    params (dict): keyword arguments for the Line constructor
    screen (pygame screen): target surface
    """
    try:
        figure = Line(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_textbox(params, screen):
    """Instantiate a TextBox from a parameter dict and render it.

    params (dict): keyword arguments for the TextBox constructor
    screen (pygame screen): target surface
    """
    try:
        figure = TextBox(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_rect(params, screen):
    """Instantiate a Rectangle from *params* and draw it on *screen*.

    params (dict): keyword arguments for the Rectangle constructor
    screen (pygame screen): surface to draw on
    Returns the draw() result, or an error string when *params* does not
    match the constructor signature.
    """
    try:
        figure = Rectangle(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
def draw_circle(params, screen):
    """Instantiate a Circle from *params* and draw it on *screen*.

    params (dict): keyword arguments for the Circle constructor
    screen (pygame screen): surface to draw on
    Returns the draw() result, or an error string when *params* does not
    match the constructor signature.
    """
    try:
        figure = Circle(**params)
        return figure.draw(screen)
    except TypeError:
        return "Parametres incorrect"
--- FILE SEPARATOR ---
"""
Module contenant les differents outils de gestion du tableau
"""
import pygame
import pygame.draw
from datetime import datetime
from figures import Point, Line, TextBox, Rectangle, Circle
import time
# =============================================================================
# classes handling user parameter changes
# =============================================================================
class TriggerBox:
    """
    Abstract base for rectangular screen areas that react to mouse clicks.

    top_left (list): pixel coordinates of the top-left corner
    size: box dimensions passed straight to pygame.Rect
    """

    def __init__(self, top_left, size):
        self.coords = top_left
        self.rect = pygame.Rect(top_left, size)

    def is_triggered(self, event):
        """Return True when the user's click lands inside this box.

        event (pygame event): a mouse-click event carrying a .pos attribute
        """
        return self.rect.collidepoint(event.pos)
class Auth(TriggerBox):
    """
    Button that toggles a user's authorisation for others to modify their drawings.

    The status is shown as a coloured disc: red = withheld, green = granted.
    """

    # Status colours shared by add() and switch().
    _DENIED = [255, 0, 0]
    _GRANTED = [0, 255, 0]

    def __init__(self, top_left, size):
        TriggerBox.__init__(self, top_left, size)
        self._size = size

    def _status_circle(self, screen, color):
        """Draw the status disc centred inside the box (factored out of add/switch)."""
        pygame.draw.circle(screen, color,
                           [int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + self._size[1] / 2)],
                           int(min(self._size[0], self._size[1] / 3)))

    def add(self, screen):
        """
        Draw the auth box: outline, red (denied) disc and the 'auth' legend.
        """
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        self._status_circle(screen, self._DENIED)
        font = pygame.font.Font(None, 18)
        legend = {"text": font.render("auth", True, [0, 0, 0]), "coords": self.coords}
        screen.blit(legend["text"], legend["coords"])

    def switch(self, screen, erasing_auth, modification_allowed, name):
        """Repaint the disc to reflect *erasing_auth* and log the change.

        Returns [name, erasing_auth] so the caller can broadcast the new state.
        """
        if erasing_auth:
            self._status_circle(screen, self._GRANTED)
            print("{} a donné son autorisation de modifications".format(name))
        else:
            self._status_circle(screen, self._DENIED)
            print("{} a retiré son autorisation de modifications".format(name))
        return [name, erasing_auth]
class Save(TriggerBox):
    """
    Button that saves the whiteboard drawing area as a PNG file.
    """
    def __init__(self, top_left, size):
        TriggerBox.__init__(self, top_left, size)
        self._size = size
    def add(self, screen):
        """
        Draw the save box (outline plus the 'save' legend).
        """
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        font = pygame.font.Font(None, 18)
        legend = {"text": font.render("save", True, [0, 0, 0]), "coords": self.coords}
        screen.blit(legend["text"], legend["coords"])
    def save(self, screen, whiteboard):
        # Crop the toolbar out (everything below toolbar_y) and dump the
        # remaining drawing area to "mygreatdrawing.png" in the working directory.
        pygame.image.save(screen.subsurface((0, whiteboard.get_config(["toolbar_y"]) + 1,
                                             whiteboard.get_config(["width"]),
                                             whiteboard.get_config(["length"]) - whiteboard.get_config(
                                                 ["toolbar_y"]) - 1)), "mygreatdrawing.png")
class Mode(TriggerBox):
    """
    Drawing mode of the board, entered by clicking its inherited trigger box.

    name (string): mode label rendered inside the box on screen
    """

    def __init__(self, name, top_left, size):
        super().__init__(top_left, size)
        self.name = name

    def add(self, screen):
        """Draw the mode's trigger box and its label, making it active on screen."""
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        label_font = pygame.font.Font(None, 18)
        rendered_label = label_font.render(self.name, True, [0, 0, 0])
        screen.blit(rendered_label, self.coords)
class ColorBox(TriggerBox):
    """
    Trigger box used to pick the active drawing colour.

    color (list): colour displayed by (and selected through) the box
    """

    def __init__(self, color, top_left, size):
        super().__init__(top_left, size)
        self.color = color

    def add(self, screen):
        """Paint the box with its colour, making it selectable."""
        pygame.draw.rect(screen, self.color, self.rect)
class FontSizeBox(TriggerBox):
    """
    Trigger box used to pick the stroke thickness.

    font_size (int): stroke thickness in pixels
    """

    def __init__(self, font_size, top_left, size):
        super().__init__(top_left, size)
        self.font_size = font_size
        # Centre of the box, where a disc previews the selected thickness.
        self.center = [top_left[0] + size[0] // 2,
                       top_left[1] + size[1] // 2]

    def add(self, screen):
        """Draw the box outline and the thickness-preview disc."""
        pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
        pygame.draw.circle(screen, [0, 0, 0], self.center, self.font_size)
# =============================================================================
# classes handling user events
# =============================================================================
class EventHandler:
    """
    Base class for the per-mode user-event handlers.

    whiteboard: the whiteboard instance whose user events are handled
    """

    def __init__(self, whiteboard):
        self.whiteboard = whiteboard

    def handle(self, event):
        """Pre-check shared by every mode: quit events and toolbar clicks.

        Returns True when the event was consumed here (quit requested, or a
        click on the toolbar that triggers a config/mode switch).
        """
        if event.type == pygame.QUIT:
            self.whiteboard.end()
            self.whiteboard.switch_config("quit")
            return True
        if event.type == pygame.MOUSEBUTTONDOWN:
            click_x, click_y = event.dict['pos']
            if click_y <= self.whiteboard.get_config(["toolbar_y"]):
                self.whiteboard.switch_config(event)
                return True
        return False
class HandlePoint(EventHandler):
    """
    Event handler for point mode.
    """

    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)

    def handle_all(self, event):
        """Draw a point on every left mouse click below the toolbar."""
        # Shared pre-check consumes quit events and toolbar clicks.
        if self.handle(event):
            return
        if event.type != pygame.MOUSEBUTTONDOWN:
            return
        if event.dict["button"] != 1:  # left button only
            return
        position = event.dict["pos"]
        point = Point(position,
                      self.whiteboard.get_config(["active_color"]),
                      self.whiteboard.get_config(["font_size"]),
                      self.whiteboard.get_config(["toolbar_y"]))
        self.whiteboard.draw(point, datetime.timestamp(datetime.now()))
class HandleLine(EventHandler):
    """
    Event handler for line (freehand drawing) mode.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
    def handle_mouse_motion(self):
        """
        Handle mouse moves: while the button is held down, the stroke is
        rendered live, one small Line segment per motion event.
        """
        if self.whiteboard.is_drawing():
            self.whiteboard.mouse_position = pygame.mouse.get_pos()
            # Moving over the toolbar lifts the pen instead of drawing on it.
            if self.whiteboard.mouse_position[1] <= self.whiteboard.get_config(["toolbar_y"]):
                self.whiteboard.pen_up()
            elif self.whiteboard.last_pos is not None:
                to_draw = Line(self.whiteboard.get_config(["active_color"]), self.whiteboard.last_pos,
                               self.whiteboard.mouse_position,
                               self.whiteboard.get_config(["font_size"]))
                now = datetime.now()
                timestamp = datetime.timestamp(now)
                self.whiteboard.draw(to_draw, timestamp)
            self.whiteboard.update_last_pos()
    def handle_mouse_button_up(self):
        """
        Handle button release: pen up and forget the last position.
        """
        self.whiteboard.mouse_position = (0, 0)
        self.whiteboard.pen_up()
        self.whiteboard.reset_last_pos()
    def handle_mouse_button_down(self):
        """
        Handle button press: pen down (start drawing).
        """
        self.whiteboard.pen_down()
    def handle_all(self, event):
        """
        Dispatch the event to the matching handler via an if tree.
        """
        handled = self.handle(event)
        if handled:
            return
        elif event.type == pygame.MOUSEMOTION:
            self.handle_mouse_motion()
        elif event.type == pygame.MOUSEBUTTONUP:
            self.handle_mouse_button_up()
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.handle_mouse_button_down()
        pygame.display.flip()
class HandleText(EventHandler):
    """
    Event handler for textbox mode.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
    def box_selection(self, event):
        """
        Handle user clicks.
        A right click creates a new textbox.
        A left click checks whether it lands on an existing box, which then
        becomes the active box.
        """
        if event.dict["button"] == 3:
            coord = event.dict['pos']
            text_box = TextBox(*coord, self.whiteboard.get_config(["text_box", "textbox_width"]),
                               self.whiteboard.get_config(["text_box", "textbox_length"]),
                               self.whiteboard.get_config(["text_box", "active_color"]),
                               self.whiteboard.get_config(["text_box", "font"]),
                               self.whiteboard.get_config(["text_box", "font_size"]), "",
                               self.whiteboard.get_config(["active_color"]))
            self.whiteboard.append_text_box(text_box)
            now = datetime.now()
            timestamp = datetime.timestamp(now)
            self.whiteboard.draw(text_box, timestamp)
            self.whiteboard.set_active_box(text_box)
        elif event.dict["button"] == 1:
            for box in self.whiteboard.get_text_boxes():
                if box.rect.collidepoint(event.pos):
                    self.whiteboard.set_active_box(box, new=False)
    def write_in_box(self, event):
        """
        Handle the user's keyboard input.
        When a box is selected, the keystroke updates its text accordingly.
        """
        if self.whiteboard.active_box is not None:
            # Erase one character
            if event.key == pygame.K_BACKSPACE:
                self.whiteboard.active_box.delete_char_from_text(self.whiteboard)
                # Modifying the box unfortunately requires re-rendering the whole board
                self.whiteboard.clear_screen()
                self.whiteboard.load_actions(self.whiteboard.get_hist())
            elif event.key == pygame.K_TAB or event.key == pygame.K_RETURN:
                pass
            else:
                self.whiteboard.active_box.add_character_to_text(event.unicode, self.whiteboard)
                # Re-render everything here too, to avoid overlapping glyphs
                self.whiteboard.clear_screen()
                self.whiteboard.load_actions(self.whiteboard.get_hist())
        if self.whiteboard.active_box is not None:
            # Re-render the text.
            self.whiteboard.active_box.set_txt_surface(self.whiteboard.active_box.render_font(
                self.whiteboard.active_box.get_textbox_text(),
                self.whiteboard.active_box.get_textbox_color()))
    def handle_all(self, event):
        """
        Dispatch the event to the matching handler via an if tree.
        """
        handled = self.handle(event)
        if handled:
            return
        if event.type == pygame.MOUSEBUTTONDOWN:
            self.box_selection(event)
        if event.type == pygame.KEYDOWN:
            self.write_in_box(event)
        pygame.display.flip()
class HandleRect(EventHandler):
    """
    Event handler for rectangle mode.
    Rectangles are drawn with a click-and-drag gesture between two corners.
    """

    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
        self.c1 = None

    def handle_mouse_button_up(self, coord):
        """Close the drag: clamp the second corner and draw the rectangle."""
        if self.c1 is None:
            return
        second_corner = list(coord)
        # Never let the shape spill over the toolbar.
        second_corner[1] = max(self.whiteboard.get_config(["toolbar_y"]), second_corner[1])
        shape = Rectangle(self.c1, second_corner, self.whiteboard.get_config(["active_color"]))
        self.whiteboard.draw(shape, datetime.timestamp(datetime.now()))
        self.c1 = None

    def handle_mouse_button_down(self, event):
        """Remember the first corner when a left-button drag starts."""
        if event.dict["button"] == 1:
            self.c1 = event.dict['pos']

    def handle_all(self, event):
        """Dispatch the event to the matching handler."""
        if self.handle(event):
            return
        if event.type == pygame.MOUSEBUTTONUP:
            self.handle_mouse_button_up(coord=event.dict['pos'])
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.handle_mouse_button_down(event)
        pygame.display.flip()
class HandleCircle(EventHandler):
    """
    Event handler for circle mode.
    Circles are drawn with a click-and-drag gesture as well.
    """
    def __init__(self, whiteboard):
        EventHandler.__init__(self, whiteboard)
        self.center = None
    def handle_mouse_button_up(self, coord):
        """
        On button release, *coord* is a point on the circle's perimeter;
        draw the circle defined by the stored center and this point.
        """
        if self.center is not None:
            coord = list(coord)
            to_draw = Circle(self.center, coord, self.whiteboard.get_config(["active_color"]),
                             self.whiteboard.get_config(["toolbar_y"]))
            now = datetime.now()
            timestamp = datetime.timestamp(now)
            self.whiteboard.draw(to_draw, timestamp)
            self.center = None
    def handle_mouse_button_down(self, event):
        """
        On left-button press, remember the circle's center.
        """
        if event.dict["button"] != 1:
            return
        self.center = event.dict['pos']
    def handle_all(self, event):
        """
        Dispatch the event to the matching handler via an if tree.
        """
        handled = self.handle(event)
        if handled:
            return
        elif event.type == pygame.MOUSEBUTTONUP:
            self.handle_mouse_button_up(coord=event.dict['pos'])
        elif event.type == pygame.MOUSEBUTTONDOWN:
            self.handle_mouse_button_down(event)
        pygame.display.flip()
|
{
"imported_by": [
"/src/client.py",
"/src/main.py"
],
"imports": [
"/src/figures.py",
"/src/tools.py"
]
}
|
pyfaddist/yafcorse
|
/tests/conftest.py
|
import pytest
from flask import Flask
from yafcorse import Yafcorse
@pytest.fixture()
def app():
    """Flask app fixture wired with a fully-populated Yafcorse configuration."""
    flask_app = Flask(__name__)
    configuration = {
        'origins': '*',
        'allowed_methods': ['GET', 'POST', 'PUT'],
        'allowed_headers': ['Content-Type', 'X-Test-Header'],
        'allow_credentials': True,
        'cache_max_age': str(60 * 5)
    }
    extension = Yafcorse(configuration)
    extension.init_app(flask_app)
    return flask_app
@pytest.fixture()
def client(app: Flask):
    """Flask test client bound to the CORS-enabled app fixture."""
    return app.test_client()
|
import re
from typing import Callable, Iterable
from flask import Flask, Response, request
# Yet Another Flask CORS Extension
# --------------------------------
# Based on https://developer.mozilla.org/de/docs/Web/HTTP/CORS
# DEFAULT_CONFIGURATION = {
# 'origins': '*',
# 'allowed_methods': ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE'],
# 'allowed_headers': '*',
# 'allow_credentials': True,
# 'cache_max_age': str(60 * 5)
# }
DEFAULT_CONFIGURATION = {
'origins': None,
'allowed_methods': [],
'allowed_headers': None,
'allow_credentials': False,
'cache_max_age': None
}
class Yafcorse(object):
    """Yet Another Flask CORS Extension.

    Appends CORS response headers based on a configuration dict; see
    DEFAULT_CONFIGURATION for the recognised keys and their defaults.
    """

    def __init__(self, configuration: dict = DEFAULT_CONFIGURATION, app: Flask = None) -> None:
        super().__init__()
        self.__initialized = False
        # Every option falls back to its DEFAULT_CONFIGURATION value when absent.
        self.__origins = configuration.get('origins', DEFAULT_CONFIGURATION.get('origins'))
        self.__regex_origin_patterns = configuration.get('origin_patterns', None)
        self.__allowed_methods = configuration.get('allowed_methods', DEFAULT_CONFIGURATION.get('allowed_methods'))
        self.__allowed_headers = configuration.get('allowed_headers', DEFAULT_CONFIGURATION.get('allowed_headers'))
        self.__allow_credentials = configuration.get('allow_credentials', DEFAULT_CONFIGURATION.get('allow_credentials'))
        self.__max_age = configuration.get('cache_max_age', DEFAULT_CONFIGURATION.get('cache_max_age'))
        self.__allowed_methods_value = ''
        self.__allowed_headers_value = ''
        self.init_app(app)

    def init_app(self, app: Flask):
        """Register the extension on *app* (no-op when app is None or already initialised)."""
        if not self.__initialized and app:
            # Treat None (the DEFAULT_CONFIGURATION value for methods/headers)
            # as an empty list instead of crashing on ', '.join(None).
            methods = self.__allowed_methods or []
            headers = self.__allowed_headers or []
            self.__allowed_methods_value = ', '.join(methods)
            self.__allowed_methods = [m.strip().lower() for m in methods]
            self.__allowed_headers_value = ', '.join(headers)
            self.__allowed_headers = [h.strip().lower() for h in headers]
            # Pick the origin-validation strategy, in priority order:
            # explicit collection -> callable -> regex patterns -> '*' wildcard.
            if not isinstance(self.__origins, str) and isinstance(self.__origins, (list, tuple, Iterable)):
                self.__validate_origin = _check_if_contains_origin(self.__origins)
            elif isinstance(self.__origins, Callable):
                self.__validate_origin = self.__origins
            elif self.__regex_origin_patterns is not None:
                self.__validate_origin = _check_if_regex_match_origin(self.__regex_origin_patterns)
            else:
                self.__validate_origin = _check_if_asterisk_origin(self.__origins)
            app.after_request(self.__handle_response)
            app.extensions['yafcorse'] = self
            self.__initialized = True

    def __append_headers(self, response: Response, origin: str, is_preflight_request: bool = False):
        """Append the CORS headers for the validated *origin* to *response*."""
        response.headers.add_header('Access-Control-Allow-Origin', origin)
        if 'Access-Control-Request-Method' in request.headers \
            and request.headers.get('Access-Control-Request-Method', '').strip().lower() in self.__allowed_methods:
            response.headers.add_header('Access-Control-Allow-Methods', self.__allowed_methods_value)
        if 'Access-Control-Request-Headers' in request.headers \
            and _string_list_in(request.headers.get('Access-Control-Request-Headers').split(','), self.__allowed_headers):
            response.headers.add_header('Access-Control-Allow-Headers', self.__allowed_headers_value)
        if self.__allow_credentials:
            response.headers.add_header('Access-Control-Allow-Credentials', 'true')
        # Only emit Max-Age when a value is configured; previously a None
        # cache_max_age produced a bogus header value on preflight responses.
        if is_preflight_request and self.__max_age is not None:
            response.headers.add_header('Access-Control-Max-Age', self.__max_age)

    def __handle_response(self, response: Response):
        """after_request hook: decorate *response* with CORS headers when the origin is allowed."""
        is_preflight_request = request.method == 'OPTIONS'
        if not is_preflight_request and 'Origin' not in request.headers:
            return response
        origin = request.headers.get('Origin')
        if not self.__validate_origin(origin):
            return response
        self.__append_headers(response, origin, is_preflight_request)
        return response
def _string_list_in(target: list[str], source: list[str]):
contained = [element for element in target if element.strip().lower() in source]
return contained == target
def _check_if_regex_match_origin(patterns):
compiled_patterns = [re.compile(p) for p in patterns]
def execute_check(origin):
for matcher in compiled_patterns:
if matcher.match(origin):
return True
return False
execute_check.__name__ = _check_if_regex_match_origin.__name__
return execute_check
def _check_if_contains_origin(origins):
def execute_check(origin):
for o in origins:
if o == origin:
return True
return False
execute_check.__name__ = _check_if_contains_origin.__name__
return execute_check
def _check_if_asterisk_origin(origins):
allow_all = origins == '*'
def execute_check(origin):
return allow_all and origin is not None
execute_check.__name__ = _check_if_asterisk_origin.__name__
return execute_check
|
{
"imported_by": [],
"imports": [
"/src/yafcorse/__init__.py"
]
}
|
pyfaddist/yafcorse
|
/tests/test_ceate_extensions.py
|
from flask.app import Flask
from yafcorse import Yafcorse
def test_extension(app: Flask):
    """The extension must register itself under app.extensions['yafcorse']."""
    assert app.extensions.get('yafcorse') is not None
    assert isinstance(app.extensions.get('yafcorse'), Yafcorse)
|
import re
from typing import Callable, Iterable
from flask import Flask, Response, request
# Yet Another Flask CORS Extension
# --------------------------------
# Based on https://developer.mozilla.org/de/docs/Web/HTTP/CORS
# DEFAULT_CONFIGURATION = {
# 'origins': '*',
# 'allowed_methods': ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE'],
# 'allowed_headers': '*',
# 'allow_credentials': True,
# 'cache_max_age': str(60 * 5)
# }
DEFAULT_CONFIGURATION = {
'origins': None,
'allowed_methods': [],
'allowed_headers': None,
'allow_credentials': False,
'cache_max_age': None
}
class Yafcorse(object):
    """Yet Another Flask CORS Extension.

    Appends CORS response headers based on a configuration dict; see
    DEFAULT_CONFIGURATION for the recognised keys.
    """
    def __init__(self, configuration: dict = DEFAULT_CONFIGURATION, app: Flask = None) -> None:
        super().__init__()
        self.__initialized = False
        # Every option falls back to its DEFAULT_CONFIGURATION value when absent.
        self.__origins = configuration.get('origins', DEFAULT_CONFIGURATION.get('origins'))
        self.__regex_origin_patterns = configuration.get('origin_patterns', None)
        self.__allowed_methods = configuration.get('allowed_methods', DEFAULT_CONFIGURATION.get('allowed_methods'))
        self.__allowed_headers = configuration.get('allowed_headers', DEFAULT_CONFIGURATION.get('allowed_headers'))
        self.__allow_credentials = configuration.get('allow_credentials', DEFAULT_CONFIGURATION.get('allow_credentials'))
        self.__max_age = configuration.get('cache_max_age', DEFAULT_CONFIGURATION.get('cache_max_age'))
        self.__allowed_methods_value = ''
        self.__allowed_headers_value = ''
        self.init_app(app)
    def init_app(self, app: Flask):
        """Register the extension on *app* (no-op when app is None or already initialised)."""
        if not self.__initialized and app:
            # NOTE(review): allowed_methods/allowed_headers of None (the
            # DEFAULT_CONFIGURATION values) would crash the join() calls
            # below — confirm callers always pass lists.
            self.__allowed_methods_value = ', '.join(self.__allowed_methods)
            self.__allowed_methods = [m.strip().lower() for m in self.__allowed_methods]
            self.__allowed_headers_value = ', '.join(self.__allowed_headers)
            self.__allowed_headers = [h.strip().lower() for h in self.__allowed_headers]
            # Pick the origin-validation strategy, in priority order:
            # explicit collection -> callable -> regex patterns -> '*' wildcard.
            if not isinstance(self.__origins, str) and isinstance(self.__origins, (list, tuple, Iterable)):
                self.__validate_origin = _check_if_contains_origin(self.__origins)
            elif isinstance(self.__origins, Callable):
                self.__validate_origin = self.__origins
            elif self.__regex_origin_patterns is not None:
                self.__validate_origin = _check_if_regex_match_origin(self.__regex_origin_patterns)
            else:
                self.__validate_origin = _check_if_asterisk_origin(self.__origins)
            app.after_request(self.__handle_response)
            app.extensions['yafcorse'] = self
            self.__initialized = True
    def __append_headers(self, response: Response, origin: str, is_preflight_request: bool = False):
        """Append the CORS headers for the validated *origin* to *response*."""
        response.headers.add_header('Access-Control-Allow-Origin', origin)
        if 'Access-Control-Request-Method' in request.headers \
            and request.headers.get('Access-Control-Request-Method', '').strip().lower() in self.__allowed_methods:
            response.headers.add_header('Access-Control-Allow-Methods', self.__allowed_methods_value)
        if 'Access-Control-Request-Headers' in request.headers \
            and _string_list_in(request.headers.get('Access-Control-Request-Headers').split(','), self.__allowed_headers):
            response.headers.add_header('Access-Control-Allow-Headers', self.__allowed_headers_value)
        if self.__allow_credentials:
            response.headers.add_header('Access-Control-Allow-Credentials', 'true')
        if is_preflight_request:
            # NOTE(review): __max_age may be None here — verify the configured
            # value is always a string before it reaches the header.
            response.headers.add_header('Access-Control-Max-Age', self.__max_age)
    def __handle_response(self, response: Response):
        """after_request hook: decorate *response* with CORS headers when the origin is allowed."""
        is_preflight_request = request.method == 'OPTIONS'
        if not is_preflight_request and 'Origin' not in request.headers:
            return response
        origin = request.headers.get('Origin')
        if not self.__validate_origin(origin):
            return response
        self.__append_headers(response, origin, is_preflight_request)
        return response
def _string_list_in(target: list[str], source: list[str]):
contained = [element for element in target if element.strip().lower() in source]
return contained == target
def _check_if_regex_match_origin(patterns):
compiled_patterns = [re.compile(p) for p in patterns]
def execute_check(origin):
for matcher in compiled_patterns:
if matcher.match(origin):
return True
return False
execute_check.__name__ = _check_if_regex_match_origin.__name__
return execute_check
def _check_if_contains_origin(origins):
def execute_check(origin):
for o in origins:
if o == origin:
return True
return False
execute_check.__name__ = _check_if_contains_origin.__name__
return execute_check
def _check_if_asterisk_origin(origins):
allow_all = origins == '*'
def execute_check(origin):
return allow_all and origin is not None
execute_check.__name__ = _check_if_asterisk_origin.__name__
return execute_check
|
{
"imported_by": [],
"imports": [
"/src/yafcorse/__init__.py"
]
}
|
pyfaddist/yafcorse
|
/tests/test_origins_function.py
|
import pytest
from flask import Flask, Response
from flask.testing import FlaskClient
from yafcorse import Yafcorse
@pytest.fixture()
def local_app():
    """Flask app whose CORS origin check is a lambda accepting a single origin."""
    app = Flask(__name__)
    cors = Yafcorse({
        'allowed_methods': ['GET', 'POST', 'PUT'],
        'allowed_headers': ['Content-Type', 'X-Test-Header'],
        'origins': lambda origin: origin == 'https://from_lambda'
    })
    cors.init_app(app)
    return app
@pytest.fixture()
def local_client(local_app: Flask):
    """Test client bound to the lambda-origin app fixture."""
    return local_app.test_client()
def test_origin_function(local_client: FlaskClient):
    """A callable origin validator must accept its matching origin."""
    response: Response = local_client.options('/some-request', headers={
        'Origin': 'https://from_lambda'
    })
    # 404 is expected — no such route exists — but CORS headers must still be set.
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() in response.headers
    assert 'Access-Control-Max-Age'.lower() in response.headers
    assert response.headers.get('Access-Control-Allow-Origin') is not None
    assert response.headers.get('Access-Control-Allow-Origin') == 'https://from_lambda'
    assert response.headers.get('Access-Control-Max-Age') is not None
    assert response.headers.get('Access-Control-Max-Age') != ''
def test_origin_function_fail(local_client: FlaskClient):
    """A callable origin validator must reject a non-matching origin (no CORS headers)."""
    response: Response = local_client.options('/some-request', headers={
        'Origin': 'https://other_than_lambda'
    })
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() not in response.headers
    assert 'Access-Control-Max-Age'.lower() not in response.headers
|
import re
from typing import Callable, Iterable
from flask import Flask, Response, request
# Yet Another Flask CORS Extension
# --------------------------------
# Based on https://developer.mozilla.org/de/docs/Web/HTTP/CORS
# DEFAULT_CONFIGURATION = {
# 'origins': '*',
# 'allowed_methods': ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE'],
# 'allowed_headers': '*',
# 'allow_credentials': True,
# 'cache_max_age': str(60 * 5)
# }
DEFAULT_CONFIGURATION = {
'origins': None,
'allowed_methods': [],
'allowed_headers': None,
'allow_credentials': False,
'cache_max_age': None
}
class Yafcorse(object):
    """Yet Another Flask CORS Extension.

    Appends CORS response headers based on a configuration dict; see
    DEFAULT_CONFIGURATION for the recognised keys.
    """
    def __init__(self, configuration: dict = DEFAULT_CONFIGURATION, app: Flask = None) -> None:
        super().__init__()
        self.__initialized = False
        # Every option falls back to its DEFAULT_CONFIGURATION value when absent.
        self.__origins = configuration.get('origins', DEFAULT_CONFIGURATION.get('origins'))
        self.__regex_origin_patterns = configuration.get('origin_patterns', None)
        self.__allowed_methods = configuration.get('allowed_methods', DEFAULT_CONFIGURATION.get('allowed_methods'))
        self.__allowed_headers = configuration.get('allowed_headers', DEFAULT_CONFIGURATION.get('allowed_headers'))
        self.__allow_credentials = configuration.get('allow_credentials', DEFAULT_CONFIGURATION.get('allow_credentials'))
        self.__max_age = configuration.get('cache_max_age', DEFAULT_CONFIGURATION.get('cache_max_age'))
        self.__allowed_methods_value = ''
        self.__allowed_headers_value = ''
        self.init_app(app)
    def init_app(self, app: Flask):
        """Register the extension on *app* (no-op when app is None or already initialised)."""
        if not self.__initialized and app:
            # NOTE(review): allowed_methods/allowed_headers of None (the
            # DEFAULT_CONFIGURATION values) would crash the join() calls
            # below — confirm callers always pass lists.
            self.__allowed_methods_value = ', '.join(self.__allowed_methods)
            self.__allowed_methods = [m.strip().lower() for m in self.__allowed_methods]
            self.__allowed_headers_value = ', '.join(self.__allowed_headers)
            self.__allowed_headers = [h.strip().lower() for h in self.__allowed_headers]
            # Pick the origin-validation strategy, in priority order:
            # explicit collection -> callable -> regex patterns -> '*' wildcard.
            if not isinstance(self.__origins, str) and isinstance(self.__origins, (list, tuple, Iterable)):
                self.__validate_origin = _check_if_contains_origin(self.__origins)
            elif isinstance(self.__origins, Callable):
                self.__validate_origin = self.__origins
            elif self.__regex_origin_patterns is not None:
                self.__validate_origin = _check_if_regex_match_origin(self.__regex_origin_patterns)
            else:
                self.__validate_origin = _check_if_asterisk_origin(self.__origins)
            app.after_request(self.__handle_response)
            app.extensions['yafcorse'] = self
            self.__initialized = True
    def __append_headers(self, response: Response, origin: str, is_preflight_request: bool = False):
        """Append the CORS headers for the validated *origin* to *response*."""
        response.headers.add_header('Access-Control-Allow-Origin', origin)
        if 'Access-Control-Request-Method' in request.headers \
            and request.headers.get('Access-Control-Request-Method', '').strip().lower() in self.__allowed_methods:
            response.headers.add_header('Access-Control-Allow-Methods', self.__allowed_methods_value)
        if 'Access-Control-Request-Headers' in request.headers \
            and _string_list_in(request.headers.get('Access-Control-Request-Headers').split(','), self.__allowed_headers):
            response.headers.add_header('Access-Control-Allow-Headers', self.__allowed_headers_value)
        if self.__allow_credentials:
            response.headers.add_header('Access-Control-Allow-Credentials', 'true')
        if is_preflight_request:
            # NOTE(review): __max_age may be None here — verify the configured
            # value is always a string before it reaches the header.
            response.headers.add_header('Access-Control-Max-Age', self.__max_age)
    def __handle_response(self, response: Response):
        """after_request hook: decorate *response* with CORS headers when the origin is allowed."""
        is_preflight_request = request.method == 'OPTIONS'
        if not is_preflight_request and 'Origin' not in request.headers:
            return response
        origin = request.headers.get('Origin')
        if not self.__validate_origin(origin):
            return response
        self.__append_headers(response, origin, is_preflight_request)
        return response
def _string_list_in(target: list[str], source: list[str]):
contained = [element for element in target if element.strip().lower() in source]
return contained == target
def _check_if_regex_match_origin(patterns):
compiled_patterns = [re.compile(p) for p in patterns]
def execute_check(origin):
for matcher in compiled_patterns:
if matcher.match(origin):
return True
return False
execute_check.__name__ = _check_if_regex_match_origin.__name__
return execute_check
def _check_if_contains_origin(origins):
def execute_check(origin):
for o in origins:
if o == origin:
return True
return False
execute_check.__name__ = _check_if_contains_origin.__name__
return execute_check
def _check_if_asterisk_origin(origins):
allow_all = origins == '*'
def execute_check(origin):
return allow_all and origin is not None
execute_check.__name__ = _check_if_asterisk_origin.__name__
return execute_check
|
{
"imported_by": [],
"imports": [
"/src/yafcorse/__init__.py"
]
}
|
ericfourrier/auto-clean
|
/autoc/__init__.py
|
__all__ = ["explorer", "naimputer"]
from .explorer import DataExploration
from .naimputer import NaImputer
from .preprocess import PreProcessor
from .utils.getdata import get_dataset
# from .preprocess import PreProcessor
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for Modeling with pandas, numpy and skicit-learn.
The Goal of this module is to rely on a dataframe structure for modelling g
"""
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result into a attributes
When you used a method the output will be stored in a instance attribute so you
don't have to compute the result again.
"""
    def __init__(self, data, copy=False):
        """
        Parameters
        ----------
        data : pandas.DataFrame
            the data you want explore
        copy: bool
            True if you want make a copy of DataFrame, default False

        Examples
        --------
        explorer = DataExploration(data = your_DataFrame)
        explorer.structure() : global structure of your DataFrame
        explorer.psummary() to get the a global snapchot of the different stuff detected
        data_cleaned = explorer.basic_cleaning() to clean your data.
        """
        assert isinstance(data, pd.DataFrame)
        self.is_data_copy = copy
        self.data = data if not self.is_data_copy else data.copy()
        # if not self.label:
        # print("""the label column is empty the data will be considered
        # as a dataset of predictors""")
        # Cached frame dimensions.
        self._nrow = len(self.data.index)
        self._ncol = len(self.data.columns)
        # Boolean mask of numeric (float/int) columns, and their names via cserie.
        self._dfnumi = (self.data.dtypes == float) | (
            self.data.dtypes == int)
        self._dfnum = cserie(self._dfnumi)
        # Boolean mask of object (string) columns, and their names via cserie.
        self._dfchari = (self.data.dtypes == object)
        self._dfchar = cserie(self._dfchari)
        # Lazily-filled caches reused by the exploration methods.
        self._nacolcount = pd.DataFrame()
        self._narowcount = pd.DataFrame()
        self._count_unique = pd.DataFrame()
        self._constantcol = []
        self._dupcol = []
        self._nearzerovar = pd.DataFrame()
        self._corrcolumns = []
        self._dict_info = {}
        self._structure = pd.DataFrame()
        self._string_info = ""
        # Extra string tokens treated as missing values besides NaN.
        self._list_other_na = {'unknown', 'na',
                               'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
Notes
------
df._get_numeric_data() is a primitive from pandas
to get only numeric data
"""
dtype_col = self.data.loc[:, colname].dtype
return (dtype_col == int) or (dtype_col == float)
def is_int_factor(self, colname, threshold=0.1):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
threshold : float
colname is an 'int_factor' if the number of
unique values < threshold * nrows
"""
dtype_col = self.data.loc[:, colname].dtype
if dtype_col == int and self.data.loc[:, colname].nunique() <= (threshold * self.data.shape[0]):
return True
else:
return False
def to_lowercase(self):
""" Returns a copy of dataset with data to lower """
return self.data.applymap(lambda x: x.lower() if type(x) == str else x)
def where_numeric(self):
""" Returns a Boolean Dataframe with True for numeric values False for other """
return self.data.applymap(lambda x: isinstance(x, (int, float)))
def count_unique(self):
""" Return a serie with the number of unique value per columns """
if len(self._count_unique):
return self._count_unique
self._count_unique = self.data.apply(lambda x: x.nunique(), axis=0)
return self._count_unique
def sample_df(self, pct=0.05, nr=10, threshold=None):
""" sample a number of rows of a dataframe = min(max(0.05*nrow(self,nr),threshold)"""
a = max(int(pct * float(len(self.data.index))), nr)
if threshold:
a = min(a, threshold)
return self.data.loc[permutation(self.data.index)[:a],:]
def sign_summary(self, subset=None):
"""
Returns the number and percentage of positive and negative values in
a column, a subset of columns or all numeric columns of the dataframe.
Parameters
----------
subset : label or list
Column name or list of column names to check.
Returns
-------
summary : pandas.Series or pandas.DataFrame
Summary of the signs present in the subset
"""
if subset:
subs = subs if isinstance(subs, list) else [subs]
if sum(col not in self._dfnum for col in subs) > 0:
raise NotNumericColumn('At least one of the columns you passed ' \
'as argument are not numeric.')
else:
subs = self._dfnum
summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
'NumOfPositive', 'PctOfPositive'])
summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
return summary
@property
def total_missing(self):
""" Count the total number of missing values """
# return np.count_nonzero(self.data.isnull().values) # optimized for
# speed
return self.nacolcount().Nanumber.sum()
def nacolcount(self):
""" count the number of missing values per columns """
if len(self._nacolcount):
return self._nacolcount
self._nacolcount = self.data.isnull().sum(axis=0)
self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
self._nacolcount['Napercentage'] = self._nacolcount[
'Nanumber'] / (self._nrow)
return self._nacolcount
def narowcount(self):
""" count the number of missing values per columns """
if len(self._narowcount):
return self._narowcount
self._narowcount = self.data.isnull().sum(axis=1)
self._narowcount = pd.DataFrame(
self._narowcount, columns=['Nanumber'])
self._narowcount['Napercentage'] = self._narowcount[
'Nanumber'] / (self._ncol)
return self._narowcount
def detect_other_na(self, verbose=True, auto_replace=False):
""" Detect missing values encoded by the creator of the dataset
like 'Missing', 'N/A' ...
Parameters
----------
verbose : bool
True if you want to print some infos
auto_replace: bool
True if you want replace this value by np.nan, default False
Returns
-------
an DataFrame of boolean if not auto_replace else cleaned DataFrame with
self._list_other_na replaced by np.nan
Notes
------
* You can use na_values parameter in pandas.read_csv to specify the missing
values to convert to nan a priori
* Speed can be improved
"""
res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
print("We detected {} other type of missing values".format(res.sum().sum()))
if auto_replace:
return self.data.where((res == False), np.nan)
else:
return res
@property
def nacols_full(self):
""" Returns a list of columns with only missing values """
return cserie(self.nacolcount().Nanumber == self._nrow)
@property
def narows_full(self):
""" Returns an index of rows with only missing values """
return self.narowcount().Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
def manymissing(self, pct=0.9, axis=0):
""" identify columns of a dataframe with many missing values ( >= pct), if
row = True row either.
- the output is a list """
if axis == 1:
self._manymissingrow = self.narowcount()
self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
return self._manymissingrow
elif axis == 0:
self._manymissingcol = self.nacolcount()
self._manymissingcol = cserie(
self._manymissingcol['Napercentage'] >= pct)
return self._manymissingcol
else:
raise ValueError("Axis should be 1 for rows and o for columns")
def df_len_string(self, drop_num=False):
""" Return a Series with the max of the length of the string of string-type columns """
if drop_num:
return self.data.drop(self._dfnum, axis=1).apply(lambda x: np.max(x.str.len()), axis=0)
else:
return self.data.apply(lambda x: np.max(x.str.len()) if x.dtype.kind =='O' else np.nan , axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
""" identify id or key columns as an index if index_format = True or
as a list if index_format = False """
if not dropna:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: len(x.unique()) == len(x), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
else:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: x.nunique() == len(x.dropna()), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
    def constantcol(self, **kwargs):
        """ Identify constant columns and return their names (cached).

        kwargs are forwarded to sample_df: a cheap check runs on a sample
        first, and only surviving candidates are re-checked on full data.
        """
        # sample to reduce computation time
        if len(self._constantcol):
            return self._constantcol
        col_to_keep = self.sample_df(
            **kwargs).apply(lambda x: len(x.unique()) == 1, axis=0)
        if len(cserie(col_to_keep)) == 0:
            return []
        # confirm candidates on the whole DataFrame and memoize
        self._constantcol = cserie(self.data.loc[:, col_to_keep].apply(
            lambda x: len(x.unique()) == 1, axis=0))
        return self._constantcol
def constantcol2(self, **kwargs):
""" identify constant columns """
return cserie((self.data == self.data.ix[0]).all())
    def factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """ Return the detected factor variables (R-style: a character
        variable taking its values in a small list of levels).

        Arguments
        ----------
        nb_max_levels: the max nb of levels you fix for a categorical variable
        threshold_value : the nb of unique values in percentage of the dataframe length
        index : if you want the result as an index or a list
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels

        def helper_factor(x, num_var=self._dfnum):
            # early-exit scan: give up as soon as more than max_levels
            # distinct values have been seen; numeric columns never qualify
            unique_value = set()
            if x.name in num_var:
                return False
            else:
                for e in x.values:
                    if len(unique_value) >= max_levels:
                        return False
                    else:
                        unique_value.add(e)
                return True

        if index:
            return self.data.apply(lambda x: helper_factor(x))
        else:
            return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in xrange(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in xrange(nb_quantiles + 1)])
else:
raise("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
""" this function gives you a all the quantiles
of the numeric variables of the dataframe
only_numeric will calculate it only for numeric variables,
for only_numeric = False you will get NaN value for non numeric
variables """
binq = 1.0 / nb_quantiles
if only_numeric:
return self.data.loc[:, self._dfnumi].quantile([binq * i for i in xrange(nb_quantiles + 1)])
else:
return self.data.quantile([binq * i for i in xrange(nb_quantiles + 1)])
    def numeric_summary(self):
        """ A more complete summary than describe(), restricted to the
        numeric columns (one row per column, one statistic per column of
        the result).

        NOTE(review): relies on DataFrame.mad(), which was removed in
        pandas 2.0 -- confirm the pinned pandas version.
        """
        df = self.data.loc[:, self._dfnumi]
        func_list = [df.count(), df.min(), df.quantile(0.25),
                     df.quantile(0.5), df.mean(),
                     df.std(), df.mad(), df.skew(),
                     df.kurt(), df.quantile(0.75), df.max()]
        results = [f for f in func_list]
        return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
                                            'Median', 'Mean', 'Std', 'Mad', 'Skewness',
                                            'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
""" this function will try to infer the type of the columns of data"""
return self.data.apply(lambda x: pd.lib.infer_dtype(x.values))
    def structure(self, threshold_factor=10):
        """ Summary of the structure of the DataFrame: for each column its
        pandas dtype, an R-like dtype (numeric/character/factor), missing
        counts, number of unique values, constant/full-na/key flags, the
        inferred dtype and the max string length. Cached after first call.

        Parameters
        ----------
        threshold_factor : int
            character columns with <= threshold_factor unique values are
            re-labelled 'factor'
        """
        if len(self._structure):
            return self._structure
        dtypes = self.data.dtypes
        nacolcount = self.nacolcount()
        nb_missing = nacolcount.Nanumber
        perc_missing = nacolcount.Napercentage
        nb_unique_values = self.count_unique()
        dtype_infer = self.infer_types()
        # R-like dtype: default 'character', numeric columns overridden,
        # low-cardinality character columns re-labelled 'factor'
        dtypes_r = self.data.apply(lambda x: "character")
        dtypes_r[self._dfnumi] = "numeric"
        dtypes_r[(dtypes_r == 'character') & (
            nb_unique_values <= threshold_factor)] = 'factor'
        constant_columns = (nb_unique_values == 1)
        na_columns = (perc_missing == 1)
        is_key = nb_unique_values == self._nrow
        string_length = self.df_len_string(drop_num=False)
        # is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
        dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                    'nb_missing': nb_missing, 'is_key': is_key,
                    'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                    'constant_columns': constant_columns, 'na_columns': na_columns,
                    'dtype_infer': dtype_infer, 'string_length': string_length}
        self._structure = pd.concat(dict_str, axis=1)
        # fix the column order of the report
        self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
                                                  'nb_unique_values', 'constant_columns',
                                                  'na_columns', 'is_key', 'dtype_infer', 'string_length']]
        return self._structure
def findupcol(self, threshold=100, **kwargs):
""" find duplicated columns and return the result as a list of list """
df_s = self.sample_df(threshold=100, **kwargs).T
dup_index_s = (df_s.duplicated()) | (
df_s.duplicated(keep='last'))
if len(cserie(dup_index_s)) == 0:
return []
df_t = (self.data.loc[:, dup_index_s]).T
dup_index = df_t.duplicated()
dup_index_complet = cserie(
(dup_index) | (df_t.duplicated(keep='last')))
l = []
for col in cserie(dup_index):
index_temp = self.data[dup_index_complet].apply(
lambda x: (x == self.data[col])).sum() == self._nrow
temp = list(self.data[dup_index_complet].columns[index_temp])
l.append(temp)
self._dupcol = l
return self._dupcol
def finduprow(self, subset=[]):
""" find duplicated rows and return the result a sorted dataframe of all the
duplicates
subset is a list of columns to look for duplicates from this specific subset .
"""
if sum(self.data.duplicated()) == 0:
print("there is no duplicated rows")
else:
if subset:
dup_index = (self.data.duplicated(subset=subset)) | (
self.data.duplicated(subset=subset, keep='last'))
else:
dup_index = (self.data.duplicated()) | (
self.data.duplicated(keep='last'))
if subset:
return self.data[dup_index].sort(subset)
else:
return self.data[dup_index].sort(self.data.columns[0])
    def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
        """ identify predictors with near-zero variance.
        freq_cut: cutoff ratio of frequency of most common value to second
        most common value.
        unique_cut: cutoff percentage of unique value over total number of
        samples.
        save_metrics: if False, print dataframe and return NON near-zero var
        col indexes, if True, returns the whole dataframe.
        """
        nb_unique_values = self.count_unique()
        percent_unique = 100 * nb_unique_values / self._nrow

        def helper_freq(x):
            # ratio of the two most common values; degenerate cases
            # (empty / constant column) are mapped to 0.0 / 1.0
            if nb_unique_values[x.name] == 0:
                return 0.0
            elif nb_unique_values[x.name] == 1:
                return 1.0
            else:
                return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]
        freq_ratio = self.data.apply(helper_freq)

        # zero variance: no value at all, or a single repeated value
        zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
        nzv = ((freq_ratio >= freq_cut) & (
            percent_unique <= unique_cut)) | (zerovar)

        if save_metrics:
            return pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio, 'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns)
        else:
            print(pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio,
                                'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns))
            return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
"""
implementation of the Recursive Pairwise Elimination.
The function finds the highest correlated pair and removes the most
highly correlated feature of the pair, then repeats the process
until the threshold 'cutoff' is reached.
will return a dataframe is 'data_frame' is set to True, and the list
of predictors to remove oth
Adaptation of 'findCorrelation' function in the caret package in R.
"""
res = []
df = self.data.copy(0)
cor = df.corr(method=method)
for col in cor.columns:
cor[col][col] = 0
max_cor = cor.max()
if print_mode:
print(max_cor.max())
while max_cor.max() > cutoff:
A = max_cor.idxmax()
B = cor[A].idxmax()
if cor[A].mean() > cor[B].mean():
cor.drop(A, 1, inplace=True)
cor.drop(A, 0, inplace=True)
res += [A]
else:
cor.drop(B, 1, inplace=True)
cor.drop(B, 0, inplace=True)
res += [B]
max_cor = cor.max()
if print_mode:
print(max_cor.max())
if data_frame:
return df.drop(res, 1)
else:
return res
self._corrcolumns = res
    def get_infos_consistency(self):
        """ Update self._dict_info and return infos about duplicated rows
        and columns, constant columns, and full-na rows and columns.

        Each entry carries a detected 'value', a severity 'level'
        (ERROR/WARNING), a suggested 'action' and a human readable comment.
        """
        infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True), 'level': 'ERROR',
                                     'action': 'delete', 'comment': 'You should delete this rows with df.drop_duplicates()'},
                 'dup_columns': {'value': self.findupcol(), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.findupcol())},
                 'constant_columns': {'value': self.constantcol(), 'level': 'WARNING',
                                      'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.constantcol())},
                 'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete this rows with df.drop_duplicates()'},
                 'nacols_full': {'value': self.nacols_full, 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.nacols_full)}
                 }
        # merge into the instance-level summary dictionary
        self._dict_info.update(infos)
        return infos
    def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
        """ Update self._dict_info and return infos about missing values:
        total and percentage of missing cells, columns with a high
        (> manymissing_ph) and a low (<= manymissing_pl) na percentage.
        """
        nacolcount_p = self.nacolcount().Napercentage
        infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
                 'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
                 'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
                 'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
                 }
        # merge into the instance-level summary dictionary
        self._dict_info.update(infos)
        return infos
    def print_infos(self, infos="consistency", print_empty=False):
        """ pprint of get_infos

        Parameters
        ----------
        infos : str
            only "consistency" is supported; any other value is currently
            a silent no-op
        print_empty: bool:
            False if you don't want print the empty infos (
            no missing colum for example)"""
        if infos == "consistency":
            dict_infos = self.get_infos_consistency()
            if not print_empty:
                # keep only the entries that actually detected something
                dict_infos = {k: v for k, v in dict_infos.items() if len(v['value']) > 0}
            pprint(dict_infos)
    def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
                 threshold=100, string_threshold=40, dynamic=False):
        """
        Print a summary of the dataset based on the detectors of this class
        (duplicates, missing values, keys, constant / duplicated / near-zero
        variance / highly-correlated columns, long strings).

        - Output : python print
        With dynamic=True each detector is run and printed one at a time;
        otherwise the results and the formatted report are also stored in
        self._dict_info and self._string_info.
        """
        nacolcount_p = self.nacolcount().Napercentage
        if dynamic:
            print('there are {0} duplicated rows\n'.format(
                self.data.duplicated().sum()))
            print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                          cserie((nacolcount_p > manymissing_ph))))
            print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
                manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
            print('the detected keys of the dataset are:\n{0} \n'.format(
                self.detectkey()))
            print('the duplicated columns of the dataset are:\n{0}\n'.format(
                self.findupcol(threshold=100)))
            print('the constant columns of the dataset are:\n{0}\n'.format(
                self.constantcol()))
            print('the columns with nearzerovariance are:\n{0}\n'.format(
                list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
            print('the columns highly correlated to others to remove are:\n{0}\n'.format(
                self.findcorr(data_frame=False)))
            print('these columns contains big strings :\n{0}\n'.format(
                cserie(self.df_len_string() > string_threshold)))
        else:
            # store every detector result, then render them all at once
            self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                               'many_missing_percentage': manymissing_ph,
                               'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                               'low_missing_percentage': manymissing_pl,
                               'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                               'keys_detected': self.detectkey(),
                               'dup_columns': self.findupcol(threshold=100),
                               'constant_columns': self.constantcol(),
                               'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                               'high_correlated_col': self.findcorr(data_frame=False),
                               'big_strings_col': cserie(self.df_len_string() > string_threshold)
                               }
            self._string_info = u"""
there are {nb_duplicated_rows} duplicated rows\n
the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
the columns with less than {low_missing_percentage:.2%}% manymissing values are :\n{lowmissing_columns} \n
you should fill them with median or most common value\n
the detected keys of the dataset are:\n{keys_detected} \n
the duplicated columns of the dataset are:\n{dup_columns}\n
the constant columns of the dataset are:\n{constant_columns}\n
the columns with nearzerovariance are:\n{nearzerovar_columns}\n
the columns highly correlated to others to remove are:\n{high_correlated_col}\n
these columns contains big strings :\n{big_strings_col}\n
        """.format(**self._dict_info)
            print(self._string_info)
def metadata(self):
""" Return a dict/json full of infos about the dataset """
meta = {}
meta['mem_size'] = self.data.memory_usage(index=True).sum() # in bytes
meta['columns_name'] = self.data.columns.tolist()
meta['columns_name_n'] = [e.lower() for e in self.data.columns]
meta['nb_rows'] = self.data.shape[0]
meta['nb_columns'] = self.data.shape[1]
# drop dtype_p for mongodb compatibility
structure_data = self.structure().drop(labels='dtypes_p', axis=1)
structure_data = structure_data.to_dict('index')
meta['structure'] = structure_data
meta['numeric_summary'] = self.numeric_summary().to_dict('index')
return meta
--- FILE SEPARATOR ---
from autoc.explorer import DataExploration, pd
from autoc.utils.helpers import cserie
import seaborn as sns
import matplotlib.pyplot as plt
#from autoc.utils.helpers import cached_property
from autoc.utils.corrplot import plot_corrmatrix
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats.mstats import ks_2samp
def missing_map(df, nmax=100, verbose=True, yticklabels=False, figsize=(15, 11), *args, **kwargs):
    """ Missing-value map plot, like in the Amelia II package in R.

    Parameters
    ----------
    df : pandas.DataFrame
        the data to visualise
    nmax : int
        maximum number of rows plotted; bigger frames are sampled down
    verbose : bool
        currently unused -- kept for backward compatibility
        (NOTE(review): decide whether it should gate a printed report)
    yticklabels : bool
        forwarded to seaborn.heatmap
    figsize : tuple
        size of the matplotlib figure

    Returns
    -------
    the seaborn heatmap Axes
    """
    f, ax = plt.subplots(figsize=figsize)
    # bug fix: frames with <= nmax rows used to fall through without
    # plotting anything (df_s unassigned, None returned)
    df_s = df.sample(n=nmax) if nmax < df.shape[0] else df
    return sns.heatmap(df_s.isnull(), yticklabels=yticklabels, vmax=1, *args, **kwargs)
# class ColumnNaInfo
class NaImputer(DataExploration):
    """ DataExploration subclass focused on exploring and imputing
    missing values. """

    def __init__(self, *args, **kwargs):
        super(NaImputer, self).__init__(*args, **kwargs)
        # eagerly build self.data_isna, the 0/1 indicator frame of
        # missing cells used by most methods of this class
        self.get_data_isna()
@property
def nacols(self):
""" Returns a list of column with at least one missing values """
return cserie(self.nacolcount().Nanumber > 0)
@property
def nacols_i(self):
""" Returns the index of column with at least one missing values """
return cserie(self.nacolcount().Nanumber > 0)
def get_overlapping_matrix(self, normalize=True):
""" Look at missing values overlapping """
arr = self.data_isna.astype('float').values
arr = np.dot(arr.T, arr)
if normalize:
arr = arr / (arr.max(axis=1)[:, None])
index = self.nacols
res = pd.DataFrame(index=index, data=arr, columns=index)
return res
def infos_na(self, na_low=0.05, na_high=0.90):
""" Returns a dict with various infos about missing values """
infos = {}
infos['nacolcount'] = self.nacolcount()
infos['narowcount'] = self.narowcount()
infos['nb_total_na'] = self.total_missing
infos['many_na_col'] = self.manymissing(pct=na_high)
infos['low_na_col'] = cserie(self.nacolcount().Napercentage < na_low)
infos['total_pct_na'] = self.nacolcount().Napercentage.mean()
return infos
def get_isna(self, col):
""" Returns a dummy variable indicating in a observation of a specific col
is na or not 0 -> not na , 1 -> na """
return self.data.loc[:, col].isnull().astype(int)
@property
def data_isna_m(self):
""" Returns merged dataframe (data, data_is_na)"""
return pd.concat((self.data, self.data_isna), axis=1)
def get_data_isna(self, prefix="is_na_", filter_nna=True):
""" Returns dataset with is_na columns from the a dataframe with missing values
Parameters
----------
prefix : str
the name of the prefix that will be append to the column name.
filter_nna: bool
True if you want remove column without missing values.
"""
if not filter_nna:
cols_to_keep = self.data.columns
else:
cols_to_keep = self.nacols
data_isna = self.data.loc[:, cols_to_keep].isnull().astype(int)
data_isna.columns = ["{}{}".format(prefix, c) for c in cols_to_keep]
self.data_isna = data_isna
return self.data_isna
def get_corrna(self, *args, **kwargs):
""" Get matrix of correlation of na """
return self.data_isna.corr(*args, **kwargs)
    def corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna

        .. deprecated:: use plot_corrplot_na instead
        """
        print("This function is deprecated")
        plot_corrmatrix(self.data_isna, *args, **kwargs)
    def plot_corrplot_na(self, *args, **kwargs):
        """ Plot the correlation matrix of the is-na indicator columns
        (args are forwarded to plot_corrmatrix). """
        plot_corrmatrix(self.data_isna, *args, **kwargs)
def plot_density_m(self, colname, subset=None, prefix="is_na_", size=6, *args, **kwargs):
""" Plot conditionnal density plot from all columns or subset based on
is_na_colname 0 or 1"""
colname_na = prefix + colname
density_columns = self.data.columns if subset is None else subset
# filter only numeric values and different values from is_na_col
density_columns = [c for c in density_columns if (
c in self._dfnum and c != colname)]
print(density_columns)
for col in density_columns:
g = sns.FacetGrid(data=self.data_isna_m, col=colname_na, hue=colname_na,
size=size, *args, **kwargs)
g.map(sns.distplot, col)
    def get_isna_mean(self, colname, prefix="is_na_"):
        """ Empirical conditional expectation, std and sem of the other
        variables, grouped on the is-na indicator of `colname`
        (0: not na, 1: na). """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        # the conditioning column itself is excluded from the measures
        measure_var = self.data.columns.tolist()
        measure_var = [c for c in measure_var if c != colname]
        functions = ['mean', 'std', 'sem']
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname)[measure_var].agg(functions).transpose()
def get_isna_ttest_s(self, colname_na, colname, type_test="ks"):
""" Returns tt test for colanme-na and a colname """
index_na = self.data.loc[:, colname_na].isnull()
measure_var = self.data.loc[:, colname].dropna() # drop na vars
if type_test == "ttest":
return ttest_ind(measure_var[index_na], measure_var[~index_na])
elif type_test == "ks":
return ks_2samp(measure_var[index_na], measure_var[~index_na])
def get_isna_ttest(self, colname_na, type_test="ks"):
res = pd.DataFrame()
col_to_compare = [c for c in self._dfnum if c !=
colname_na] # remove colname_na
for col in col_to_compare:
ttest = self.get_isna_ttest_s(colname_na, col, type_test=type_test)
res.loc[col, 'pvalue'] = ttest[1]
res.loc[col, 'statistic'] = ttest[0]
res.loc[col, 'type_test'] = type_test
return res
def isna_summary(self, colname, prefix="is_na_"):
""" Returns summary from one col with describe """
na_colname = "{}{}".format(prefix, colname)
cols_to_keep = list(self.data.columns) + [na_colname]
return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname).describe().transpose()
def delete_narows(self, pct, index=False):
""" Delete rows with more na percentage than > perc in data
Return the index
Arguments
---------
pct : float
percentage of missing values, rows with more na percentage
than > perc are deleted
index : bool, default False
True if you want an index and not a Dataframe
verbose : bool, default False
True if you want to see percentage of data discarded
Returns
--------
- a pandas Dataframe with rows deleted if index=False, index of
columns to delete either
"""
index_missing = self.manymissing(pct=pct, axis=0, index=False)
pct_missing = len(index_missing) / len(self.data.index)
if verbose:
print("There is {0:.2%} rows matching conditions".format(
pct_missing))
if not index:
return self.data.loc[~index_missing, :]
else:
return index_missing
    def fillna_serie(self, colname, threshold_factor=0.1, special_value=None, date_method='ffill'):
        """ Fill the missing values of one column; first matching strategy
        wins: special_value if given, median for floats, mode for
        low-cardinality int columns (see is_int_factor), `date_method`
        fill for datetimes, most common value otherwise. """
        if special_value is not None:
            # "Missing for example"
            return self.data.loc[:, colname].fillna(special_value)
        elif self.data.loc[:, colname].dtype == float:
            # fill with median
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].median())
        elif self.is_int_factor(colname, threshold_factor):
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].mode()[0])
        # fillna for datetime with the method provided by pandas
        elif self.data.loc[:, colname].dtype == '<M8[ns]':
            return self.data.loc[:, colname].fillna(method=date_method)
        else:
            # Fill with most common value
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].value_counts().index[0])
    def basic_naimputation(self, columns_to_process=[], threshold=None):
        """ Return the DataFrame with missing values of the selected columns
        replaced via fillna_serie (median / most common value). Mutates
        self.data in place.

        Arguments
        ---------
        - columns_to_process : list of columns name with na values you wish to fill
          with the fillna_serie function
        - threshold : float, optional; additionally process every column
          whose na percentage is below this value

        Returns
        --------
        - a pandas DataFrame with the columns_to_process filled with the fillna_serie
        """
        # self.data = self.df.copy()
        if threshold:
            columns_to_process = columns_to_process + cserie(self.nacolcount().Napercentage < threshold)
        self.data.loc[:, columns_to_process] = self.data.loc[
            :, columns_to_process].apply(lambda x: self.fillna_serie(colname=x.name))
        return self.data
def split_tt_na(self, colname, index=False):
""" Split the dataset returning the index of test , train """
index_na = self.data.loc[:, colname].isnull()
index_test = (index_na == True)
index_train = (index_na == False)
if index:
return index_test, index_train
else:
return self.data.loc[index_test, :], self.data.loc[index_train, :]
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : The purpose of this class is too automaticely transfrom a DataFrame
into a numpy ndarray in order to use an aglorithm
"""
#########################################################
# Import modules and global helpers
#########################################################
from autoc.explorer import DataExploration, pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from autoc.exceptions import NumericError
class PreProcessor(DataExploration):
    """ DataExploration subclass that prepares a DataFrame for machine
    learning: subtype inference, categorical conversion, basic cleaning. """

    # candidate column subtypes used by the subtype-inference helpers
    subtypes = ['text_raw', 'text_categorical', 'ordinal', 'binary', 'other']

    def __init__(self, *args, **kwargs):
        super(PreProcessor, self).__init__(*args, **kwargs)
        # heuristic thresholds used by _infer_subtype_col
        self.long_str_cutoff = 80
        self.short_str_cutoff = 30
        self.perc_unique_cutoff = 0.2
        self.nb_max_levels = 20
def basic_cleaning(self,filter_nacols=True, drop_col=None,
filter_constantcol=True, filer_narows=True,
verbose=True, filter_rows_duplicates=True, inplace=False):
"""
Basic cleaning of the data by deleting manymissing columns,
constantcol, full missing rows, and drop_col specified by the user.
"""
col_to_remove = []
index_to_remove = []
if filter_nacols:
col_to_remove += self.nacols_full
if filter_constantcol:
col_to_remove += list(self.constantcol())
if filer_narows:
index_to_remove += cserie(self.narows_full)
if filter_rows_duplicates:
index_to_remove += cserie(self.data.duplicated())
if isinstance(drop_col, list):
col_to_remove += drop_col
elif isinstance(drop_col, str):
col_to_remove += [drop_col]
else:
pass
col_to_remove = list(set(col_to_remove))
index_to_remove = list(set(index_to_remove))
if verbose:
print("We are removing the folowing columns : {}".format(col_to_remove))
print("We are removing the folowing rows : {}".format(index_to_remove))
if inplace:
return self.data.drop(index_to_remove).drop(col_to_remove, axis=1)
else:
return self.data.copy().drop(index_to_remove).drop(col_to_remove, axis=1)
    def _infer_subtype_col(self, colname):
        """ This fonction tries to infer subtypes in order to preprocess them
        better for skicit learn. You can find the different subtypes in the class
        variable subtypes

        NOTE(review): several paths fall through and implicitly return None
        (e.g. object columns matching neither string heuristic, binary-check
        on empty data). To be completed ....
        """
        serie_col = self.data.loc[:, colname]
        # exactly two distinct values -> binary, whatever the dtype
        if serie_col.nunique() == 2:
            return 'binary'
        elif serie_col.dtype.kind == 'O':
            # long, mostly-unique strings -> free text; short strings with
            # few levels -> categorical text
            if serie_col.str.len().mean() > self.long_str_cutoff and serie_col.nunique()/len(serie_col) > self.perc_unique_cutoff:
                return "text_long"
            elif serie_col.str.len().mean() <= self.short_str_cutoff and serie_col.nunique() <= self.nb_max_levels:
                return 'text_categorical'
        elif self.is_numeric(colname):
            # low-cardinality ints -> ordinal, everything else -> other
            if serie_col.dtype == int and serie_col.nunique() <= self.nb_max_levels:
                return "ordinal"
            else :
                return "other"
def infer_subtypes(self):
""" Apply _infer_subtype_col to the whole DataFrame as a dictionnary """
return {col: {'dtype': self.data.loc[:,col].dtype, 'subtype':self._infer_subtype_col(col)} for col in self.data.columns}
def infer_categorical_str(self, colname, nb_max_levels=10, threshold_value=0.01):
""" Returns True if we detect in the serie a factor variable
A string factor is based on the following caracteristics :
ther percentage of unicity perc_unique = 0.05 by default.
We follow here the definition of R factors variable considering that a
factor variable is a character variable that take value in a list a levels
Arguments
----------
nb_max_levels: int
the max nb of levels you fix for a categorical variable
threshold_value : float
the nb of of unique value in percentage of the dataframe length
"""
# False for numeric columns
if threshold_value:
max_levels = max(nb_max_levels, threshold_value * self._nrow)
else:
max_levels = nb_max_levels
if self.is_numeric(colname):
return False
# False for categorical columns
if self.data.loc[:, colname].dtype == "category":
return False
unique_value = set()
for i, v in self.data.loc[:, colname], iteritems():
if len(unique_value) >= max_levels:
return False
else:
unique_value.add(v)
return True
def get_factors(self, nb_max_levels=10, threshold_value=None, index=False):
""" Return a list of the detected factor variable, detection is based on
ther percentage of unicity perc_unique = 0.05 by default.
We follow here the definition of R factors variable considering that a
factor variable is a character variable that take value in a list a levels
this is a bad implementation
Arguments
----------
nb_max_levels: int
the max nb of levels you fix for a categorical variable.
threshold_value : float
the nb of of unique value in percentage of the dataframe length.
index: bool
False, returns a list, True if you want an index.
"""
res = self.data.apply(lambda x: self.infer_categorical_str(x))
if index:
return res
else:
return cserie(res)
def factors_to_categorical(self, inplace=True, verbose=True, *args, **kwargs):
factors_col = self.get_factors(*args, **kwargs)
if verbose:
print("We are converting following columns to categorical :{}".format(
factors_col))
if inplace:
self.df.loc[:, factors_col] = self.df.loc[:, factors_col].astype(category)
else:
return self.df.loc[:, factors_col].astype(category)
def remove_category(self, colname, nb_max_levels, replace_value='other', verbose=True):
""" Replace a variable with too many categories by grouping minor categories to one """
if self.data.loc[:, colname].nunique() < nb_max_levels:
if verbose:
print("{} has not been processed because levels < {}".format(
colname, nb_max_levels))
else:
if self.is_numeric(colname):
raise NumericError(
'{} is a numeric columns you cannot use this function'.format())
top_levels = self.data.loc[
:, colname].value_counts[0:nb_max_levels].index
self.data.loc[~self.data.loc[:, colname].isin(
top_levels), colname] = replace_value
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Get data from https://github.com/ericfourrier/autoc-datasets
"""
import pandas as pd
def get_dataset(name, *args, **kwargs):
    """Get a dataset from the online repo
    https://github.com/ericfourrier/autoc-datasets (requires internet).

    Parameters
    ----------
    name : str
        Name of the dataset 'name.csv' (without the extension).
    *args, **kwargs
        Forwarded unchanged to pandas.read_csv.

    Returns
    -------
    pandas.DataFrame
        The parsed CSV content fetched from the raw GitHub URL.
    """
    path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
    return pd.read_csv(path, *args, **kwargs)
|
{
"imported_by": [],
"imports": [
"/autoc/explorer.py",
"/autoc/naimputer.py",
"/autoc/preprocess.py",
"/autoc/utils/getdata.py"
]
}
|
ericfourrier/auto-clean
|
/autoc/explorer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for Modeling with pandas, numpy and skicit-learn.
The Goal of this module is to rely on a dataframe structure for modelling g
"""
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result into a attributes
When you used a method the output will be stored in a instance attribute so you
don't have to compute the result again.
"""
    def __init__(self, data, copy=False):
        """
        Parameters
        ----------
        data : pandas.DataFrame
            the data you want explore
        copy: bool
            True if you want make a copy of DataFrame, default False

        Examples
        --------
        explorer = DataExploration(data = your_DataFrame)
        explorer.structure() : global structure of your DataFrame
        explorer.psummary() to get the a global snapchot of the different stuff detected
        data_cleaned = explorer.basic_cleaning() to clean your data.
        """
        assert isinstance(data, pd.DataFrame)
        self.is_data_copy = copy
        self.data = data if not self.is_data_copy else data.copy()
        # if not self.label:
        #     print("""the label column is empty the data will be considered
        #     as a dataset of predictors""")
        self._nrow = len(self.data.index)
        self._ncol = len(self.data.columns)
        # boolean mask and name list of numeric (int/float) columns
        self._dfnumi = (self.data.dtypes == float) | (
            self.data.dtypes == int)
        self._dfnum = cserie(self._dfnumi)
        # boolean mask and name list of object (string) columns
        self._dfchari = (self.data.dtypes == object)
        self._dfchar = cserie(self._dfchari)
        # lazily-filled caches populated by the corresponding methods
        self._nacolcount = pd.DataFrame()
        self._narowcount = pd.DataFrame()
        self._count_unique = pd.DataFrame()
        self._constantcol = []
        self._dupcol = []
        self._nearzerovar = pd.DataFrame()
        self._corrcolumns = []
        self._dict_info = {}
        self._structure = pd.DataFrame()
        self._string_info = ""
        # lowercase tokens treated as user-encoded missing values
        self._list_other_na = {'unknown', 'na',
                               'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
Notes
------
df._get_numeric_data() is a primitive from pandas
to get only numeric data
"""
dtype_col = self.data.loc[:, colname].dtype
return (dtype_col == int) or (dtype_col == float)
def is_int_factor(self, colname, threshold=0.1):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
threshold : float
colname is an 'int_factor' if the number of
unique values < threshold * nrows
"""
dtype_col = self.data.loc[:, colname].dtype
if dtype_col == int and self.data.loc[:, colname].nunique() <= (threshold * self.data.shape[0]):
return True
else:
return False
def to_lowercase(self):
""" Returns a copy of dataset with data to lower """
return self.data.applymap(lambda x: x.lower() if type(x) == str else x)
def where_numeric(self):
""" Returns a Boolean Dataframe with True for numeric values False for other """
return self.data.applymap(lambda x: isinstance(x, (int, float)))
def count_unique(self):
""" Return a serie with the number of unique value per columns """
if len(self._count_unique):
return self._count_unique
self._count_unique = self.data.apply(lambda x: x.nunique(), axis=0)
return self._count_unique
def sample_df(self, pct=0.05, nr=10, threshold=None):
""" sample a number of rows of a dataframe = min(max(0.05*nrow(self,nr),threshold)"""
a = max(int(pct * float(len(self.data.index))), nr)
if threshold:
a = min(a, threshold)
return self.data.loc[permutation(self.data.index)[:a],:]
def sign_summary(self, subset=None):
"""
Returns the number and percentage of positive and negative values in
a column, a subset of columns or all numeric columns of the dataframe.
Parameters
----------
subset : label or list
Column name or list of column names to check.
Returns
-------
summary : pandas.Series or pandas.DataFrame
Summary of the signs present in the subset
"""
if subset:
subs = subs if isinstance(subs, list) else [subs]
if sum(col not in self._dfnum for col in subs) > 0:
raise NotNumericColumn('At least one of the columns you passed ' \
'as argument are not numeric.')
else:
subs = self._dfnum
summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
'NumOfPositive', 'PctOfPositive'])
summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
return summary
    @property
    def total_missing(self):
        """ Count the total number of missing values (summed over columns) """
        # return np.count_nonzero(self.data.isnull().values) # optimized for
        # speed
        return self.nacolcount().Nanumber.sum()
def nacolcount(self):
""" count the number of missing values per columns """
if len(self._nacolcount):
return self._nacolcount
self._nacolcount = self.data.isnull().sum(axis=0)
self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
self._nacolcount['Napercentage'] = self._nacolcount[
'Nanumber'] / (self._nrow)
return self._nacolcount
def narowcount(self):
""" count the number of missing values per columns """
if len(self._narowcount):
return self._narowcount
self._narowcount = self.data.isnull().sum(axis=1)
self._narowcount = pd.DataFrame(
self._narowcount, columns=['Nanumber'])
self._narowcount['Napercentage'] = self._narowcount[
'Nanumber'] / (self._ncol)
return self._narowcount
def detect_other_na(self, verbose=True, auto_replace=False):
""" Detect missing values encoded by the creator of the dataset
like 'Missing', 'N/A' ...
Parameters
----------
verbose : bool
True if you want to print some infos
auto_replace: bool
True if you want replace this value by np.nan, default False
Returns
-------
an DataFrame of boolean if not auto_replace else cleaned DataFrame with
self._list_other_na replaced by np.nan
Notes
------
* You can use na_values parameter in pandas.read_csv to specify the missing
values to convert to nan a priori
* Speed can be improved
"""
res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
print("We detected {} other type of missing values".format(res.sum().sum()))
if auto_replace:
return self.data.where((res == False), np.nan)
else:
return res
    @property
    def nacols_full(self):
        """ Returns a list of columns with only missing values """
        # a column is fully missing when its NaN count equals the row count
        return cserie(self.nacolcount().Nanumber == self._nrow)
    @property
    def narows_full(self):
        """ Returns a boolean Series flagging rows with only missing values """
        return self.narowcount().Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
    def manymissing(self, pct=0.9, axis=0):
        """ Identify columns (axis=0) or rows (axis=1) of the dataframe with
        many missing values ( >= pct).

        axis=0 returns a list of column names (via cserie); axis=1 returns a
        boolean Series over rows.
        """
        if axis == 1:
            self._manymissingrow = self.narowcount()
            self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
            return self._manymissingrow
        elif axis == 0:
            self._manymissingcol = self.nacolcount()
            self._manymissingcol = cserie(
                self._manymissingcol['Napercentage'] >= pct)
            return self._manymissingcol
        else:
            raise ValueError("Axis should be 1 for rows and o for columns")
def df_len_string(self, drop_num=False):
""" Return a Series with the max of the length of the string of string-type columns """
if drop_num:
return self.data.drop(self._dfnum, axis=1).apply(lambda x: np.max(x.str.len()), axis=0)
else:
return self.data.apply(lambda x: np.max(x.str.len()) if x.dtype.kind =='O' else np.nan , axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
""" identify id or key columns as an index if index_format = True or
as a list if index_format = False """
if not dropna:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: len(x.unique()) == len(x), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
else:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: x.nunique() == len(x.dropna()), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
    def constantcol(self, **kwargs):
        """ Identify constant columns (cached in self._constantcol) """
        # sample to reduce computation time
        if len(self._constantcol):
            return self._constantcol
        # cheap test on a sample first...
        col_to_keep = self.sample_df(
            **kwargs).apply(lambda x: len(x.unique()) == 1, axis=0)
        if len(cserie(col_to_keep)) == 0:
            return []
        # ...then confirm candidates on the full data
        self._constantcol = cserie(self.data.loc[:, col_to_keep].apply(
            lambda x: len(x.unique()) == 1, axis=0))
        return self._constantcol
def constantcol2(self, **kwargs):
""" identify constant columns """
return cserie((self.data == self.data.ix[0]).all())
    def factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """ Return a list of the detected factor variables.

        We follow here the definition of R factor variables, considering that
        a factor variable is a character variable that takes values in a list
        of levels. This is a bad implementation.

        Arguments
        ----------
        nb_max_levels: the max nb of levels you fix for a categorical variable
        threshold_value : the nb of unique values in percentage of the dataframe length
        index : if you want the result as a boolean Series or a list
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels

        def helper_factor(x, num_var=self._dfnum):
            # short-circuit scan: give up as soon as max_levels distinct
            # values have been seen
            unique_value = set()
            if x.name in num_var:
                return False
            else:
                for e in x.values:
                    if len(unique_value) >= max_levels:
                        return False
                    else:
                        unique_value.add(e)
                return True

        if index:
            return self.data.apply(lambda x: helper_factor(x))
        else:
            return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in xrange(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in xrange(nb_quantiles + 1)])
else:
raise("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
""" this function gives you a all the quantiles
of the numeric variables of the dataframe
only_numeric will calculate it only for numeric variables,
for only_numeric = False you will get NaN value for non numeric
variables """
binq = 1.0 / nb_quantiles
if only_numeric:
return self.data.loc[:, self._dfnumi].quantile([binq * i for i in xrange(nb_quantiles + 1)])
else:
return self.data.quantile([binq * i for i in xrange(nb_quantiles + 1)])
def numeric_summary(self):
""" provide a more complete sumary than describe, it is using only numeric
value """
df = self.data.loc[:, self._dfnumi]
func_list = [df.count(), df.min(), df.quantile(0.25),
df.quantile(0.5), df.mean(),
df.std(), df.mad(), df.skew(),
df.kurt(), df.quantile(0.75), df.max()]
results = [f for f in func_list]
return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
'Median', 'Mean', 'Std', 'Mad', 'Skewness',
'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
""" this function will try to infer the type of the columns of data"""
return self.data.apply(lambda x: pd.lib.infer_dtype(x.values))
    def structure(self, threshold_factor=10):
        """ Return a summary of the structure of the pandas DataFrame:
        dtype of each variable, number of missing values, number of unique
        values, constant/full-NA/key flags and max string length. Cached. """
        if len(self._structure):
            return self._structure
        dtypes = self.data.dtypes
        nacolcount = self.nacolcount()
        nb_missing = nacolcount.Nanumber
        perc_missing = nacolcount.Napercentage
        nb_unique_values = self.count_unique()
        dtype_infer = self.infer_types()
        # R-like dtype: 'numeric', 'character', or 'factor' (few-valued text)
        dtypes_r = self.data.apply(lambda x: "character")
        dtypes_r[self._dfnumi] = "numeric"
        dtypes_r[(dtypes_r == 'character') & (
            nb_unique_values <= threshold_factor)] = 'factor'
        constant_columns = (nb_unique_values == 1)
        na_columns = (perc_missing == 1)
        # a key column has as many distinct values as rows
        is_key = nb_unique_values == self._nrow
        string_length = self.df_len_string(drop_num=False)
        # is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
        dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                    'nb_missing': nb_missing, 'is_key': is_key,
                    'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                    'constant_columns': constant_columns, 'na_columns': na_columns,
                    'dtype_infer': dtype_infer, 'string_length': string_length}
        self._structure = pd.concat(dict_str, axis=1)
        self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
                                                  'nb_unique_values', 'constant_columns',
                                                  'na_columns', 'is_key', 'dtype_infer', 'string_length']]
        return self._structure
    def findupcol(self, threshold=100, **kwargs):
        """ Find duplicated columns and return the result as a list of lists
        of mutually identical column names (cached in self._dupcol).

        NOTE(review): the `threshold` parameter is ignored — sample_df is
        called with the literal threshold=100; confirm before relying on it.
        """
        df_s = self.sample_df(threshold=100, **kwargs).T
        dup_index_s = (df_s.duplicated()) | (
            df_s.duplicated(keep='last'))
        if len(cserie(dup_index_s)) == 0:
            return []
        # confirm the sampled candidates on the full data
        df_t = (self.data.loc[:, dup_index_s]).T
        dup_index = df_t.duplicated()
        dup_index_complet = cserie(
            (dup_index) | (df_t.duplicated(keep='last')))

        l = []
        for col in cserie(dup_index):
            # group together all columns equal to `col` on every row
            index_temp = self.data[dup_index_complet].apply(
                lambda x: (x == self.data[col])).sum() == self._nrow
            temp = list(self.data[dup_index_complet].columns[index_temp])
            l.append(temp)
        self._dupcol = l
        return self._dupcol
def finduprow(self, subset=[]):
""" find duplicated rows and return the result a sorted dataframe of all the
duplicates
subset is a list of columns to look for duplicates from this specific subset .
"""
if sum(self.data.duplicated()) == 0:
print("there is no duplicated rows")
else:
if subset:
dup_index = (self.data.duplicated(subset=subset)) | (
self.data.duplicated(subset=subset, keep='last'))
else:
dup_index = (self.data.duplicated()) | (
self.data.duplicated(keep='last'))
if subset:
return self.data[dup_index].sort(subset)
else:
return self.data[dup_index].sort(self.data.columns[0])
    def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
        """ identify predictors with near-zero variance.
        freq_cut: cutoff ratio of frequency of most common value to second
        most common value.
        unique_cut: cutoff percentage of unique value over total number of
        samples.
        save_metrics: if False, print the metrics dataframe and return the
        index of near-zero-variance columns; if True, return the whole
        metrics dataframe.
        """
        nb_unique_values = self.count_unique()
        percent_unique = 100 * nb_unique_values / self._nrow

        def helper_freq(x):
            # ratio of the most common value's count to the runner-up's
            if nb_unique_values[x.name] == 0:
                return 0.0
            elif nb_unique_values[x.name] == 1:
                return 1.0
            else:
                return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]

        freq_ratio = self.data.apply(helper_freq)
        # zero variance: empty or single-valued columns
        zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
        nzv = ((freq_ratio >= freq_cut) & (
            percent_unique <= unique_cut)) | (zerovar)
        if save_metrics:
            return pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio, 'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns)
        else:
            print(pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio,
                                'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns))
            return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
"""
implementation of the Recursive Pairwise Elimination.
The function finds the highest correlated pair and removes the most
highly correlated feature of the pair, then repeats the process
until the threshold 'cutoff' is reached.
will return a dataframe is 'data_frame' is set to True, and the list
of predictors to remove oth
Adaptation of 'findCorrelation' function in the caret package in R.
"""
res = []
df = self.data.copy(0)
cor = df.corr(method=method)
for col in cor.columns:
cor[col][col] = 0
max_cor = cor.max()
if print_mode:
print(max_cor.max())
while max_cor.max() > cutoff:
A = max_cor.idxmax()
B = cor[A].idxmax()
if cor[A].mean() > cor[B].mean():
cor.drop(A, 1, inplace=True)
cor.drop(A, 0, inplace=True)
res += [A]
else:
cor.drop(B, 1, inplace=True)
cor.drop(B, 0, inplace=True)
res += [B]
max_cor = cor.max()
if print_mode:
print(max_cor.max())
if data_frame:
return df.drop(res, 1)
else:
return res
self._corrcolumns = res
    def get_infos_consistency(self):
        """ Update self._dict_info and return infos about duplicated rows and
        columns, constant columns, fully-missing rows and columns.

        Each entry carries a 'value', a severity 'level', a suggested
        'action' and a human-readable 'comment'. """

        infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True), 'level': 'ERROR',
                                     'action': 'delete', 'comment': 'You should delete this rows with df.drop_duplicates()'},
                 'dup_columns': {'value': self.findupcol(), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.findupcol())},
                 'constant_columns': {'value': self.constantcol(), 'level': 'WARNING',
                                      'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.constantcol())},
                 'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete this rows with df.drop_duplicates()'},
                 'nacols_full': {'value': self.nacols_full, 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.nacols_full)}
                 }
        # update
        self._dict_info.update(infos)
        return infos
    def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
        """ Update self._dict_info and return infos about missing values:
        total count, overall percentage, heavily-missing columns
        (> manymissing_ph) and lightly-missing columns (<= manymissing_pl). """
        nacolcount_p = self.nacolcount().Napercentage
        infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
                 'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
                 'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
                 'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
                 }
        # update
        self._dict_info.update(infos)
        return infos
    def print_infos(self, infos="consistency", print_empty=False):
        """ pprint of get_infos

        Parameters
        ----------
        infos : str
            Kind of report; only "consistency" is currently handled.
        print_empty: bool
            False if you don't want print the empty infos (
            no missing colum for example)"""
        if infos == "consistency":
            dict_infos = self.get_infos_consistency()
        if not print_empty:
            # keep only entries whose 'value' is non-empty
            dict_infos = {k: v for k, v in dict_infos.items() if len(v['value']) > 0}
        pprint(dict_infos)
    def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
                 threshold=100, string_threshold=40, dynamic=False):
        """
        Print a summary of the dataset, based on the detection functions of
        this class (missing values, keys, duplicated/constant columns,
        near-zero-variance columns, correlated columns, big strings).
        - Output : python print
        dynamic=True prints each section as it is computed; otherwise the
        results dictionary and the formatted string are also stored in the
        private attributes self._dict_info and self._string_info.
        """
        nacolcount_p = self.nacolcount().Napercentage
        if dynamic:
            print('there are {0} duplicated rows\n'.format(
                self.data.duplicated().sum()))
            print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                          cserie((nacolcount_p > manymissing_ph))))
            print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
                manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
            print('the detected keys of the dataset are:\n{0} \n'.format(
                self.detectkey()))
            print('the duplicated columns of the dataset are:\n{0}\n'.format(
                self.findupcol(threshold=100)))
            print('the constant columns of the dataset are:\n{0}\n'.format(
                self.constantcol()))
            print('the columns with nearzerovariance are:\n{0}\n'.format(
                list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
            print('the columns highly correlated to others to remove are:\n{0}\n'.format(
                self.findcorr(data_frame=False)))
            print('these columns contains big strings :\n{0}\n'.format(
                cserie(self.df_len_string() > string_threshold)))
        else:
            self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                               'many_missing_percentage': manymissing_ph,
                               'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                               'low_missing_percentage': manymissing_pl,
                               'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                               'keys_detected': self.detectkey(),
                               'dup_columns': self.findupcol(threshold=100),
                               'constant_columns': self.constantcol(),
                               'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                               'high_correlated_col': self.findcorr(data_frame=False),
                               'big_strings_col': cserie(self.df_len_string() > string_threshold)
                               }
            self._string_info = u"""
there are {nb_duplicated_rows} duplicated rows\n
the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
the columns with less than {low_missing_percentage:.2%}% manymissing values are :\n{lowmissing_columns} \n
you should fill them with median or most common value\n
the detected keys of the dataset are:\n{keys_detected} \n
the duplicated columns of the dataset are:\n{dup_columns}\n
the constant columns of the dataset are:\n{constant_columns}\n
the columns with nearzerovariance are:\n{nearzerovar_columns}\n
the columns highly correlated to others to remove are:\n{high_correlated_col}\n
these columns contains big strings :\n{big_strings_col}\n
""".format(**self._dict_info)
            print(self._string_info)
    def metadata(self):
        """ Return a dict/json full of infos about the dataset: memory size,
        column names (raw and lowercased), shape, per-column structure and
        numeric summary statistics. """
        meta = {}
        meta['mem_size'] = self.data.memory_usage(index=True).sum()  # in bytes
        meta['columns_name'] = self.data.columns.tolist()
        meta['columns_name_n'] = [e.lower() for e in self.data.columns]
        meta['nb_rows'] = self.data.shape[0]
        meta['nb_columns'] = self.data.shape[1]
        # drop dtype_p for mongodb compatibility
        structure_data = self.structure().drop(labels='dtypes_p', axis=1)
        structure_data = structure_data.to_dict('index')
        meta['structure'] = structure_data
        meta['numeric_summary'] = self.numeric_summary().to_dict('index')
        return meta
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Create toolbox functions to use for the different pieces of code ot the package
"""
from numpy.random import normal
from numpy.random import choice
import time
import pandas as pd
import numpy as np
import functools
def print_section(section_name, width=120):
    """ Print a centered section header for reports in DataExplora.

    The name is wrapped in single spaces and centered within a line of
    '=' characters of total length *width*.
    """
    section_name = ' ' + section_name + ' '
    # BUG FIX: the original format spec used '{ }' (a replacement field
    # named ' '), which raises KeyError; '{}' is the auto-numbered field
    # that consumes the width argument.
    print('{:=^{}}'.format(section_name, width))
# def get_dataset(name, *args, **kwargs):
# """Get a dataset from the online repo
# https://github.com/ericfourrier/autoc-datasets (requires internet).
#
# Parameters
# ----------
# name : str
# Name of the dataset 'name.csv'
# """
# path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
# return pd.read_csv(path, *args, **kwargs)
def flatten_list(x):
    """Recursively flatten arbitrarily nested lists; a non-list becomes [x]."""
    if not isinstance(x, list):
        return [x]
    flat = []
    for item in x:
        flat.extend(flatten_list(item))
    return flat
def cserie(serie, index=False):
    """Return the labels where a boolean Series is True.

    index=True returns a pandas Index, otherwise a plain list.
    """
    matching = serie[serie].index
    return matching if index else matching.tolist()
def removena_numpy(array):
    """Return *array* with NaN entries filtered out."""
    keep_mask = np.logical_not(np.isnan(array))
    return array[keep_mask]
def common_cols(df1, df2):
    """ Return the intersection of the column names of the two dataframes
    (order is unspecified, as with set intersection). """
    shared = set(df1.columns).intersection(df2.columns)
    return list(shared)
def bootstrap_ci(x, n=300, ci=0.95):
    """
    Compute a bootstrap percentile confidence interval for the mean of a
    numpy array (depends only on numpy).

    Arguments
    ---------
    x : a numpy ndarray
    n : the number of boostrap samples
    ci : the percentage confidence (float) interval in ]0,1[

    Return
    -------
    a tuple (ci_inf,ci_up); (nan, nan) when x contains only NaN
    """
    low_per = 100 * (1 - ci) / 2
    high_per = 100 * ci + low_per
    x = removena_numpy(x)
    if not len(x):
        # nothing to resample from: every value was NaN
        return (np.nan, np.nan)
    # n resamples with replacement, mean of each -> bootstrap distribution
    bootstrap_samples = choice(a=x, size=(
        len(x), n), replace = True).mean(axis = 0)
    return np.percentile(bootstrap_samples, [low_per, high_per])
def clock(func):
    """ Decorator to measure the duration of each test of the unittest suite;
    extensible to any kind of function, it just adds a print of the elapsed
    time and forwards the return value. """
    @functools.wraps(func)  # preserve the wrapped function's metadata
    def clocked(*args, **kwargs):
        # BUG FIX(generalization): the original silently dropped keyword
        # arguments; **kwargs is now forwarded.
        t0 = time.time()
        result = func(*args, **kwargs)
        elapsed = (time.time() - t0) * 1000  # in ms
        print('elapsed : [{0:0.3f}ms]'.format(elapsed))
        return result
    return clocked
def cached_property(fun):
    """A memoize decorator for class properties.

    The computed value is stored in a per-instance dict `self._cache`
    keyed by the wrapped function, so it is evaluated at most once per
    instance."""
    @functools.wraps(fun)
    def get(self):
        try:
            # fast path: value already computed for this instance
            return self._cache[fun]
        except AttributeError:
            # first access on this instance: create the cache dict
            self._cache = {}
        except KeyError:
            # cache exists but this property was never computed
            pass
        ret = self._cache[fun] = fun(self)
        return ret
    return property(get)
def create_test_df():
    """ Create a 1000-row test pandas DataFrame for the unittest suite.

    Columns cover the situations the explorer must detect: keys, full-NA
    columns, constant columns, factors, near-zero variance, duplicated
    columns, heavy/light missingness, outliers, datetimes and
    user-encoded missing values ('other_na'). Several columns are random
    (choice/normal), so contents differ between calls. """
    test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [
        10 * i for i in range(1, 1001)]})
    test_df['na_col'] = np.nan
    test_df['id_na'] = test_df.id
    test_df.loc[1:3, 'id_na'] = np.nan
    test_df['constant_col'] = 'constant'
    test_df['constant_col_num'] = 0
    test_df['character_factor'] = [
        choice(list('ABCDEFG')) for _ in range(1000)]
    test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]
    # one rare value among a dominant one -> near-zero variance
    test_df['nearzerovar_variable'] = 'most_common_value'
    test_df.loc[0, 'nearzerovar_variable'] = 'one_value'
    test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]
    test_df['character_variable'] = [str(i) for i in range(1000)]
    test_df['duplicated_column'] = test_df.id
    test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700
    test_df['character_variable_fillna'] = ['A'] * \
        300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300
    test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200
    test_df['num_variable'] = 100.0
    test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]
    test_df['outlier'] = normal(size=1000)
    test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]
    test_df['outlier_na'] = test_df['outlier']
    test_df.loc[[300, 500], 'outlier_na'] = np.nan
    test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')
    test_df['None_100'] = [1] * 900 + [None] * 100
    test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100
    test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300
    test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300
    test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \
        ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \
        ['Unknown'] * 100 + ['do_not_touch'] * 200
    return test_df
def simu(pmf, size):
    """ Draw samples from a discrete distribution; pmf is supposed to
    be in ascending order of labels.

    Parameters
    ----------
    pmf : tuple(ndarray, ndarray)
        a tuple with (labels,probs); labels are supposed to be in ascending order
    size: int
        the number of samples you want to generate

    Returns
    ------
    ndarray (element type depends on the type of labels)
        random samples drawn from the pmf
    """
    labels, probs = pmf[0], pmf[1]
    u = np.random.rand(size)
    # inverse-CDF sampling: each uniform draw picks the first cumulative
    # bucket it falls into
    cumulative_sum = probs.cumsum()
    return labels[(u >= cumulative_sum[:, None]).argmin(axis=0)]
def shuffle_df(df, reindex=False):
    """Return a randomly shuffled copy of *df*.

    reindex=True additionally resets the index (the old index becomes a
    column, as pandas reset_index does by default).
    """
    if reindex:
        return df.sample(frac=1).reset_index()
    return df.sample(frac=1)
def random_pmf(nb_labels):
    """ Return a random probability mass function over nb_labels outcomes
    (non-negative weights summing to 1). """
    weights = np.random.random(nb_labels)
    return weights / weights.sum()
def random_histogram(nb_labels, nb_observations):
    """ Return a random normalized histogram over nb_labels bins: counts are
    drawn uniformly from [0, nb_observations) then divided by their sum.

    NOTE(review): if every drawn count is 0 the division yields NaN —
    confirm whether callers can hit that edge case.
    """
    random_histo = np.random.choice(np.arange(0, nb_observations), nb_labels)
    return random_histo / np.sum(random_histo)
def keep_category(df, colname, pct=0.05, n=5):
    """Sample a small protected subset of rows for every level of a column.

    Parameters
    ----------
    pct : float
        Keep at least pct of the nb of observations having a specific category
    n : int
        Keep at least n of the variables having a specific category

    Returns
    --------
    pd.Index
        index of rows to keep (at least one row per level)
    """
    protected = []
    per_level = df.groupby(colname).apply(lambda g: g.sample(
        max(1, min(g.shape[0], n, int(g.shape[0] * pct)))).index)
    for level_index in per_level:
        protected.extend(level_index.tolist())
    return pd.Index(protected)
# for k, i in df.groupby(colname).groups:
# to_keep += np.random.choice(i, max(1, min(g.shape[0], n, int(g.shape[0] * pct))), replace=False)
# return to_keep
#
def simulate_na_col(df, colname, n=None, pct=None, weights=None,
                    safety=True, *args, **kwargs):
    """ Simulate missing values (in place) in a column of categorical variables

    Parameters
    ----------
    df : pd.DataFrame
        frame modified in place
    colname : str, list or pd.Index
        column name, or collection of names processed recursively
    n : int
        number of values to replace with np.nan
    pct : float
        fraction of rows to replace; only used when n is None
    weights : array-like, optional
        pmf over the column levels biasing which rows go missing;
        a random pmf is drawn when omitted
    safety : bool
        True to keep at least a few rows per level (see keep_category);
        extra *args/**kwargs are forwarded to keep_category

    Notes
    -----
    Fix issue with category variable"""
    if (n is None) and (pct is not None):
        # be careful here especially if cols has a lot of missing values
        n = int(pct * df.shape[0])
    # BUG FIX: pd.core.index.Index was removed from modern pandas;
    # pd.Index is the stable public name.
    if isinstance(colname, (pd.Index, list)):
        for c in colname:
            simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights)
    else:
        if safety:
            tokeep = keep_category(df, colname, *args, **kwargs)
            # we are not sampling from tokeep
            col = df.loc[:, colname].drop(tokeep)
        else:
            # BUG FIX: the original left `col` undefined when safety=False
            col = df.loc[:, colname]
        col = col.dropna()
        print(colname)
        col_distribution = col.value_counts(normalize=True, sort=False)
        labels = col_distribution.index  # characters
        # generate random pmf; explicit None-check because `weights` may be
        # an ndarray, whose truth value is ambiguous
        pmf_na = weights if weights is not None else random_pmf(len(labels))
        na_distribution = pd.Series(data=pmf_na, index=labels)
        # draw samples from this pmf
        weights_na = col.apply(lambda x: na_distribution[x])
        weights_na /= weights_na.sum()
        index_to_replace = col.sample(
            n=n, weights=weights_na, replace=False).index
        df.loc[index_to_replace, colname] = np.nan
def get_test_df_complete():
    """ Get the full test dataset from the Lending Club open source database;
    the purpose of this function is to be used in a demo ipython notebook
    (downloads and parses a zipped csv, so it needs network access). """
    import requests
    from zipfile import ZipFile
    # BUG FIX: Python 3 has no StringIO module, and the HTTP payload is
    # binary anyway -- wrap it in io.BytesIO for ZipFile.
    from io import BytesIO
    zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
    r = requests.get(zip_to_download)
    zipfile = ZipFile(BytesIO(r.content))
    file_csv = zipfile.namelist()[0]
    # we are using the c parser for speed
    df = pd.read_csv(zipfile.open(file_csv), skiprows=[0], na_values=['n/a', 'N/A', ''],
                     parse_dates=['issue_d', 'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d'])
    zipfile.close()
    df = df[:-2]
    nb_row = float(len(df.index))
    df['na_col'] = np.nan
    df['constant_col'] = 'constant'
    df['duplicated_column'] = df.id
    # first ~30% of the rows get a value, the rest stay missing
    df['many_missing_70'] = np.nan
    df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1
    # bad = 1 unless the loan is in a good status
    df['bad'] = 1
    index_good = df['loan_status'].isin(
        ['Fully Paid', 'Current', 'In Grace Period'])
    df.loc[index_good, 'bad'] = 0
    return df
def kl(p, q):
    """Kullback-Leibler divergence for discrete distributions.

    Parameters
    ----------
    p: ndarray
        probability mass function
    q: ndarray
        probability mass function

    Returns
    --------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i)), with the convention
    that terms where p(i) == 0 contribute 0.
    """
    terms = np.where(p != 0, p * np.log(p / q), 0)
    return np.sum(terms)
def kl_series(serie1, serie2, dropna=True):
    """KL divergence between the empirical distributions of two pandas Series."""
    if dropna:
        serie1 = serie1.dropna()
        serie2 = serie2.dropna()
    p = serie1.value_counts(normalize=True).values
    q = serie2.value_counts(normalize=True).values
    # inlined kl(): sum(p_i * log(p_i / q_i)) with 0 * log(0) treated as 0
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def plot_hist_na(df, colname):
    """Plot histograms of every numeric column, grouped by whether
    ``colname`` is missing (0: present, 1: missing)."""
    df_h = df.copy()
    na_name = "is_na_{}".format(colname)
    # 0/1 indicator for missingness of `colname`, used as group key
    df_h[na_name] = df_h[colname].isnull().astype(int)
    # keep only int/float columns (cserie turns the boolean mask into names)
    measure_col = cserie((df.dtypes == int) | (df.dtypes == float))
    df_h.groupby(na_name)[measure_col].hist()
def psi(bench, target, group, print_df=True):
    """ This function return the Population Stability Index, quantifying if the
    distribution is stable between two states.
    This statistic make sense and works is only working for numeric variables
    for bench and target.
    Params:
    - bench is a numpy array with the reference variable.
    - target is a numpy array of the new variable.
    - group is the number of group you want consider.
    Returns a dict {'data': DataFrame, 'statistic': float} when print_df is
    True, otherwise just the float statistic.
    """
    # Quantile edges of the benchmark ("nearest" snaps edges to observed
    # values).
    # NOTE(review): the `interpolation=` keyword is deprecated in
    # NumPy >= 1.22 (renamed to `method=`) -- confirm the targeted version.
    labels_q = np.percentile(
        bench, [(100.0 / group) * i for i in range(group + 1)], interpolation="nearest")
    # This is the right approach when you have not a lot of unique value
    ben_pct = (pd.cut(bench, bins=np.unique(labels_q),
                      include_lowest=True).value_counts()) / len(bench)
    target_pct = (pd.cut(target, bins=np.unique(labels_q),
                         include_lowest=True).value_counts()) / len(target)
    target_pct = target_pct.sort_index()  # sort the index
    ben_pct = ben_pct.sort_index()  # sort the index
    # PSI = sum over bins of (target% - bench%) * ln(target% / bench%)
    psi = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))
    # Print results for better understanding
    if print_df:
        results = pd.DataFrame({'ben_pct': ben_pct.values,
                                'target_pct': target_pct.values},
                               index=ben_pct.index)
        return {'data': results, 'statistic': psi}
    return psi
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : File with all custom exceptions
"""
class NotNumericColumn(Exception):
    """ The column should be numeric """
    # Raised by numeric-only operations (e.g. DataExploration.sign_summary)
    # when they receive a non-numeric column.
    pass
class NumericError(Exception):
    """ The column should not be numeric """
    # Counterpart of NotNumericColumn: raised when an operation expects a
    # non-numeric column but receives a numeric one.
    pass
# class NotFactor
|
{
"imported_by": [
"/test.py",
"/autoc/naimputer.py",
"/autoc/preprocess.py",
"/autoc/__init__.py",
"/autoc/outliersdetection.py"
],
"imports": [
"/autoc/utils/helpers.py",
"/autoc/exceptions.py"
]
}
|
ericfourrier/auto-clean
|
/autoc/naimputer.py
|
from autoc.explorer import DataExploration, pd
from autoc.utils.helpers import cserie
import seaborn as sns
import matplotlib.pyplot as plt
#from autoc.utils.helpers import cached_property
from autoc.utils.corrplot import plot_corrmatrix
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats.mstats import ks_2samp
def missing_map(df, nmax=100, verbose=True, yticklabels=False, figsize=(15, 11), *args, **kwargs):
    """ Returns missing map plot like in amelia 2 package in R

    Parameters
    ----------
    df : pd.DataFrame
        data whose missingness pattern is plotted
    nmax : int
        maximum number of rows shown; larger frames are sampled down
    verbose : bool
        currently unused (kept for backward compatibility)
    yticklabels, figsize, *args, **kwargs
        forwarded to matplotlib / seaborn.heatmap
    """
    f, ax = plt.subplots(figsize=figsize)
    # BUG FIX: the original only defined df_s when nmax < df.shape[0],
    # raising NameError for small frames.
    df_s = df.sample(n=nmax) if nmax < df.shape[0] else df
    return sns.heatmap(df_s.isnull(), yticklabels=yticklabels, vmax=1, *args, **kwargs)
# class ColumnNaInfo
class NaImputer(DataExploration):
    """Missing-value exploration and imputation on top of DataExploration.

    On construction it materialises ``self.data_isna``: one 0/1 indicator
    column (prefixed ``is_na_``) per column that contains missing values.
    """

    def __init__(self, *args, **kwargs):
        super(NaImputer, self).__init__(*args, **kwargs)
        # precompute the 0/1 missingness indicator frame
        self.get_data_isna()

    @property
    def nacols(self):
        """ Returns a list of column with at least one missing values """
        return cserie(self.nacolcount().Nanumber > 0)

    @property
    def nacols_i(self):
        """ Returns the index of column with at least one missing values """
        # NOTE(review): currently identical to `nacols` (returns a list);
        # probably intended cserie(..., index=True) -- confirm callers first.
        return cserie(self.nacolcount().Nanumber > 0)

    def get_overlapping_matrix(self, normalize=True):
        """ Look at missing values overlapping

        Returns a square DataFrame (na-columns x na-columns) where each cell
        counts rows missing in both columns, row-scaled when ``normalize``.
        """
        arr = self.data_isna.astype('float').values
        # arr.T @ arr counts co-occurring missing values per column pair
        arr = np.dot(arr.T, arr)
        if normalize:
            arr = arr / (arr.max(axis=1)[:, None])
        index = self.nacols
        res = pd.DataFrame(index=index, data=arr, columns=index)
        return res

    def infos_na(self, na_low=0.05, na_high=0.90):
        """ Returns a dict with various infos about missing values """
        infos = {}
        infos['nacolcount'] = self.nacolcount()
        infos['narowcount'] = self.narowcount()
        infos['nb_total_na'] = self.total_missing
        infos['many_na_col'] = self.manymissing(pct=na_high)
        infos['low_na_col'] = cserie(self.nacolcount().Napercentage < na_low)
        infos['total_pct_na'] = self.nacolcount().Napercentage.mean()
        return infos

    def get_isna(self, col):
        """ Returns a dummy variable indicating in a observation of a specific col
        is na or not 0 -> not na , 1 -> na """
        return self.data.loc[:, col].isnull().astype(int)

    @property
    def data_isna_m(self):
        """ Returns merged dataframe (data, data_is_na)"""
        return pd.concat((self.data, self.data_isna), axis=1)

    def get_data_isna(self, prefix="is_na_", filter_nna=True):
        """ Returns dataset with is_na columns from the a dataframe with missing values

        Parameters
        ----------
        prefix : str
            the name of the prefix that will be append to the column name.
        filter_nna: bool
            True if you want remove column without missing values.
        """
        if not filter_nna:
            cols_to_keep = self.data.columns
        else:
            cols_to_keep = self.nacols
        data_isna = self.data.loc[:, cols_to_keep].isnull().astype(int)
        data_isna.columns = ["{}{}".format(prefix, c) for c in cols_to_keep]
        self.data_isna = data_isna
        return self.data_isna

    def get_corrna(self, *args, **kwargs):
        """ Get matrix of correlation of na """
        return self.data_isna.corr(*args, **kwargs)

    def corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna (deprecated alias of
        plot_corrplot_na) """
        print("This function is deprecated")
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna """
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_density_m(self, colname, subset=None, prefix="is_na_", size=6, *args, **kwargs):
        """ Plot conditionnal density plot from all columns or subset based on
        is_na_colname 0 or 1"""
        colname_na = prefix + colname
        density_columns = self.data.columns if subset is None else subset
        # filter only numeric values and different values from is_na_col
        density_columns = [c for c in density_columns if (
            c in self._dfnum and c != colname)]
        print(density_columns)
        for col in density_columns:
            g = sns.FacetGrid(data=self.data_isna_m, col=colname_na, hue=colname_na,
                              size=size, *args, **kwargs)
            g.map(sns.distplot, col)

    def get_isna_mean(self, colname, prefix="is_na_"):
        """ Returns empirical conditional expectatation, std, and sem of other numerical variable
        for a certain colname with 0:not_a_na 1:na """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        measure_var = self.data.columns.tolist()
        measure_var = [c for c in measure_var if c != colname]
        functions = ['mean', 'std', 'sem']
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname)[measure_var].agg(functions).transpose()

    def get_isna_ttest_s(self, colname_na, colname, type_test="ks"):
        """ Two-sample test ("ttest" or "ks") of `colname` split by
        missingness of `colname_na` """
        index_na = self.data.loc[:, colname_na].isnull()
        measure_var = self.data.loc[:, colname].dropna()  # drop na vars
        if type_test == "ttest":
            return ttest_ind(measure_var[index_na], measure_var[~index_na])
        elif type_test == "ks":
            return ks_2samp(measure_var[index_na], measure_var[~index_na])

    def get_isna_ttest(self, colname_na, type_test="ks"):
        """ Run get_isna_ttest_s of `colname_na` against every other
        numeric column; returns a DataFrame of statistics and p-values """
        res = pd.DataFrame()
        col_to_compare = [c for c in self._dfnum if c !=
                          colname_na]  # remove colname_na
        for col in col_to_compare:
            ttest = self.get_isna_ttest_s(colname_na, col, type_test=type_test)
            res.loc[col, 'pvalue'] = ttest[1]
            res.loc[col, 'statistic'] = ttest[0]
            res.loc[col, 'type_test'] = type_test
        return res

    def isna_summary(self, colname, prefix="is_na_"):
        """ Returns summary from one col with describe """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname).describe().transpose()

    def delete_narows(self, pct, index=False, verbose=False):
        """ Delete rows with more na percentage than > perc in data
        Return the index

        Arguments
        ---------
        pct : float
            percentage of missing values, rows with more na percentage
            than > perc are deleted
        index : bool, default False
            True if you want an index and not a Dataframe
        verbose : bool, default False
            True if you want to see percentage of data discarded

        Returns
        --------
        - a pandas Dataframe with rows deleted if index=False, index of
        columns to delete either
        """
        # BUG FIX: rows require manymissing(axis=1) (axis=0 returns column
        # names and the method takes no `index` kwarg); `verbose` was also
        # read without ever being declared as a parameter.
        index_missing = self.manymissing(pct=pct, axis=1)
        pct_missing = index_missing.sum() / len(self.data.index)
        if verbose:
            print("There is {0:.2%} rows matching conditions".format(
                pct_missing))
        if not index:
            return self.data.loc[~index_missing, :]
        else:
            return index_missing

    def fillna_serie(self, colname, threshold_factor=0.1, special_value=None, date_method='ffill'):
        """ Fill missing values of one column: an explicit special value if
        given, else the median for floats, the mode for int factors, a pandas
        fill method for datetimes, and the most common value otherwise """
        if special_value is not None:
            # "Missing for example"
            return self.data.loc[:, colname].fillna(special_value)
        elif self.data.loc[:, colname].dtype == float:
            # fill with median
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].median())
        elif self.is_int_factor(colname, threshold_factor):
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].mode()[0])
        # fillna for datetime with the method provided by pandas
        elif self.data.loc[:, colname].dtype == '<M8[ns]':
            return self.data.loc[:, colname].fillna(method=date_method)
        else:
            # Fill with most common value
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].value_counts().index[0])

    def basic_naimputation(self, columns_to_process=[], threshold=None):
        """ this function will return a dataframe with na value replaced int
        the columns selected by the mean or the most common value

        Arguments
        ---------
        - columns_to_process : list of columns name with na values you wish to fill
        with the fillna_serie function
        - threshold : float, also process any column whose missing share is
        below this value

        Returns
        --------
        - a pandas DataFrame with the columns_to_process filled with the fillena_serie
        """
        # self.data = self.df.copy()
        if threshold:
            columns_to_process = columns_to_process + cserie(self.nacolcount().Napercentage < threshold)
        self.data.loc[:, columns_to_process] = self.data.loc[
            :, columns_to_process].apply(lambda x: self.fillna_serie(colname=x.name))
        return self.data

    def split_tt_na(self, colname, index=False):
        """ Split the dataset on missingness of `colname`: returns
        (test, train) frames, or their boolean indexes when index=True """
        index_na = self.data.loc[:, colname].isnull()
        index_test = (index_na == True)
        index_train = (index_na == False)
        if index:
            return index_test, index_train
        else:
            return self.data.loc[index_test, :], self.data.loc[index_train, :]
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Create toolbox functions to use for the different pieces of code ot the package
"""
from numpy.random import normal
from numpy.random import choice
import time
import pandas as pd
import numpy as np
import functools
def print_section(section_name, width=120):
    """Print a centered, '='-padded section header for reports in DataExplora.

    Parameters
    ----------
    section_name : str
        title of the section
    width : int
        total width of the printed line
    """
    section_name = ' ' + section_name + ' '
    # BUG FIX: the original spec '{:=^{ }}' contains a nested field named
    # ' ' (a space), which raises KeyError; pass the width as a named
    # nested field instead.
    print('{:=^{width}}'.format(section_name, width=width))
# def get_dataset(name, *args, **kwargs):
# """Get a dataset from the online repo
# https://github.com/ericfourrier/autoc-datasets (requires internet).
#
# Parameters
# ----------
# name : str
# Name of the dataset 'name.csv'
# """
# path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
# return pd.read_csv(path, *args, **kwargs)
def flatten_list(x):
    """Recursively flatten arbitrarily nested lists into one flat list."""
    if not isinstance(x, list):
        return [x]
    flat = []
    for item in x:
        flat.extend(flatten_list(item))
    return flat
def cserie(serie, index=False):
    """Return the labels where a boolean Series is True.

    Parameters
    ----------
    serie : pd.Series
        boolean mask
    index : bool
        True to get a pandas Index, False (default) to get a plain list
    """
    true_labels = serie[serie].index
    return true_labels if index else true_labels.tolist()
def removena_numpy(array):
    """Return *array* with its NaN entries dropped."""
    nan_mask = np.isnan(array)
    return array[~nan_mask]
def common_cols(df1, df2):
    """ Return the intersection of commun columns name """
    return list(set(df1.columns).intersection(df2.columns))
def bootstrap_ci(x, n=300, ci=0.95):
    """
    Bootstrap percentile confidence interval for the mean of a numpy array
    (depends only on numpy).

    Arguments
    ---------
    x : a numpy ndarray
    n : the number of boostrap samples
    ci : the percentage confidence (float) interval in ]0,1[

    Return
    -------
    a tuple (ci_inf,ci_up); (nan, nan) when x has no valid value
    """
    lower_pct = 100 * (1 - ci) / 2
    upper_pct = 100 * ci + lower_pct
    # inlined removena_numpy(): drop NaN values before resampling
    x = x[~(np.isnan(x))]
    if not len(x):
        return (np.nan, np.nan)
    resampled_means = choice(a=x, size=(
        len(x), n), replace=True).mean(axis=0)
    return np.percentile(resampled_means, [lower_pct, upper_pct])
def clock(func):
    """Decorator printing the elapsed wall time (ms) of every call;
    originally used to time each test of the unittest suite, but usable
    on any function."""
    def clocked(*args):
        start = time.time()
        value = func(*args)
        duration_ms = (time.time() - start) * 1000  # in ms
        print('elapsed : [{0:0.3f}ms]'.format(duration_ms))
        return value
    return clocked
def cached_property(fun):
    """A memoize decorator for class properties.

    The computed value is stored in ``self._cache`` keyed by the wrapped
    function, so each instance evaluates ``fun`` at most once.
    """
    @functools.wraps(fun)
    def getter(self):
        if not hasattr(self, '_cache'):
            self._cache = {}
        if fun not in self._cache:
            self._cache[fun] = fun(self)
        return self._cache[fun]
    return property(getter)
def create_test_df():
    """ Creating a test pandas DataFrame for the unittest suite

    Returns a 1000-row frame whose columns exercise specific code paths:
    constant/duplicated columns, many-missing columns, planted outliers,
    datetimes, None vs np.nan, and string-encoded missing values.
    Content is random (no fixed seed); only the structure is stable.
    """
    test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [
        10 * i for i in range(1, 1001)]})
    test_df['na_col'] = np.nan
    test_df['id_na'] = test_df.id
    test_df.loc[1:3, 'id_na'] = np.nan
    test_df['constant_col'] = 'constant'
    test_df['constant_col_num'] = 0
    test_df['character_factor'] = [
        choice(list('ABCDEFG')) for _ in range(1000)]
    test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]
    # near-zero-variance column: a single row differs from the rest
    test_df['nearzerovar_variable'] = 'most_common_value'
    test_df.loc[0, 'nearzerovar_variable'] = 'one_value'
    test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]
    test_df['character_variable'] = [str(i) for i in range(1000)]
    test_df['duplicated_column'] = test_df.id
    test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700
    test_df['character_variable_fillna'] = ['A'] * \
        300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300
    test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200
    test_df['num_variable'] = 100.0
    test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]
    # planted outliers, with and without accompanying missing values
    test_df['outlier'] = normal(size=1000)
    test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]
    test_df['outlier_na'] = test_df['outlier']
    test_df.loc[[300, 500], 'outlier_na'] = np.nan
    test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')
    test_df['None_100'] = [1] * 900 + [None] * 100
    test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100
    test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300
    test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300
    # string spellings of "missing" that detect_other_na should catch
    test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \
        ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \
        ['Unknown'] * 100 + ['do_not_touch'] * 200
    return test_df
def simu(pmf, size):
    """Draw ``size`` random samples from a discrete distribution.

    Parameters
    ----------
    pmf : tuple(ndarray, ndarray)
        a (labels, probs) pair; labels are expected in ascending order
    size : int
        the number of samples you want to generate

    Returns
    -------
    ndarray
        random draws from the pmf (same dtype as labels)
    """
    values, weights = pmf
    uniform_draws = np.random.rand(size)
    thresholds = weights.cumsum()
    # Inverse-CDF sampling: for each draw, take the first cumulative
    # threshold it does not reach.
    positions = (uniform_draws >= thresholds[:, None]).argmin(axis=0)
    return values[positions]
def shuffle_df(df, reindex=False):
    """Return a row-shuffled copy of *df*.

    When ``reindex`` is True the old index is moved into a column and a
    fresh RangeIndex is attached; otherwise the original index is kept.
    """
    shuffled = df.sample(frac=1)
    if reindex:
        shuffled = shuffled.reset_index()
    return shuffled
def random_pmf(nb_labels):
    """ Return a random probability mass function of nb_labels"""
    raw = np.random.random(nb_labels)
    # normalise so the weights sum to one
    return raw / raw.sum()
def random_histogram(nb_labels, nb_observations):
    """ Return a random, normalised histogram over nb_labels bins """
    counts = np.random.choice(np.arange(0, nb_observations), nb_labels)
    # normalise the random counts into a probability mass function
    return counts / counts.sum()
def keep_category(df, colname, pct=0.05, n=5):
    """Sample a small protected subset of rows for every level of a column.

    Parameters
    ----------
    pct : float
        Keep at least pct of the nb of observations having a specific category
    n : int
        Keep at least n of the variables having a specific category

    Returns
    --------
    pd.Index
        index of rows to keep (at least one row per level)
    """
    protected = []
    per_level = df.groupby(colname).apply(lambda g: g.sample(
        max(1, min(g.shape[0], n, int(g.shape[0] * pct)))).index)
    for level_index in per_level:
        protected.extend(level_index.tolist())
    return pd.Index(protected)
# for k, i in df.groupby(colname).groups:
# to_keep += np.random.choice(i, max(1, min(g.shape[0], n, int(g.shape[0] * pct))), replace=False)
# return to_keep
#
def simulate_na_col(df, colname, n=None, pct=None, weights=None,
                    safety=True, *args, **kwargs):
    """ Simulate missing values (in place) in a column of categorical variables

    Parameters
    ----------
    df : pd.DataFrame
        frame modified in place
    colname : str, list or pd.Index
        column name, or collection of names processed recursively
    n : int
        number of values to replace with np.nan
    pct : float
        fraction of rows to replace; only used when n is None
    weights : array-like, optional
        pmf over the column levels biasing which rows go missing;
        a random pmf is drawn when omitted
    safety : bool
        True to keep at least a few rows per level (see keep_category);
        extra *args/**kwargs are forwarded to keep_category

    Notes
    -----
    Fix issue with category variable"""
    if (n is None) and (pct is not None):
        # be careful here especially if cols has a lot of missing values
        n = int(pct * df.shape[0])
    # BUG FIX: pd.core.index.Index was removed from modern pandas;
    # pd.Index is the stable public name.
    if isinstance(colname, (pd.Index, list)):
        for c in colname:
            simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights)
    else:
        if safety:
            tokeep = keep_category(df, colname, *args, **kwargs)
            # we are not sampling from tokeep
            col = df.loc[:, colname].drop(tokeep)
        else:
            # BUG FIX: the original left `col` undefined when safety=False
            col = df.loc[:, colname]
        col = col.dropna()
        print(colname)
        col_distribution = col.value_counts(normalize=True, sort=False)
        labels = col_distribution.index  # characters
        # generate random pmf; explicit None-check because `weights` may be
        # an ndarray, whose truth value is ambiguous
        pmf_na = weights if weights is not None else random_pmf(len(labels))
        na_distribution = pd.Series(data=pmf_na, index=labels)
        # draw samples from this pmf
        weights_na = col.apply(lambda x: na_distribution[x])
        weights_na /= weights_na.sum()
        index_to_replace = col.sample(
            n=n, weights=weights_na, replace=False).index
        df.loc[index_to_replace, colname] = np.nan
def get_test_df_complete():
    """ Get the full test dataset from the Lending Club open source database;
    the purpose of this function is to be used in a demo ipython notebook
    (downloads and parses a zipped csv, so it needs network access). """
    import requests
    from zipfile import ZipFile
    # BUG FIX: Python 3 has no StringIO module, and the HTTP payload is
    # binary anyway -- wrap it in io.BytesIO for ZipFile.
    from io import BytesIO
    zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
    r = requests.get(zip_to_download)
    zipfile = ZipFile(BytesIO(r.content))
    file_csv = zipfile.namelist()[0]
    # we are using the c parser for speed
    df = pd.read_csv(zipfile.open(file_csv), skiprows=[0], na_values=['n/a', 'N/A', ''],
                     parse_dates=['issue_d', 'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d'])
    zipfile.close()
    df = df[:-2]
    nb_row = float(len(df.index))
    df['na_col'] = np.nan
    df['constant_col'] = 'constant'
    df['duplicated_column'] = df.id
    # first ~30% of the rows get a value, the rest stay missing
    df['many_missing_70'] = np.nan
    df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1
    # bad = 1 unless the loan is in a good status
    df['bad'] = 1
    index_good = df['loan_status'].isin(
        ['Fully Paid', 'Current', 'In Grace Period'])
    df.loc[index_good, 'bad'] = 0
    return df
def kl(p, q):
    """Kullback-Leibler divergence for discrete distributions.

    Parameters
    ----------
    p: ndarray
        probability mass function
    q: ndarray
        probability mass function

    Returns
    --------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i)), with the convention
    that terms where p(i) == 0 contribute 0.
    """
    terms = np.where(p != 0, p * np.log(p / q), 0)
    return np.sum(terms)
def kl_series(serie1, serie2, dropna=True):
    """KL divergence between the empirical distributions of two pandas Series."""
    if dropna:
        serie1 = serie1.dropna()
        serie2 = serie2.dropna()
    p = serie1.value_counts(normalize=True).values
    q = serie2.value_counts(normalize=True).values
    # inlined kl(): sum(p_i * log(p_i / q_i)) with 0 * log(0) treated as 0
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def plot_hist_na(df, colname):
    """Plot histograms of every numeric column, grouped by whether
    ``colname`` is missing (0: present, 1: missing)."""
    df_h = df.copy()
    na_name = "is_na_{}".format(colname)
    # 0/1 indicator for missingness of `colname`, used as group key
    df_h[na_name] = df_h[colname].isnull().astype(int)
    # keep only int/float columns (cserie turns the boolean mask into names)
    measure_col = cserie((df.dtypes == int) | (df.dtypes == float))
    df_h.groupby(na_name)[measure_col].hist()
def psi(bench, target, group, print_df=True):
    """ This function return the Population Stability Index, quantifying if the
    distribution is stable between two states.
    This statistic make sense and works is only working for numeric variables
    for bench and target.
    Params:
    - bench is a numpy array with the reference variable.
    - target is a numpy array of the new variable.
    - group is the number of group you want consider.
    Returns a dict {'data': DataFrame, 'statistic': float} when print_df is
    True, otherwise just the float statistic.
    """
    # Quantile edges of the benchmark ("nearest" snaps edges to observed
    # values).
    # NOTE(review): the `interpolation=` keyword is deprecated in
    # NumPy >= 1.22 (renamed to `method=`) -- confirm the targeted version.
    labels_q = np.percentile(
        bench, [(100.0 / group) * i for i in range(group + 1)], interpolation="nearest")
    # This is the right approach when you have not a lot of unique value
    ben_pct = (pd.cut(bench, bins=np.unique(labels_q),
                      include_lowest=True).value_counts()) / len(bench)
    target_pct = (pd.cut(target, bins=np.unique(labels_q),
                         include_lowest=True).value_counts()) / len(target)
    target_pct = target_pct.sort_index()  # sort the index
    ben_pct = ben_pct.sort_index()  # sort the index
    # PSI = sum over bins of (target% - bench%) * ln(target% / bench%)
    psi = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))
    # Print results for better understanding
    if print_df:
        results = pd.DataFrame({'ben_pct': ben_pct.values,
                                'target_pct': target_pct.values},
                               index=ben_pct.index)
        return {'data': results, 'statistic': psi}
    return psi
--- FILE SEPARATOR ---
import seaborn as sns
import matplotlib.pyplot as plt
def plot_corrmatrix(df, square=True, linewidths=0.1, annot=True,
                    size=None, figsize=(12, 9), *args, **kwargs):
    """
    Plot correlation matrix of the dataset
    see doc at https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html#seaborn.heatmap

    Parameters
    ----------
    df : pd.DataFrame
        data whose df.corr() matrix is plotted
    square, linewidths, annot, size :
        forwarded to seaborn.heatmap (size feeds annot_kws)
    figsize : tuple
        matplotlib figure size
    *args, **kwargs :
        forwarded to seaborn.heatmap
    """
    sns.set(context="paper", font="monospace")
    f, ax = plt.subplots(figsize=figsize)
    sns.heatmap(df.corr(), vmax=1, square=square, linewidths=linewidths,
                annot=annot, annot_kws={"size": size}, *args, **kwargs)
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for Modeling with pandas, numpy and skicit-learn.
The Goal of this module is to rely on a dataframe structure for modelling g
"""
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result into a attributes
When you used a method the output will be stored in a instance attribute so you
don't have to compute the result again.
"""
    def __init__(self, data, copy=False):
        """
        Parameters
        ----------
        data : pandas.DataFrame
            the data you want explore
        copy: bool
            True if you want make a copy of DataFrame, default False

        Examples
        --------
        explorer = DataExploration(data = your_DataFrame)
        explorer.structure() : global structure of your DataFrame
        explorer.psummary() to get the a global snapchot of the different stuff detected
        data_cleaned = explorer.basic_cleaning() to clean your data.
        """
        assert isinstance(data, pd.DataFrame)
        self.is_data_copy = copy
        self.data = data if not self.is_data_copy else data.copy()
        # if not self.label:
        #     print("""the label column is empty the data will be considered
        #     as a dataset of predictors""")
        # basic dimensions, computed once
        self._nrow = len(self.data.index)
        self._ncol = len(self.data.columns)
        # boolean mask and name list of numeric (int/float) columns
        self._dfnumi = (self.data.dtypes == float) | (
            self.data.dtypes == int)
        self._dfnum = cserie(self._dfnumi)
        # boolean mask and name list of object (character) columns
        self._dfchari = (self.data.dtypes == object)
        self._dfchar = cserie(self._dfchari)
        # lazy caches, filled by the corresponding methods on first call
        self._nacolcount = pd.DataFrame()
        self._narowcount = pd.DataFrame()
        self._count_unique = pd.DataFrame()
        self._constantcol = []
        self._dupcol = []
        self._nearzerovar = pd.DataFrame()
        self._corrcolumns = []
        self._dict_info = {}
        self._structure = pd.DataFrame()
        self._string_info = ""
        # lowercase spellings treated as alternative encodings of "missing"
        self._list_other_na = {'unknown', 'na',
                               'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
Notes
------
df._get_numeric_data() is a primitive from pandas
to get only numeric data
"""
dtype_col = self.data.loc[:, colname].dtype
return (dtype_col == int) or (dtype_col == float)
def is_int_factor(self, colname, threshold=0.1):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
threshold : float
colname is an 'int_factor' if the number of
unique values < threshold * nrows
"""
dtype_col = self.data.loc[:, colname].dtype
if dtype_col == int and self.data.loc[:, colname].nunique() <= (threshold * self.data.shape[0]):
return True
else:
return False
def to_lowercase(self):
""" Returns a copy of dataset with data to lower """
return self.data.applymap(lambda x: x.lower() if type(x) == str else x)
def where_numeric(self):
""" Returns a Boolean Dataframe with True for numeric values False for other """
return self.data.applymap(lambda x: isinstance(x, (int, float)))
    def count_unique(self):
        """ Return a serie with the number of unique value per columns """
        # cached: computed once, reused on later calls
        if len(self._count_unique):
            return self._count_unique
        self._count_unique = self.data.apply(lambda x: x.nunique(), axis=0)
        return self._count_unique
    def sample_df(self, pct=0.05, nr=10, threshold=None):
        """ sample a number of rows of a dataframe = min(max(0.05*nrow(self,nr),threshold)"""
        # at least `nr` rows, at least `pct` of the frame, capped by `threshold`
        a = max(int(pct * float(len(self.data.index))), nr)
        if threshold:
            a = min(a, threshold)
        # permutation-based row sampling without replacement
        return self.data.loc[permutation(self.data.index)[:a],:]
def sign_summary(self, subset=None):
"""
Returns the number and percentage of positive and negative values in
a column, a subset of columns or all numeric columns of the dataframe.
Parameters
----------
subset : label or list
Column name or list of column names to check.
Returns
-------
summary : pandas.Series or pandas.DataFrame
Summary of the signs present in the subset
"""
if subset:
subs = subs if isinstance(subs, list) else [subs]
if sum(col not in self._dfnum for col in subs) > 0:
raise NotNumericColumn('At least one of the columns you passed ' \
'as argument are not numeric.')
else:
subs = self._dfnum
summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
'NumOfPositive', 'PctOfPositive'])
summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
return summary
    @property
    def total_missing(self):
        """ Count the total number of missing values """
        # return np.count_nonzero(self.data.isnull().values) # optimized for
        # speed
        # sums the per-column missing counts (cached by nacolcount)
        return self.nacolcount().Nanumber.sum()
    def nacolcount(self):
        """ count the number of missing values per column

        Returns a DataFrame indexed by column name with 'Nanumber' and
        'Napercentage' columns; cached after the first call.
        """
        if len(self._nacolcount):
            return self._nacolcount
        self._nacolcount = self.data.isnull().sum(axis=0)
        self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
        self._nacolcount['Napercentage'] = self._nacolcount[
            'Nanumber'] / (self._nrow)
        return self._nacolcount
    def narowcount(self):
        """ count the number of missing values per row

        Returns a DataFrame with 'Nanumber' and 'Napercentage' per row;
        cached after the first call.
        """
        if len(self._narowcount):
            return self._narowcount
        self._narowcount = self.data.isnull().sum(axis=1)
        self._narowcount = pd.DataFrame(
            self._narowcount, columns=['Nanumber'])
        self._narowcount['Napercentage'] = self._narowcount[
            'Nanumber'] / (self._ncol)
        return self._narowcount
    def detect_other_na(self, verbose=True, auto_replace=False):
        """ Detect missing values encoded by the creator of the dataset
        like 'Missing', 'N/A' ...

        Parameters
        ----------
        verbose : bool
            True if you want to print some infos
        auto_replace: bool
            True if you want replace this value by np.nan, default False

        Returns
        -------
        an DataFrame of boolean if not auto_replace else cleaned DataFrame with
        self._list_other_na replaced by np.nan

        Notes
        ------
        * You can use na_values parameter in pandas.read_csv to specify the missing
        values to convert to nan a priori
        * Speed can be improved
        """
        # lowercase copy so 'Missing', 'MISSING', ... all match the set
        res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
        # NOTE(review): printed unconditionally -- the `verbose` flag is
        # never read in this body
        print("We detected {} other type of missing values".format(res.sum().sum()))
        if auto_replace:
            return self.data.where((res == False), np.nan)
        else:
            return res
    @property
    def nacols_full(self):
        """ Returns a list of columns with only missing values """
        # a column is fully missing when its Nanumber equals the row count
        return cserie(self.nacolcount().Nanumber == self._nrow)
    @property
    def narows_full(self):
        """ Returns a boolean mask of rows with only missing values """
        # note: yields the boolean Series itself, not an Index of rows
        return self.narowcount().Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
def manymissing(self, pct=0.9, axis=0):
    """ Identify columns (axis=0) or rows (axis=1) with a proportion of
    missing values >= pct.

    Parameters
    ----------
    pct : float
        threshold proportion of missing values (default 0.9)
    axis : int
        0 to inspect columns, 1 to inspect rows

    Returns
    -------
    axis=1 : boolean pandas.Series indexed by row
    axis=0 : list of column names (via cserie)

    Raises
    ------
    ValueError if axis is neither 0 nor 1.
    """
    if axis == 1:
        self._manymissingrow = self.narowcount()
        self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
        return self._manymissingrow
    elif axis == 0:
        self._manymissingcol = self.nacolcount()
        self._manymissingcol = cserie(
            self._manymissingcol['Napercentage'] >= pct)
        return self._manymissingcol
    else:
        # fix: the message used to read "o for columns"
        raise ValueError("Axis should be 1 for rows and 0 for columns")
def df_len_string(self, drop_num=False):
    """ Maximum string length per string-typed column, as a Series.

    drop_num=True removes the numeric columns first; otherwise numeric
    columns simply yield NaN.
    """
    if drop_num:
        non_num = self.data.drop(self._dfnum, axis=1)
        return non_num.apply(lambda col: np.max(col.str.len()), axis=0)

    def longest(col):
        # object-dtype columns get their max string length, others NaN
        return np.max(col.str.len()) if col.dtype.kind == 'O' else np.nan

    return self.data.apply(longest, axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
    """ Identify id/key columns (columns whose values are all unique).

    A cheap uniqueness test is first run on a sample (sample_df); the
    surviving candidate columns are then re-checked on the full data.

    Parameters
    ----------
    index_format : bool
        True to return a boolean index, False to return a list of names.
    pct : float
        fraction of rows used for the sampling pre-filter.
    dropna : bool
        True to ignore missing values when testing uniqueness.

    Fix: the full-data re-check used '==' (a discarded comparison)
    instead of '=', so the sampled pre-filter result was never refined;
    it now assigns.
    """
    if not dropna:
        col_to_keep = self.sample_df(
            pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
        if len(col_to_keep) == 0:
            return []
        is_key_index = col_to_keep
        is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
            lambda x: len(x.unique()) == len(x), axis=0)
        if index_format:
            return is_key_index
        else:
            return cserie(is_key_index)
    else:
        col_to_keep = self.sample_df(
            pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
        if len(col_to_keep) == 0:
            return []
        is_key_index = col_to_keep
        is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
            lambda x: x.nunique() == len(x.dropna()), axis=0)
        if index_format:
            return is_key_index
        else:
            return cserie(is_key_index)
def constantcol(self, **kwargs):
    """ Return (and cache) the list of constant columns.

    A sampled pre-filter keeps the computation cheap; candidate columns
    are then confirmed against the full data.
    """
    if len(self._constantcol):  # cached from a previous call
        return self._constantcol
    sampled = self.sample_df(**kwargs)
    candidates = sampled.apply(lambda col: len(col.unique()) == 1, axis=0)
    if len(cserie(candidates)) == 0:
        return []
    confirmed = self.data.loc[:, candidates].apply(
        lambda col: len(col.unique()) == 1, axis=0)
    self._constantcol = cserie(confirmed)
    return self._constantcol
def constantcol2(self, **kwargs):
    """ Identify constant columns by comparing every row to the first one.

    Fix: ``DataFrame.ix`` was removed from pandas; use ``iloc`` instead.
    """
    return cserie((self.data == self.data.iloc[0]).all())
def factors(self, nb_max_levels=10, threshold_value=None, index=False):
    """ Return the detected factor variables; detection follows the R
    notion of a factor: a character variable taking values in a small
    set of levels.

    Arguments
    ----------
    nb_max_levels : int
        the max number of levels allowed for a categorical variable
    threshold_value : float or None
        number of unique values expressed as a fraction of the dataframe
        length; when given, the effective cap is the larger of the two
    index : bool
        True for a boolean index, False (default) for a list of names
    """
    # Effective level cap: absolute cap, or row-proportional cap if larger.
    if threshold_value:
        max_levels = max(nb_max_levels, threshold_value * self._nrow)
    else:
        max_levels = nb_max_levels

    def helper_factor(x, num_var=self._dfnum):
        # A column is a factor when it is non-numeric and its number of
        # distinct values stays below max_levels (early-exit scan).
        unique_value = set()
        if x.name in num_var:
            return False
        else:
            for e in x.values:
                if len(unique_value) >= max_levels:
                    return False
                else:
                    unique_value.add(e)
            return True

    if index:
        return self.data.apply(lambda x: helper_factor(x))
    else:
        return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in xrange(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in xrange(nb_quantiles + 1)])
else:
raise("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
    """ Return a DataFrame with nb_quantiles + 1 quantiles of each column.

    only_numeric=True restricts the computation to numeric columns;
    with False, non-numeric columns yield NaN.

    Fix: the original used Python 2's ``xrange`` (NameError on Python 3).
    """
    binq = 1.0 / nb_quantiles
    quantile_points = [binq * i for i in range(nb_quantiles + 1)]
    if only_numeric:
        return self.data.loc[:, self._dfnumi].quantile(quantile_points)
    else:
        return self.data.quantile(quantile_points)
def numeric_summary(self):
    """ Provide a more complete summary than describe(); numeric columns
    only. Returns one row per column with Count/Min/quartiles/Mean/Std/
    Mad/Skewness/Kurtosis/Max. """
    # Restrict to numeric columns (boolean mask computed at construction).
    df = self.data.loc[:, self._dfnumi]
    # One aggregate Series per statistic, stacked as rows then transposed
    # so each row of the result describes one column.
    # NOTE(review): Series.mad() is deprecated/removed in recent pandas --
    # confirm the pandas version pinned by the project.
    func_list = [df.count(), df.min(), df.quantile(0.25),
                 df.quantile(0.5), df.mean(),
                 df.std(), df.mad(), df.skew(),
                 df.kurt(), df.quantile(0.75), df.max()]
    results = [f for f in func_list]
    return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
                                        'Median', 'Mean', 'Std', 'Mad', 'Skewness',
                                        'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
    """ Infer the fine-grained type of each column, as a Series of strings.

    Fix: ``pd.lib.infer_dtype`` was removed from pandas; the public
    equivalent is ``pd.api.types.infer_dtype``.
    """
    return self.data.apply(lambda x: pd.api.types.infer_dtype(x.values))
def structure(self, threshold_factor=10):
    """ Return a per-column summary of the DataFrame structure: pandas
    dtype, R-like type, missing-value counts, unique-value counts,
    constant/full-NA/key flags, inferred dtype and max string length.
    Cached in self._structure after the first call. """
    # Serve the cached result when available.
    if len(self._structure):
        return self._structure
    dtypes = self.data.dtypes
    nacolcount = self.nacolcount()
    nb_missing = nacolcount.Nanumber
    perc_missing = nacolcount.Napercentage
    nb_unique_values = self.count_unique()
    dtype_infer = self.infer_types()
    # R-like type label: numeric / factor / character.  A character column
    # with few distinct values (<= threshold_factor) is promoted to factor.
    dtypes_r = self.data.apply(lambda x: "character")
    dtypes_r[self._dfnumi] = "numeric"
    dtypes_r[(dtypes_r == 'character') & (
        nb_unique_values <= threshold_factor)] = 'factor'
    constant_columns = (nb_unique_values == 1)
    na_columns = (perc_missing == 1)
    is_key = nb_unique_values == self._nrow
    string_length = self.df_len_string(drop_num=False)
    # is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
    dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                'nb_missing': nb_missing, 'is_key': is_key,
                'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                'constant_columns': constant_columns, 'na_columns': na_columns,
                'dtype_infer': dtype_infer, 'string_length': string_length}
    self._structure = pd.concat(dict_str, axis=1)
    # Fix a stable column order for the report.
    self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
                                              'nb_unique_values', 'constant_columns',
                                              'na_columns', 'is_key', 'dtype_infer', 'string_length']]
    return self._structure
def findupcol(self, threshold=100, **kwargs):
    """ Find duplicated columns; the result is a list of lists, one list
    per group of identical columns (cached in self._dupcol).

    A transposed sample (capped at *threshold* rows) pre-filters the
    candidate columns; duplicates are then confirmed on the full data.

    Fix: the ``threshold`` parameter was ignored (the call hard-coded 100).
    """
    df_s = self.sample_df(threshold=threshold, **kwargs).T
    dup_index_s = (df_s.duplicated()) | (
        df_s.duplicated(keep='last'))
    if len(cserie(dup_index_s)) == 0:
        return []
    # Confirm candidates on the full data (transposed so columns are rows).
    df_t = (self.data.loc[:, dup_index_s]).T
    dup_index = df_t.duplicated()
    dup_index_complet = cserie(
        (dup_index) | (df_t.duplicated(keep='last')))
    l = []
    for col in cserie(dup_index):
        # columns equal to `col` on every row form one duplicate group
        index_temp = self.data[dup_index_complet].apply(
            lambda x: (x == self.data[col])).sum() == self._nrow
        temp = list(self.data[dup_index_complet].columns[index_temp])
        l.append(temp)
    self._dupcol = l
    return self._dupcol
def finduprow(self, subset=[]):
    """ Find duplicated rows and return them as a sorted DataFrame.

    subset is a list of columns restricting the duplicate search; when
    empty, whole rows are compared and the result is sorted by the first
    column.

    Fix: ``DataFrame.sort`` was removed from pandas; use ``sort_values``.
    Note: the mutable default ``subset=[]`` is kept for interface
    compatibility; it is never mutated here.
    """
    if sum(self.data.duplicated()) == 0:
        print("there is no duplicated rows")
    else:
        if subset:
            dup_index = (self.data.duplicated(subset=subset)) | (
                self.data.duplicated(subset=subset, keep='last'))
        else:
            dup_index = (self.data.duplicated()) | (
                self.data.duplicated(keep='last'))
        if subset:
            return self.data[dup_index].sort_values(subset)
        else:
            return self.data[dup_index].sort_values(self.data.columns[0])
def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
    """ Identify predictors with near-zero variance (caret-style).

    freq_cut: cutoff ratio of frequency of most common value to second
    most common value.
    unique_cut: cutoff percentage of unique values over total number of
    samples.
    save_metrics: if True, return the whole metrics DataFrame; if False,
    print the metrics and return the index of the near-zero-variance
    columns.  NOTE(review): the original docstring claimed the NON
    near-zero columns are returned, but the code returns nzv == True.
    """
    nb_unique_values = self.count_unique()
    percent_unique = 100 * nb_unique_values / self._nrow

    def helper_freq(x):
        # Ratio of the two most frequent values; degenerate columns get a
        # constant so they are caught by the zerovar flag below instead.
        if nb_unique_values[x.name] == 0:
            return 0.0
        elif nb_unique_values[x.name] == 1:
            return 1.0
        else:
            return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]

    freq_ratio = self.data.apply(helper_freq)
    # zero variance: empty column or a single repeated value
    zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
    # near-zero variance: dominated by one value AND few unique values
    nzv = ((freq_ratio >= freq_cut) & (
        percent_unique <= unique_cut)) | (zerovar)
    if save_metrics:
        return pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio, 'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns)
    else:
        print(pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio,
                            'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns))
        return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
    """
    Recursive Pairwise Elimination: repeatedly find the most highly
    correlated pair of columns and drop the member with the larger mean
    correlation, until no pairwise correlation exceeds ``cutoff``.

    Returns the DataFrame without the eliminated columns when
    ``data_frame=True``, otherwise the list of column names to remove.
    The list is also stored in ``self._corrcolumns``.

    Adaptation of the 'findCorrelation' function of the R caret package.

    Fixes
    -----
    * ``self._corrcolumns = res`` sat *after* ``return`` (dead code), so
      the attribute was never populated; it is now set before returning.
    * The diagonal was zeroed via chained assignment (``cor[col][col]``),
      which modern pandas may silently ignore; ``.loc`` is used instead.
    """
    res = []
    df = self.data.copy(0)
    cor = df.corr(method=method)
    # zero the diagonal so a column is never "correlated with itself"
    for col in cor.columns:
        cor.loc[col, col] = 0

    max_cor = cor.max()
    if print_mode:
        print(max_cor.max())
    while max_cor.max() > cutoff:
        A = max_cor.idxmax()
        B = cor[A].idxmax()
        # drop whichever member of the pair is, on average, more
        # correlated with everything else
        if cor[A].mean() > cor[B].mean():
            cor.drop(A, axis=1, inplace=True)
            cor.drop(A, axis=0, inplace=True)
            res += [A]
        else:
            cor.drop(B, axis=1, inplace=True)
            cor.drop(B, axis=0, inplace=True)
            res += [B]
        max_cor = cor.max()
        if print_mode:
            print(max_cor.max())

    self._corrcolumns = res  # was unreachable in the original
    if data_frame:
        return df.drop(res, axis=1)
    else:
        return res
def get_infos_consistency(self):
    """ Update self._dict_info and return infos about duplicated rows and
    columns, constant columns, and fully-missing rows and columns.

    Each entry maps to a dict with keys 'value' (the detection result),
    'level' (severity), 'action' and 'comment' (human-readable hint). """
    # NOTE(review): findupcol()/constantcol() are each called twice (once
    # for 'value', once inside the formatted hint); both cache internally,
    # so the duplicate call is cheap for constantcol but not findupcol.
    infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete this rows with df.drop_duplicates()'},
             'dup_columns': {'value': self.findupcol(), 'level': 'ERROR',
                             'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.findupcol())},
             'constant_columns': {'value': self.constantcol(), 'level': 'WARNING',
                                  'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.constantcol())},
             'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                             'action': 'delete', 'comment': 'You should delete this rows with df.drop_duplicates()'},
             'nacols_full': {'value': self.nacols_full, 'level': 'ERROR',
                             'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.nacols_full)}
             }
    # update
    self._dict_info.update(infos)
    return infos
def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
    """ Update self._dict_info and return infos about missing values:
    total count, global ratio, columns with many missing values
    (> manymissing_ph) and columns with few missing values
    (<= manymissing_pl, candidates for imputation). """
    nacolcount_p = self.nacolcount().Napercentage
    # NOTE(review): 'pct_total_missing' divides by the number of rows only;
    # a cell-level percentage would divide by nrow * ncol -- confirm intent.
    infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
             'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
             'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
             'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
             }
    # update
    self._dict_info.update(infos)
    return infos
def print_infos(self, infos="consistency", print_empty=False):
    """ Pretty-print the result of get_infos_consistency().

    Parameters
    ----------
    print_empty: bool
        False to skip entries whose detected 'value' is empty
        (e.g. no missing column found).
    """
    if infos == "consistency":
        dict_infos = self.get_infos_consistency()
        if not print_empty:
            dict_infos = {key: info for key, info in dict_infos.items()
                          if len(info['value']) > 0}
        pprint(dict_infos)
def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
             threshold=100, string_threshold=40, dynamic=False):
    """
    Print a summary of the dataset based on the other methods of this class.

    - Output : python print

    With dynamic=True every diagnostic is printed as soon as it is
    computed; otherwise all results are first collected into
    self._dict_info, rendered through the self._string_info template,
    then printed.

    NOTE(review): the ``threshold`` parameter is ignored -- both calls to
    findupcol hard-code threshold=100.
    """
    nacolcount_p = self.nacolcount().Napercentage
    if dynamic:
        print('there are {0} duplicated rows\n'.format(
            self.data.duplicated().sum()))
        print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                      cserie((nacolcount_p > manymissing_ph))))
        print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
            manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
        print('the detected keys of the dataset are:\n{0} \n'.format(
            self.detectkey()))
        print('the duplicated columns of the dataset are:\n{0}\n'.format(
            self.findupcol(threshold=100)))
        print('the constant columns of the dataset are:\n{0}\n'.format(
            self.constantcol()))
        print('the columns with nearzerovariance are:\n{0}\n'.format(
            list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
        print('the columns highly correlated to others to remove are:\n{0}\n'.format(
            self.findcorr(data_frame=False)))
        print('these columns contains big strings :\n{0}\n'.format(
            cserie(self.df_len_string() > string_threshold)))
    else:
        # collect every diagnostic once, then render the report template
        self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                           'many_missing_percentage': manymissing_ph,
                           'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                           'low_missing_percentage': manymissing_pl,
                           'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                           'keys_detected': self.detectkey(),
                           'dup_columns': self.findupcol(threshold=100),
                           'constant_columns': self.constantcol(),
                           'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                           'high_correlated_col': self.findcorr(data_frame=False),
                           'big_strings_col': cserie(self.df_len_string() > string_threshold)
                           }
        self._string_info = u"""
there are {nb_duplicated_rows} duplicated rows\n
the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
the columns with less than {low_missing_percentage:.2%}% manymissing values are :\n{lowmissing_columns} \n
you should fill them with median or most common value\n
the detected keys of the dataset are:\n{keys_detected} \n
the duplicated columns of the dataset are:\n{dup_columns}\n
the constant columns of the dataset are:\n{constant_columns}\n
the columns with nearzerovariance are:\n{nearzerovar_columns}\n
the columns highly correlated to others to remove are:\n{high_correlated_col}\n
these columns contains big strings :\n{big_strings_col}\n
""".format(**self._dict_info)
        print(self._string_info)
def metadata(self):
    """ Return a dict/json full of infos about the dataset: memory size,
    column names (raw and lower-cased), shape, structure() report and
    numeric_summary() report. """
    meta = {}
    meta['mem_size'] = self.data.memory_usage(index=True).sum()  # in bytes
    meta['columns_name'] = self.data.columns.tolist()
    # lower-cased variants, e.g. for case-insensitive matching downstream
    meta['columns_name_n'] = [e.lower() for e in self.data.columns]
    meta['nb_rows'] = self.data.shape[0]
    meta['nb_columns'] = self.data.shape[1]
    # drop dtype_p for mongodb compatibility (dtype objects don't serialize)
    structure_data = self.structure().drop(labels='dtypes_p', axis=1)
    structure_data = structure_data.to_dict('index')
    meta['structure'] = structure_data
    meta['numeric_summary'] = self.numeric_summary().to_dict('index')
    return meta
|
{
"imported_by": [
"/test.py",
"/autoc/__init__.py"
],
"imports": [
"/autoc/utils/helpers.py",
"/autoc/utils/corrplot.py",
"/autoc/explorer.py"
]
}
|
ericfourrier/auto-clean
|
/autoc/outliersdetection.py
|
"""
@author: efourrier
Purpose : This is a simple experimental class to detect outliers. This class
can be used to detect missing values encoded as outlier (-999, -1, ...)
"""
from autoc.explorer import DataExploration, pd
import numpy as np
#from autoc.utils.helpers import cserie
from exceptions import NotNumericColumn
def iqr(ndarray, dropna=True):
    """ Interquartile range (Q3 - Q1) of a 1-d array; NaNs dropped by default. """
    values = ndarray[~np.isnan(ndarray)] if dropna else ndarray
    q1 = np.percentile(values, 25)
    q3 = np.percentile(values, 75)
    return q3 - q1
def z_score(ndarray, dropna=True):
    """ Standard score (x - mean) / std of a 1-d array; NaNs dropped by default. """
    values = ndarray[~np.isnan(ndarray)] if dropna else ndarray
    return (values - np.mean(values)) / np.std(values)
def iqr_score(ndarray, dropna=True):
    """ Robust score (x - median) / IQR of a 1-d array; NaNs dropped by default. """
    values = ndarray[~np.isnan(ndarray)] if dropna else ndarray
    return (values - np.median(values)) / (iqr(values))
def mad_score(ndarray, dropna=True):
    """ Median-absolute-deviation score, scaled by 0.6745 so it is
    comparable to a z-score under normality; NaNs dropped by default. """
    values = ndarray[~np.isnan(ndarray)] if dropna else ndarray
    center = np.median(values)
    mad = np.median(np.absolute(values - center))
    return (values - center) / (mad / 0.6745)
class OutliersDetection(DataExploration):
    """
    This class focuses on identifying outliers.

    Parameters
    ----------
    data : DataFrame

    Examples
    --------
    * od = OutliersDetection(data = your_DataFrame)
    * od.structure() : global structure of your DataFrame
    """

    def __init__(self, *args, **kwargs):
        super(OutliersDetection, self).__init__(*args, **kwargs)
        # two preset severity levels for the 1-d score cutoffs
        self.strong_cutoff = {'cutoff_z': 6,
                              'cutoff_iqr': 6, 'cutoff_mad': 6}
        self.basic_cutoff = {'cutoff_z': 3,
                             'cutoff_iqr': 2, 'cutoff_mad': 2}

    def check_negative_value_serie(self, colname):
        """ Return the number of strictly negative values in *colname*.

        Fixes two bugs of the original ``check_negative_value(colname)``:
        it referenced an undefined name ``serie`` (NameError) and it
        built -- but never raised -- the NotNumericColumn exception.
        It is renamed to the name its caller (check_negative_value below)
        already expected.
        """
        if not self.is_numeric(colname):
            raise NotNumericColumn("The serie should be numeric values")
        return sum(self.data.loc[:, colname] < 0)

    def outlier_detection_serie_1d(self, colname, cutoff_params, scores=[z_score, iqr_score, mad_score]):
        """ Score one numeric column with every function in *scores* and flag
        rows whose absolute score exceeds the matching cutoff.

        cutoff_params maps 'cutoff_z' / 'cutoff_iqr' / 'cutoff_mad' to
        thresholds (see self.strong_cutoff / self.basic_cutoff).
        Returns a DataFrame with one column per score plus 'is_outlier'.
        """
        if not self.is_numeric(colname):
            # fix: the original did ``raise("...")`` which is a TypeError
            raise ValueError("auto-clean doesn't support outliers detection for Non numeric variable")
        keys = [str(func.__name__) for func in scores]
        df = pd.DataFrame(dict((key, func(self.data.loc[:, colname]))
                               for key, func in zip(keys, scores)))
        df['is_outlier'] = 0
        for s in keys:
            # e.g. 'z_score' -> 'cutoff_z'
            cutoff_colname = "cutoff_{}".format(s.split('_')[0])
            index_outliers = np.absolute(df[s]) >= cutoff_params[cutoff_colname]
            df.loc[index_outliers, 'is_outlier'] = 1
        return df

    def check_negative_value(self):
        """ Number of negative values for each numeric variable of the DataFrame. """
        return self.data[self._dfnum].apply(lambda x: self.check_negative_value_serie(x.name))

    def outlier_detection_1d(self, cutoff_params, subset=None,
                             scores=[z_score, iqr_score, mad_score]):
        """ Run outlier_detection_serie_1d on every numeric column and return
        the concatenated score DataFrame (columns prefixed with the
        variable name).

        Fix: the original returned only the scores of the *last* column
        because the concatenation was commented out.
        """
        df = self.data.copy()
        numeric_var = self._dfnum
        if subset:
            df = df.drop(subset, axis=1)
        df = df.loc[:, numeric_var]  # keep only numeric variables
        df_outlier = pd.DataFrame()
        for col in df:
            df_temp = self.outlier_detection_serie_1d(col, cutoff_params, scores)
            df_temp.columns = [col + '_' +
                               col_name for col_name in df_temp.columns]
            df_outlier = pd.concat([df_outlier, df_temp], axis=1)
        return df_outlier
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for modeling with pandas, numpy and scikit-learn.
The goal of this module is to rely on a dataframe structure for modelling.
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result into a attributes
When you used a method the output will be stored in a instance attribute so you
don't have to compute the result again.
"""
def __init__(self, data, copy=False):
    """
    Parameters
    ----------
    data : pandas.DataFrame
        the data you want to explore
    copy: bool
        True if you want to make a copy of the DataFrame, default False

    Examples
    --------
    explorer = DataExploration(data = your_DataFrame)
    explorer.structure() : global structure of your DataFrame
    explorer.psummary() to get a global snapshot of the different stuff detected
    data_cleaned = explorer.basic_cleaning() to clean your data.
    """
    assert isinstance(data, pd.DataFrame)
    self.is_data_copy = copy
    self.data = data if not self.is_data_copy else data.copy()
    # if not self.label:
    #     print("""the label column is empty the data will be considered
    #     as a dataset of predictors""")
    self._nrow = len(self.data.index)  # number of rows
    self._ncol = len(self.data.columns)  # number of columns
    # boolean mask of the numeric (float/int) columns, and their names
    self._dfnumi = (self.data.dtypes == float) | (
        self.data.dtypes == int)
    self._dfnum = cserie(self._dfnumi)
    # boolean mask of the object (string) columns, and their names
    self._dfchari = (self.data.dtypes == object)
    self._dfchar = cserie(self._dfchari)
    # lazy caches filled by the corresponding methods on first call
    self._nacolcount = pd.DataFrame()
    self._narowcount = pd.DataFrame()
    self._count_unique = pd.DataFrame()
    self._constantcol = []
    self._dupcol = []
    self._nearzerovar = pd.DataFrame()
    self._corrcolumns = []
    self._dict_info = {}
    self._structure = pd.DataFrame()
    self._string_info = ""
    # lowercase markers frequently used to encode missing values
    self._list_other_na = {'unknown', 'na',
                           'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
    """
    Return True when column *colname* holds int or float values.

    Parameters
    ----------
    colname : str
        the name of the column of self.data

    Notes
    ------
    df._get_numeric_data() is a primitive from pandas
    to get only numeric data
    """
    column_dtype = self.data.loc[:, colname].dtype
    return (column_dtype == int) or (column_dtype == float)
def is_int_factor(self, colname, threshold=0.1):
    """
    Return True when *colname* is an integer column with few distinct
    values (nunique <= threshold * number of rows), i.e. an 'int factor'.

    Parameters
    ----------
    colname : str
        the name of the column of self.data
    threshold : float
        fraction of the row count used as the uniqueness cap
    """
    column = self.data.loc[:, colname]
    if column.dtype != int:
        return False
    return column.nunique() <= (threshold * self.data.shape[0])
def to_lowercase(self):
    """ Return a copy of the data with every string value lower-cased. """
    def lower_if_str(value):
        return value.lower() if type(value) == str else value
    return self.data.applymap(lower_if_str)
def where_numeric(self):
    """ Boolean DataFrame: True where the cell holds an int or a float. """
    return self.data.applymap(lambda cell: isinstance(cell, (int, float)))
def count_unique(self):
    """ Series with the number of unique values per column (cached). """
    if not len(self._count_unique):
        self._count_unique = self.data.apply(lambda col: col.nunique(), axis=0)
    return self._count_unique
def sample_df(self, pct=0.05, nr=10, threshold=None):
    """ Random sample of rows, of size min(max(pct * nrow, nr), threshold). """
    nb_rows = max(int(pct * float(len(self.data.index))), nr)
    if threshold:
        nb_rows = min(nb_rows, threshold)
    shuffled = permutation(self.data.index)
    return self.data.loc[shuffled[:nb_rows], :]
def sign_summary(self, subset=None):
    """
    Number and percentage of positive and negative values per column.

    Parameters
    ----------
    subset : label or list
        Column name or list of column names to check; defaults to all
        numeric columns.

    Returns
    -------
    summary : pandas.DataFrame
        Columns NumOfNegative / PctOfNegative / NumOfPositive /
        PctOfPositive, indexed by column name.  Zeros count as both
        positive (>= 0) and negative (<= 0), as in the original.

    Raises
    ------
    NotNumericColumn
        If a requested column is not numeric.

    Fix: the original read the undefined name ``subs`` instead of the
    ``subset`` parameter, raising NameError whenever subset was given.
    """
    if subset:
        subs = subset if isinstance(subset, list) else [subset]
        if sum(col not in self._dfnum for col in subs) > 0:
            raise NotNumericColumn('At least one of the columns you passed '
                                   'as argument are not numeric.')
    else:
        subs = self._dfnum

    summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
                                    'NumOfPositive', 'PctOfPositive'])
    summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
    summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
    summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
    summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
    return summary
@property
def total_missing(self):
    """ Total number of missing values in the whole DataFrame. """
    # np.count_nonzero(self.data.isnull().values) is faster but the
    # cached column counts are reused here, as in the original.
    per_column = self.nacolcount().Nanumber
    return per_column.sum()
def nacolcount(self):
    """ Per-column missing-value report: 'Nanumber' (count) and
    'Napercentage' (count / number of rows).  Cached after first call. """
    if not len(self._nacolcount):
        na_counts = self.data.isnull().sum(axis=0)
        report = pd.DataFrame(na_counts, columns=['Nanumber'])
        report['Napercentage'] = report['Nanumber'] / (self._nrow)
        self._nacolcount = report
    return self._nacolcount
def narowcount(self):
    """ Count the number of missing values per row (despite the historical
    wording "per columns", this is row-wise: isnull().sum(axis=1)).

    Returns
    -------
    pandas.DataFrame
        Indexed by row, with columns 'Nanumber' (NaN count of the row) and
        'Napercentage' (that count divided by the number of columns).
        Cached in self._narowcount after the first call.
    """
    # Serve the cached result when it was computed before.
    if len(self._narowcount):
        return self._narowcount
    self._narowcount = self.data.isnull().sum(axis=1)
    self._narowcount = pd.DataFrame(
        self._narowcount, columns=['Nanumber'])
    self._narowcount['Napercentage'] = self._narowcount[
        'Nanumber'] / (self._ncol)
    return self._narowcount
def detect_other_na(self, verbose=True, auto_replace=False):
    """ Detect missing values encoded by the creator of the dataset
    like 'Missing', 'N/A' ... (markers listed in self._list_other_na).

    Parameters
    ----------
    verbose : bool
        True if you want to print the number of detected values.
        Fix: the original ignored this flag and always printed.
    auto_replace: bool
        True if you want replace this value by np.nan, default False

    Returns
    -------
    a DataFrame of booleans if not auto_replace, else a cleaned DataFrame
    with the self._list_other_na markers replaced by np.nan

    Notes
    ------
    * You can use na_values parameter in pandas.read_csv to specify the
      missing values to convert to nan a priori
    * Speed can be improved
    """
    # Lower-case comparison so markers match regardless of case.
    res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
    if verbose:
        print("We detected {} other type of missing values".format(res.sum().sum()))
    if auto_replace:
        # keep original values where no alternative NA marker was found
        return self.data.where((res == False), np.nan)
    else:
        return res
@property
def nacols_full(self):
    """ List of column names whose values are all missing. """
    mask = self.nacolcount().Nanumber == self._nrow
    return cserie(mask)
@property
def narows_full(self):
    """ Boolean index flagging rows made only of missing values. """
    counts = self.narowcount()
    return counts.Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
def manymissing(self, pct=0.9, axis=0):
    """ Identify columns (axis=0) or rows (axis=1) with a proportion of
    missing values >= pct.

    Parameters
    ----------
    pct : float
        threshold proportion of missing values (default 0.9)
    axis : int
        0 to inspect columns, 1 to inspect rows

    Returns
    -------
    axis=1 : boolean pandas.Series indexed by row
    axis=0 : list of column names (via cserie)

    Raises
    ------
    ValueError if axis is neither 0 nor 1.
    """
    if axis == 1:
        self._manymissingrow = self.narowcount()
        self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
        return self._manymissingrow
    elif axis == 0:
        self._manymissingcol = self.nacolcount()
        self._manymissingcol = cserie(
            self._manymissingcol['Napercentage'] >= pct)
        return self._manymissingcol
    else:
        # fix: the message used to read "o for columns"
        raise ValueError("Axis should be 1 for rows and 0 for columns")
def df_len_string(self, drop_num=False):
    """ Maximum string length per string-typed column, as a Series.

    drop_num=True removes the numeric columns first; otherwise numeric
    columns simply yield NaN.
    """
    if drop_num:
        frame = self.data.drop(self._dfnum, axis=1)
        return frame.apply(lambda col: np.max(col.str.len()), axis=0)

    def longest(col):
        # object-dtype columns get their max string length, others NaN
        return np.max(col.str.len()) if col.dtype.kind == 'O' else np.nan

    return self.data.apply(longest, axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
    """ Identify id/key columns (columns whose values are all unique).

    A cheap uniqueness test is first run on a sample (sample_df); the
    surviving candidate columns are then re-checked on the full data.

    Parameters
    ----------
    index_format : bool
        True to return a boolean index, False to return a list of names.
    pct : float
        fraction of rows used for the sampling pre-filter.
    dropna : bool
        True to ignore missing values when testing uniqueness.

    Fix: the full-data re-check used '==' (a discarded comparison)
    instead of '=', so the sampled pre-filter result was never refined;
    it now assigns.
    """
    if not dropna:
        col_to_keep = self.sample_df(
            pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
        if len(col_to_keep) == 0:
            return []
        is_key_index = col_to_keep
        is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
            lambda x: len(x.unique()) == len(x), axis=0)
        if index_format:
            return is_key_index
        else:
            return cserie(is_key_index)
    else:
        col_to_keep = self.sample_df(
            pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
        if len(col_to_keep) == 0:
            return []
        is_key_index = col_to_keep
        is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
            lambda x: x.nunique() == len(x.dropna()), axis=0)
        if index_format:
            return is_key_index
        else:
            return cserie(is_key_index)
def constantcol(self, **kwargs):
    """ Return (and cache) the list of constant columns.

    A sampled pre-filter keeps the computation cheap; candidate columns
    are then confirmed against the full data.
    """
    if len(self._constantcol):  # cached from a previous call
        return self._constantcol
    candidates = self.sample_df(**kwargs).apply(
        lambda col: len(col.unique()) == 1, axis=0)
    if len(cserie(candidates)) == 0:
        return []
    confirmed = self.data.loc[:, candidates].apply(
        lambda col: len(col.unique()) == 1, axis=0)
    self._constantcol = cserie(confirmed)
    return self._constantcol
def constantcol2(self, **kwargs):
    """ Identify constant columns by comparing every row to the first one.

    Fix: ``DataFrame.ix`` was removed from pandas; use ``iloc`` instead.
    """
    return cserie((self.data == self.data.iloc[0]).all())
def factors(self, nb_max_levels=10, threshold_value=None, index=False):
    """ Return the detected factor variables; detection follows the R
    notion of a factor: a character variable taking values in a small
    set of levels.

    Arguments
    ----------
    nb_max_levels : int
        the max number of levels allowed for a categorical variable
    threshold_value : float or None
        number of unique values expressed as a fraction of the dataframe
        length; when given, the effective cap is the larger of the two
    index : bool
        True for a boolean index, False (default) for a list of names
    """
    # Effective level cap: absolute cap, or row-proportional cap if larger.
    if threshold_value:
        max_levels = max(nb_max_levels, threshold_value * self._nrow)
    else:
        max_levels = nb_max_levels

    def helper_factor(x, num_var=self._dfnum):
        # A column is a factor when it is non-numeric and its number of
        # distinct values stays below max_levels (early-exit scan).
        unique_value = set()
        if x.name in num_var:
            return False
        else:
            for e in x.values:
                if len(unique_value) >= max_levels:
                    return False
                else:
                    unique_value.add(e)
            return True

    if index:
        return self.data.apply(lambda x: helper_factor(x))
    else:
        return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in xrange(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in xrange(nb_quantiles + 1)])
else:
raise("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
    """ Return a DataFrame with nb_quantiles + 1 quantiles of each column.

    only_numeric=True restricts the computation to numeric columns;
    with False, non-numeric columns yield NaN.

    Fix: the original used Python 2's ``xrange`` (NameError on Python 3).
    """
    binq = 1.0 / nb_quantiles
    quantile_points = [binq * i for i in range(nb_quantiles + 1)]
    if only_numeric:
        return self.data.loc[:, self._dfnumi].quantile(quantile_points)
    else:
        return self.data.quantile(quantile_points)
def numeric_summary(self):
    """ Provide a more complete summary than describe(); numeric columns
    only. Returns one row per column with Count/Min/quartiles/Mean/Std/
    Mad/Skewness/Kurtosis/Max. """
    # Restrict to numeric columns (boolean mask computed at construction).
    df = self.data.loc[:, self._dfnumi]
    # One aggregate Series per statistic, stacked as rows then transposed
    # so each row of the result describes one column.
    # NOTE(review): Series.mad() is deprecated/removed in recent pandas --
    # confirm the pandas version pinned by the project.
    func_list = [df.count(), df.min(), df.quantile(0.25),
                 df.quantile(0.5), df.mean(),
                 df.std(), df.mad(), df.skew(),
                 df.kurt(), df.quantile(0.75), df.max()]
    results = [f for f in func_list]
    return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
                                        'Median', 'Mean', 'Std', 'Mad', 'Skewness',
                                        'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
    """ Infer the fine-grained type of each column, as a Series of strings.

    Fix: ``pd.lib.infer_dtype`` was removed from pandas; the public
    equivalent is ``pd.api.types.infer_dtype``.
    """
    return self.data.apply(lambda x: pd.api.types.infer_dtype(x.values))
def structure(self, threshold_factor=10):
    """Return a per-column summary of the DataFrame structure (cached).

    For every column: pandas dtype, a coarse R-like type
    (numeric / character / factor), missing-value count and share,
    number of unique values, constant/na/key flags, pandas' inferred
    dtype and the max string length.

    Parameters
    ----------
    threshold_factor : int
        a character column with <= threshold_factor unique values is
        tagged as 'factor'.
    """
    # cached: computed once per instance, then reused
    if len(self._structure):
        return self._structure
    dtypes = self.data.dtypes
    nacolcount = self.nacolcount()
    nb_missing = nacolcount.Nanumber
    perc_missing = nacolcount.Napercentage
    nb_unique_values = self.count_unique()
    dtype_infer = self.infer_types()
    # start every column as 'character', then refine to numeric / factor
    dtypes_r = self.data.apply(lambda x: "character")
    dtypes_r[self._dfnumi] = "numeric"
    dtypes_r[(dtypes_r == 'character') & (
        nb_unique_values <= threshold_factor)] = 'factor'
    constant_columns = (nb_unique_values == 1)
    na_columns = (perc_missing == 1)
    # a column whose values are all distinct is a candidate key
    is_key = nb_unique_values == self._nrow
    # NOTE(review): df_len_string is defined elsewhere in this class — presumably
    # the max string length per column; confirm drop_num semantics there
    string_length = self.df_len_string(drop_num=False)
    # is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
    dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                'nb_missing': nb_missing, 'is_key': is_key,
                'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                'constant_columns': constant_columns, 'na_columns': na_columns,
                'dtype_infer': dtype_infer, 'string_length': string_length}
    self._structure = pd.concat(dict_str, axis=1)
    # impose an explicit, stable column order on the concatenated result
    self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
                                              'nb_unique_values', 'constant_columns',
                                              'na_columns', 'is_key', 'dtype_infer', 'string_length']]
    return self._structure
def findupcol(self, threshold=100, **kwargs):
    """Find duplicated columns; return the result as a list of lists
    (each inner list groups the names of identical columns).

    Parameters
    ----------
    threshold : int
        max number of rows sampled for the cheap first-pass detection
        (the candidates are then verified on the full data).
    """
    # bug fix: `threshold` was previously ignored (hard-coded 100)
    df_s = self.sample_df(threshold=threshold, **kwargs).T
    # first pass on a sample: cheap way to shortlist candidate duplicates
    dup_index_s = (df_s.duplicated()) | (
        df_s.duplicated(keep='last'))
    if len(cserie(dup_index_s)) == 0:
        return []
    # second pass: verify candidates on the full data
    df_t = (self.data.loc[:, dup_index_s]).T
    dup_index = df_t.duplicated()
    dup_index_complet = cserie(
        (dup_index) | (df_t.duplicated(keep='last')))
    l = []
    for col in cserie(dup_index):
        # keep the columns equal to `col` on every row
        index_temp = self.data[dup_index_complet].apply(
            lambda x: (x == self.data[col])).sum() == self._nrow
        temp = list(self.data[dup_index_complet].columns[index_temp])
        l.append(temp)
    self._dupcol = l
    return self._dupcol
def finduprow(self, subset=[]):
    """Return all duplicated rows (first and later occurrences), sorted.

    Parameters
    ----------
    subset : list
        columns to restrict the duplicate detection to; empty means all
        columns. (Mutable default kept for interface compatibility — it
        is never mutated.)

    Returns None (and prints a message) when there is no duplicated row.
    """
    if sum(self.data.duplicated()) == 0:
        print("there is no duplicated rows")
    else:
        if subset:
            dup_index = (self.data.duplicated(subset=subset)) | (
                self.data.duplicated(subset=subset, keep='last'))
        else:
            dup_index = (self.data.duplicated()) | (
                self.data.duplicated(keep='last'))
        # compat fix: DataFrame.sort() was removed from pandas — sort_values
        if subset:
            return self.data[dup_index].sort_values(subset)
        else:
            return self.data[dup_index].sort_values(self.data.columns[0])
def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
    """Identify predictors with near-zero variance.

    freq_cut: cutoff ratio of frequency of most common value to second
    most common value.
    unique_cut: cutoff percentage of unique value over total number of
    samples.
    save_metrics: if False, print dataframe and return NON near-zero var
    col indexes, if True, returns the whole dataframe.
    """
    nb_unique = self.count_unique()
    percent_unique = 100 * nb_unique / self._nrow

    def _freq_ratio(col):
        # ratio between the two most frequent values; degenerate cases first
        n_levels = nb_unique[col.name]
        if n_levels == 0:
            return 0.0
        if n_levels == 1:
            return 1.0
        counts = col.value_counts()
        return float(counts.iloc[0]) / counts.iloc[1]

    freq_ratio = self.data.apply(_freq_ratio)
    zerovar = (nb_unique == 0) | (nb_unique == 1)
    nzv = ((freq_ratio >= freq_cut) & (percent_unique <= unique_cut)) | zerovar
    metrics = pd.DataFrame({'percent_unique': percent_unique,
                            'freq_ratio': freq_ratio,
                            'zero_var': zerovar,
                            'nzv': nzv}, index=self.data.columns)
    if save_metrics:
        return metrics
    print(metrics)
    return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
    """
    Implementation of the Recursive Pairwise Elimination.
    Repeatedly finds the highest correlated pair and removes the most
    highly correlated feature of the pair until no pairwise correlation
    exceeds `cutoff`.

    Parameters
    ----------
    cutoff : float
        correlation threshold above which one column of the pair is removed.
    method : str
        correlation method forwarded to DataFrame.corr.
    data_frame : bool
        True returns the data with the flagged columns dropped,
        False returns the list of columns to remove.
    print_mode : bool
        True prints the running max correlation at each iteration.

    Adaptation of 'findCorrelation' function in the caret package in R.
    """
    res = []
    df = self.data.copy()
    cor = df.corr(method=method)
    # zero the diagonal so a column is never "correlated with itself"
    # (fix: the original used chained assignment cor[col][col] = 0)
    for col in cor.columns:
        cor.loc[col, col] = 0
    max_cor = cor.max()
    if print_mode:
        print(max_cor.max())
    while max_cor.max() > cutoff:
        A = max_cor.idxmax()
        B = cor[A].idxmax()
        # drop whichever member of the pair is on average more correlated
        # with the remaining columns
        dropped = A if cor[A].mean() > cor[B].mean() else B
        # compat fix: positional `axis` argument was removed in pandas 2.0
        cor = cor.drop(dropped, axis=1).drop(dropped, axis=0)
        res += [dropped]
        max_cor = cor.max()
        if print_mode:
            print(max_cor.max())
    # bug fix: this assignment was dead code placed after the return statements
    self._corrcolumns = res
    if data_frame:
        return df.drop(res, axis=1)
    return res
def get_infos_consistency(self):
    """Update self._dict_info and return consistency infos: duplicated
    rows and columns, constant columns, fully-missing rows and columns."""
    dup_cols = self.findupcol()
    const_cols = self.constantcol()
    full_na_cols = self.nacols_full
    delete_row_msg = 'You should delete this rows with df.drop_duplicates()'
    delete_col_msg = 'You should delete one of the column with df.drop({}, axis=1)'
    infos = {
        'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True),
                            'level': 'ERROR', 'action': 'delete',
                            'comment': delete_row_msg},
        'dup_columns': {'value': dup_cols, 'level': 'ERROR', 'action': 'delete',
                        'comment': delete_col_msg.format(dup_cols)},
        'constant_columns': {'value': const_cols, 'level': 'WARNING', 'action': 'delete',
                             'comment': delete_col_msg.format(const_cols)},
        'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                        'action': 'delete', 'comment': delete_row_msg},
        'nacols_full': {'value': full_na_cols, 'level': 'ERROR', 'action': 'delete',
                        'comment': delete_col_msg.format(full_na_cols)},
    }
    # merge into the instance-wide info dict
    self._dict_info.update(infos)
    return infos
def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
    """Update self._dict_info and return missing-value infos: total and
    percentage of missing cells, columns with very many / few NAs."""
    na_pct = self.nacolcount().Napercentage
    total = self.total_missing
    infos = {
        'nb_total_missing': {'value': total, 'level': 'INFO', 'action': None},
        'pct_total_missing': {'value': float(total) / self._nrow,
                              'level': 'INFO', 'action': None},
        'many_na_columns': {'value': cserie(na_pct > manymissing_ph),
                            'level': 'ERROR', 'action': 'delete or impute'},
        'low_na_columns': {'value': cserie((na_pct > 0) & (na_pct <= manymissing_pl)),
                           'level': 'WARNING', 'action': 'impute'},
    }
    # merge into the instance-wide info dict
    self._dict_info.update(infos)
    return infos
def print_infos(self, infos="consistency", print_empty=False):
    """Pretty-print the infos dict.

    Parameters
    ----------
    infos : str
        which info family to print (only "consistency" is supported).
    print_empty: bool
        False drops the entries whose value is empty (e.g. no missing
        column) before printing.
    """
    if infos == "consistency":
        dict_infos = self.get_infos_consistency()
        if not print_empty:
            dict_infos = {key: info for key, info in dict_infos.items()
                          if len(info['value']) > 0}
        pprint(dict_infos)
def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
             threshold=100, string_threshold=40, dynamic=False):
    """
    Print a global summary of the dataset, based on the other methods of
    this class: duplicated rows/columns, heavily- and lightly-missing
    columns, candidate keys, constant columns, near-zero-variance
    columns, highly correlated columns and long-string columns.

    - Output : python print
    With dynamic=True each diagnostic is computed and printed one by one;
    otherwise everything is gathered first, stored in the private
    attributes self._dict_info / self._string_info, then printed once.

    NOTE(review): the `threshold` parameter is not forwarded to findupcol
    (the call below hard-codes threshold=100) — confirm intent.
    """
    nacolcount_p = self.nacolcount().Napercentage
    if dynamic:
        print('there are {0} duplicated rows\n'.format(
            self.data.duplicated().sum()))
        print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                      cserie((nacolcount_p > manymissing_ph))))
        print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
            manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
        print('the detected keys of the dataset are:\n{0} \n'.format(
            self.detectkey()))
        print('the duplicated columns of the dataset are:\n{0}\n'.format(
            self.findupcol(threshold=100)))
        print('the constant columns of the dataset are:\n{0}\n'.format(
            self.constantcol()))
        print('the columns with nearzerovariance are:\n{0}\n'.format(
            list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
        print('the columns highly correlated to others to remove are:\n{0}\n'.format(
            self.findcorr(data_frame=False)))
        print('these columns contains big strings :\n{0}\n'.format(
            cserie(self.df_len_string() > string_threshold)))
    else:
        # gather every diagnostic once, keep it for later inspection
        self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                           'many_missing_percentage': manymissing_ph,
                           'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                           'low_missing_percentage': manymissing_pl,
                           'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                           'keys_detected': self.detectkey(),
                           'dup_columns': self.findupcol(threshold=100),
                           'constant_columns': self.constantcol(),
                           'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                           'high_correlated_col': self.findcorr(data_frame=False),
                           'big_strings_col': cserie(self.df_len_string() > string_threshold)
                           }
        # one big report template filled from the dict above
        self._string_info = u"""
there are {nb_duplicated_rows} duplicated rows\n
the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
the columns with less than {low_missing_percentage:.2%}% manymissing values are :\n{lowmissing_columns} \n
you should fill them with median or most common value\n
the detected keys of the dataset are:\n{keys_detected} \n
the duplicated columns of the dataset are:\n{dup_columns}\n
the constant columns of the dataset are:\n{constant_columns}\n
the columns with nearzerovariance are:\n{nearzerovar_columns}\n
the columns highly correlated to others to remove are:\n{high_correlated_col}\n
these columns contains big strings :\n{big_strings_col}\n
""".format(**self._dict_info)
        print(self._string_info)
def metadata(self):
    """Return a JSON-serializable dict of infos about the dataset:
    memory size, column names, shape, per-column structure and the
    numeric summary."""
    # drop dtype_p for mongodb compatibility
    structure_data = self.structure().drop(labels='dtypes_p', axis=1)
    return {
        'mem_size': self.data.memory_usage(index=True).sum(),  # in bytes
        'columns_name': self.data.columns.tolist(),
        'columns_name_n': [name.lower() for name in self.data.columns],
        'nb_rows': self.data.shape[0],
        'nb_columns': self.data.shape[1],
        'structure': structure_data.to_dict('index'),
        'numeric_summary': self.numeric_summary().to_dict('index'),
    }
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : File with all custom exceptions
"""
class NotNumericColumn(Exception):
    """Raised when a column that should be numeric is not."""
class NumericError(Exception):
    """Raised when a column is numeric but should not be."""
# class NotFactor
|
{
"imported_by": [
"/test.py"
],
"imports": [
"/autoc/explorer.py",
"/autoc/exceptions.py"
]
}
|
ericfourrier/auto-clean
|
/autoc/preprocess.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : The purpose of this class is too automaticely transfrom a DataFrame
into a numpy ndarray in order to use an aglorithm
"""
#########################################################
# Import modules and global helpers
#########################################################
from autoc.explorer import DataExploration, pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from autoc.exceptions import NumericError
class PreProcessor(DataExploration):
    """Automate the transformation of a DataFrame into something an
    algorithm can consume: basic cleaning, subtype inference, and
    conversion/reduction of categorical variables.
    """

    # coarse subtypes the inference methods may report
    subtypes = ['text_raw', 'text_categorical', 'ordinal', 'binary', 'other']

    def __init__(self, *args, **kwargs):
        super(PreProcessor, self).__init__(*args, **kwargs)
        # heuristic thresholds used by the subtype inference below
        self.long_str_cutoff = 80
        self.short_str_cutoff = 30
        self.perc_unique_cutoff = 0.2
        self.nb_max_levels = 20

    def basic_cleaning(self, filter_nacols=True, drop_col=None,
                       filter_constantcol=True, filer_narows=True,
                       verbose=True, filter_rows_duplicates=True, inplace=False):
        """
        Basic cleaning of the data by deleting fully-missing columns,
        constant columns, fully-missing rows, duplicated rows, and any
        drop_col specified by the user.

        Returns the cleaned DataFrame; with inplace=True, self.data is
        replaced by the cleaned frame as well.
        """
        col_to_remove = []
        index_to_remove = []
        if filter_nacols:
            col_to_remove += self.nacols_full
        if filter_constantcol:
            col_to_remove += list(self.constantcol())
        if filer_narows:
            index_to_remove += cserie(self.narows_full)
        if filter_rows_duplicates:
            index_to_remove += cserie(self.data.duplicated())
        if isinstance(drop_col, list):
            col_to_remove += drop_col
        elif isinstance(drop_col, str):
            col_to_remove += [drop_col]
        col_to_remove = list(set(col_to_remove))
        index_to_remove = list(set(index_to_remove))
        if verbose:
            print("We are removing the folowing columns : {}".format(col_to_remove))
            print("We are removing the folowing rows : {}".format(index_to_remove))
        cleaned = self.data.drop(index_to_remove).drop(col_to_remove, axis=1)
        if inplace:
            # bug fix: the inplace branch previously returned a dropped copy
            # without ever updating self.data
            self.data = cleaned
            return self.data
        return cleaned

    def _infer_subtype_col(self, colname):
        """Best-effort inference of a column subtype (see `subtypes`).

        Returns None when no rule matches (kept for backward
        compatibility with the original implementation).
        """
        serie_col = self.data.loc[:, colname]
        if serie_col.nunique() == 2:
            return 'binary'
        elif serie_col.dtype.kind == 'O':
            if (serie_col.str.len().mean() > self.long_str_cutoff and
                    serie_col.nunique() / len(serie_col) > self.perc_unique_cutoff):
                return "text_long"
            elif (serie_col.str.len().mean() <= self.short_str_cutoff and
                    serie_col.nunique() <= self.nb_max_levels):
                return 'text_categorical'
        elif self.is_numeric(colname):
            if serie_col.dtype == int and serie_col.nunique() <= self.nb_max_levels:
                return "ordinal"
            else:
                return "other"

    def infer_subtypes(self):
        """Apply _infer_subtype_col to the whole DataFrame; returns a dict
        colname -> {'dtype': ..., 'subtype': ...}."""
        return {col: {'dtype': self.data.loc[:, col].dtype,
                      'subtype': self._infer_subtype_col(col)}
                for col in self.data.columns}

    def infer_categorical_str(self, colname, nb_max_levels=10, threshold_value=0.01):
        """Return True if the column looks like a factor variable,
        i.e. a non-numeric, non-categorical column taking values in a
        small set of levels.

        Arguments
        ----------
        nb_max_levels: int
            the max nb of levels you fix for a categorical variable
        threshold_value : float
            the nb of unique values as a fraction of the dataframe length
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels
        # False for numeric columns
        if self.is_numeric(colname):
            return False
        # False for columns already converted to categorical
        if self.data.loc[:, colname].dtype == "category":
            return False
        unique_value = set()
        # bug fix: the original read `self.data.loc[:, colname], iteritems()`
        # (a comma typo); .items() also replaces the removed Series.iteritems()
        for _, value in self.data.loc[:, colname].items():
            if len(unique_value) >= max_levels:
                return False
            unique_value.add(value)
        return True

    def get_factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """Return the detected factor variables (see infer_categorical_str).

        Arguments
        ----------
        nb_max_levels: int
            the max nb of levels you fix for a categorical variable.
        threshold_value : float
            the nb of unique values as a fraction of the dataframe length.
        index: bool
            False returns a list, True returns a boolean Series.
        """
        # bug fix: infer_categorical_str expects a column *name*; the original
        # passed the whole Series
        res = self.data.apply(
            lambda x: self.infer_categorical_str(x.name, nb_max_levels, threshold_value))
        if index:
            return res
        return cserie(res)

    def factors_to_categorical(self, inplace=True, verbose=True, *args, **kwargs):
        """Convert the detected factor columns to pandas 'category' dtype."""
        factors_col = self.get_factors(*args, **kwargs)
        if verbose:
            print("We are converting following columns to categorical :{}".format(
                factors_col))
        # bug fix: the original referenced self.df (undefined) and the bare
        # name `category` instead of the 'category' dtype string
        if inplace:
            self.data.loc[:, factors_col] = self.data.loc[:, factors_col].astype('category')
        else:
            return self.data.loc[:, factors_col].astype('category')

    def remove_category(self, colname, nb_max_levels, replace_value='other', verbose=True):
        """Replace a variable with too many categories by grouping the minor
        categories into `replace_value`."""
        if self.data.loc[:, colname].nunique() < nb_max_levels:
            if verbose:
                print("{} has not been processed because levels < {}".format(
                    colname, nb_max_levels))
        else:
            if self.is_numeric(colname):
                # bug fix: .format() was called without its argument
                raise NumericError(
                    '{} is a numeric columns you cannot use this function'.format(colname))
            # bug fix: value_counts is a method and was not being called
            top_levels = self.data.loc[:, colname].value_counts()[0:nb_max_levels].index
            self.data.loc[~self.data.loc[:, colname].isin(
                top_levels), colname] = replace_value
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Create toolbox functions to use for the different pieces of code ot the package
"""
from numpy.random import normal
from numpy.random import choice
import time
import pandas as pd
import numpy as np
import functools
def print_section(section_name, width=120):
    """Print a centered section banner padded with '=' (for reports)."""
    banner = ' ' + section_name + ' '
    # bug fix: the nested format field was '{ }' (a field named " "), which
    # raises KeyError; '{}' correctly consumes the width argument
    print('{:=^{}}'.format(banner, width))
# def get_dataset(name, *args, **kwargs):
# """Get a dataset from the online repo
# https://github.com/ericfourrier/autoc-datasets (requires internet).
#
# Parameters
# ----------
# name : str
# Name of the dataset 'name.csv'
# """
# path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
# return pd.read_csv(path, *args, **kwargs)
def flatten_list(x):
    """Recursively flatten arbitrarily nested lists into one flat list.

    A non-list input is wrapped into a single-element list.
    """
    if not isinstance(x, list):
        return [x]
    flat = []
    for item in x:
        flat.extend(flatten_list(item))
    return flat
def cserie(serie, index=False):
    """Return the index labels where the boolean `serie` is True.

    Returns a pandas Index when index=True, a plain list otherwise.
    """
    true_labels = serie[serie].index
    return true_labels if index else true_labels.tolist()
def removena_numpy(array):
    """Return a copy of the array with the NaN entries filtered out."""
    keep_mask = np.logical_not(np.isnan(array))
    return array[keep_mask]
def common_cols(df1, df2):
    """Return the (unordered) intersection of the two frames' column names."""
    return list(set(df1.columns).intersection(df2.columns))
def bootstrap_ci(x, n=300, ci=0.95):
    """
    Bootstrap percentile confidence interval for the mean of a numpy array.

    Arguments
    ---------
    x : a numpy ndarray
    n : the number of boostrap samples
    ci : the percentage confidence (float) interval in ]0,1[

    Return
    -------
    a tuple (ci_inf, ci_up); (nan, nan) when x has no non-NaN value
    """
    low_per = 100 * (1 - ci) / 2
    high_per = 100 * ci + low_per
    # self-contained NaN filtering (was delegated to removena_numpy)
    x = x[~np.isnan(x)]
    if not len(x):
        return (np.nan, np.nan)
    bootstrap_means = np.random.choice(a=x, size=(
        len(x), n), replace=True).mean(axis=0)
    # return a plain tuple to honour the documented (ci_inf, ci_up) contract
    ci_inf, ci_up = np.percentile(bootstrap_means, [low_per, high_per])
    return (ci_inf, ci_up)
def clock(func):
    """Decorator printing the wall-clock duration (in ms) of every call.

    Usable on any function; the wrapped function's return value is
    passed through unchanged.
    """
    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def clocked(*args, **kwargs):
        # bug fix: keyword arguments were previously not forwarded
        t0 = time.time()
        result = func(*args, **kwargs)
        elapsed = (time.time() - t0) * 1000  # in ms
        print('elapsed : [{0:0.3f}ms]'.format(elapsed))
        return result
    return clocked
def cached_property(fun):
    """A memoize decorator for class properties.

    The computed value is stored in the instance's `_cache` dict, keyed
    by the wrapped function, so it is evaluated at most once per instance.
    """
    @functools.wraps(fun)
    def get(self):
        if not hasattr(self, '_cache'):
            self._cache = {}
        if fun not in self._cache:
            self._cache[fun] = fun(self)
        return self._cache[fun]
    return property(get)
def create_test_df():
    """Create a 1000-row pandas DataFrame fixture for the unittest suite.

    Each column exercises one detector of the package: NA columns,
    constant columns, factors, near-zero-variance, duplicated columns,
    heavy missingness, outliers, datetimes and alternative NA encodings.
    Uses numpy.random, so contents vary between calls.
    """
    # candidate keys: 'id' and 'member_id' are strictly increasing / unique
    test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [
        10 * i for i in range(1, 1001)]})
    test_df['na_col'] = np.nan
    test_df['id_na'] = test_df.id
    test_df.loc[1:3, 'id_na'] = np.nan
    test_df['constant_col'] = 'constant'
    test_df['constant_col_num'] = 0
    # small-cardinality factors, both character and numeric
    test_df['character_factor'] = [
        choice(list('ABCDEFG')) for _ in range(1000)]
    test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]
    # one dominant value + a single rare one -> near-zero variance
    test_df['nearzerovar_variable'] = 'most_common_value'
    test_df.loc[0, 'nearzerovar_variable'] = 'one_value'
    test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]
    test_df['character_variable'] = [str(i) for i in range(1000)]
    test_df['duplicated_column'] = test_df.id
    test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700
    test_df['character_variable_fillna'] = ['A'] * \
        300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300
    test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200
    test_df['num_variable'] = 100.0
    test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]
    # normal noise with a few injected extreme values for outlier detection
    test_df['outlier'] = normal(size=1000)
    test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]
    test_df['outlier_na'] = test_df['outlier']
    test_df.loc[[300, 500], 'outlier_na'] = np.nan
    test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')
    test_df['None_100'] = [1] * 900 + [None] * 100
    test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100
    test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300
    test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300
    # alternative encodings of missing values ("Missing", "N/a", ...)
    test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \
        ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \
        ['Unknown'] * 100 + ['do_not_touch'] * 200
    return test_df
def simu(pmf, size):
    """ Draw `size` samples from a discrete distribution; pmf is supposed to
    be in ascending order
    Parameters
    ----------
    pmf : tuple(ndarray, ndarray)
        a tuple with (labels,probs) labels are supposed to be in ascending order
    size: int
        the number of sampel you want generate
    Returns
    ------
    int (depends of the type of labels)
        draw a random sample from the pmf
    """
    labels, probs = pmf[0], pmf[1]
    u = np.random.rand(size)
    cumulative_sum = probs.cumsum()
    # inverse-CDF sampling: for each uniform draw u, argmin over the boolean
    # matrix picks the first bin whose cumulative probability exceeds u
    return labels[(u >= cumulative_sum[:, None]).argmin(axis=0)]
def shuffle_df(df, reindex=False):
    """Return a row-shuffled copy of df; reset the index when reindex=True."""
    shuffled = df.sample(frac=1)
    if reindex:
        shuffled = shuffled.reset_index()
    return shuffled
def random_pmf(nb_labels):
    """Return a random probability mass function over nb_labels labels."""
    weights = np.random.random(nb_labels)
    return weights / weights.sum()
def random_histogram(nb_labels, nb_observations):
    """Return a random *normalized* histogram over nb_labels labels.

    Counts are drawn uniformly from [0, nb_observations); the result is
    divided by its total, i.e. relative frequencies summing to 1
    (the previous docstring, copy-pasted from random_pmf, was wrong).
    NOTE(review): if all drawn counts are 0 the division yields NaN —
    confirm callers never hit that case.
    """
    random_histo = np.random.choice(np.arange(0, nb_observations), nb_labels)
    return random_histo / np.sum(random_histo)
def keep_category(df, colname, pct=0.05, n=5):
    """Keep a pct or number of rows of every level of a categorical variable.

    Parameters
    ----------
    pct : float
        Keep at least pct of the nb of observations having a specific category
    n : int
        Keep at least n of the variables having a specific category

    Returns
    --------
    Returns an index of rows to keep
    """
    per_level_samples = df.groupby(colname).apply(
        lambda group: group.sample(
            max(1, min(group.shape[0], n, int(group.shape[0] * pct)))).index)
    kept = []
    for level_index in per_level_samples:
        kept.extend(level_index.tolist())
    return pd.Index(kept)
# for k, i in df.groupby(colname).groups:
# to_keep += np.random.choice(i, max(1, min(g.shape[0], n, int(g.shape[0] * pct))), replace=False)
# return to_keep
#
def simulate_na_col(df, colname, n=None, pct=None, weights=None,
                    safety=True, *args, **kwargs):
    """ Simulate missing values in a column of categorical variables.

    Injects n (or pct * nrows) np.nan values into df[colname] *in place*,
    sampling the rows to blank out with a random (or user-supplied
    `weights`) distribution over the column's levels. With safety=True a
    minimal sample of every level (see keep_category) is protected.

    Notes
    -----
    Fix issue with category variable
    NOTE(review): pd.core.index.Index was removed in pandas >= 1.0 (use
    pd.Index) — confirm the pandas version this targets.
    NOTE(review): with safety=False, `tokeep` below is unbound — confirm
    safety=False is never used.
    """
    # if df.loc[:,colname].dtype == 'float' or df.loc[:,colname].dtype == 'int':
    #     raise ValueError('This function only support categorical variables')
    if (n is None) and (pct is not None):
        # be careful here especially if cols has a lot of missing values
        n = int(pct * df.shape[0])
    if isinstance(colname, pd.core.index.Index) or isinstance(colname, list):
        # recurse over each column when a collection of names is given
        for c in colname:
            simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights)
    else:
        if safety:
            tokeep = keep_category(df, colname, *args, **kwargs)
        # we are not smapling from tokeep
        col = df.loc[:, colname].drop(tokeep)
        col = col.dropna()
        print(colname)
        col_distribution = col.value_counts(normalize=True, sort=False)
        labels = col_distribution.index  # characters
        # generate random pmf
        pmf_na = weights if weights else random_pmf(len(labels))
        na_distribution = pd.Series(data=pmf_na, index=labels)
        # draw samples from this pmf
        weights_na = col.apply(lambda x: na_distribution[x])
        weights_na /= weights_na.sum()
        index_to_replace = col.sample(
            n=n, weights=weights_na, replace=False).index
        df.loc[index_to_replace, colname] = np.nan
def get_test_df_complete():
    """Download the full Lending Club test dataset (requires internet).

    Fetches the LoanStats3b zip, parses the CSV, then augments it with
    the synthetic columns the demo notebooks expect (na_col,
    constant_col, duplicated_column, many_missing_70, bad).
    """
    import requests
    from zipfile import ZipFile
    # bug fix: `import StringIO` is Python 2 only, and r.content is bytes,
    # so the in-memory file must be a BytesIO
    from io import BytesIO
    zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
    r = requests.get(zip_to_download)
    zipfile = ZipFile(BytesIO(r.content))
    file_csv = zipfile.namelist()[0]
    # we are using the c parser for speed
    df = pd.read_csv(zipfile.open(file_csv), skiprows=[0], na_values=['n/a', 'N/A', ''],
                     parse_dates=['issue_d', 'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d'])
    zipfile.close()
    # the last two lines of the file are summary footers, not records
    df = df[:-2]
    nb_row = float(len(df.index))
    df['na_col'] = np.nan
    df['constant_col'] = 'constant'
    df['duplicated_column'] = df.id
    df['many_missing_70'] = np.nan
    df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1
    # binary target: 1 = bad loan, 0 = paid / current / in grace period
    df['bad'] = 1
    index_good = df['loan_status'].isin(
        ['Fully Paid', 'Current', 'In Grace Period'])
    df.loc[index_good, 'bad'] = 0
    return df
def kl(p, q):
    """
    Kullback-Leibler divergence for discrete distributions.

    Parameters
    ----------
    p: ndarray
        probability mass function
    q: ndarray
        probability mass function

    Returns
    --------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i)),
    the zero-probability entries of p contributing 0.
    """
    contributions = np.where(p != 0, p * np.log(p / q), 0)
    return np.sum(contributions)
def kl_series(serie1, serie2, dropna=True):
    """KL divergence between the empirical distributions of two Series.

    Bug fix: value_counts() sorts by frequency, so the two probability
    vectors were previously compared position-by-position with no
    guarantee the positions referred to the same category. We now align
    both distributions on the union of observed categories (a category
    absent from one side gets probability 0; an absent-in-serie2
    category with positive serie1 mass yields an infinite divergence,
    which is mathematically correct).
    """
    if dropna:
        serie1 = serie1.dropna()
        serie2 = serie2.dropna()
    p = serie1.value_counts(normalize=True)
    q = serie2.value_counts(normalize=True)
    categories = p.index.union(q.index)
    p = p.reindex(categories, fill_value=0).values
    q = q.reindex(categories, fill_value=0).values
    # KL with the 0 * log(0) = 0 convention (same formula as kl())
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def plot_hist_na(df, colname):
    """Plot histograms of every numeric column, grouped by whether
    `colname` is missing, to eyeball missingness-related shifts.
    (Side effect only: draws via pandas/matplotlib, returns nothing.)"""
    df_h = df.copy()
    na_name = "is_na_{}".format(colname)
    # 1 where colname is missing, 0 elsewhere — the grouping key
    df_h[na_name] = df_h[colname].isnull().astype(int)
    # restrict to numeric columns: hist() is undefined for object dtype
    measure_col = cserie((df.dtypes == int) | (df.dtypes == float))
    df_h.groupby(na_name)[measure_col].hist()
def psi(bench, target, group, print_df=True):
    """ This function return the Population Stability Index, quantifying if the
    distribution is stable between two states.
    This statistic make sense and works is only working for numeric variables
    for bench and target.
    Params:
    - bench is a numpy array with the reference variable.
    - target is a numpy array of the new variable.
    - group is the number of group you want consider.
    Returns {'data': per-bin DataFrame, 'statistic': psi} when print_df is
    True, otherwise the bare psi float.
    """
    # bin edges = quantiles of the benchmark distribution
    labels_q = np.percentile(
        bench, [(100.0 / group) * i for i in range(group + 1)], interpolation="nearest")
    # This is the right approach when you have not a lot of unique value
    # (np.unique collapses duplicate edges so pd.cut gets strictly
    # increasing bins)
    ben_pct = (pd.cut(bench, bins=np.unique(labels_q),
                      include_lowest=True).value_counts()) / len(bench)
    target_pct = (pd.cut(target, bins=np.unique(labels_q),
                         include_lowest=True).value_counts()) / len(target)
    target_pct = target_pct.sort_index()  # sort the index
    ben_pct = ben_pct.sort_index()  # sort the index
    # PSI = sum over bins of (target% - bench%) * ln(target% / bench%)
    psi = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))
    # Print results for better understanding
    if print_df:
        results = pd.DataFrame({'ben_pct': ben_pct.values,
                                'target_pct': target_pct.values},
                               index=ben_pct.index)
        return {'data': results, 'statistic': psi}
    return psi
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for Modeling with pandas, numpy and skicit-learn.
The Goal of this module is to rely on a dataframe structure for modelling g
"""
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result into a attributes
When you used a method the output will be stored in a instance attribute so you
don't have to compute the result again.
"""
def __init__(self, data, copy=False):
    """
    Parameters
    ----------
    data : pandas.DataFrame
        the data you want explore
    copy: bool
        True if you want make a copy of DataFrame, default False

    Examples
    --------
    explorer = DataExploration(data = your_DataFrame)
    explorer.structure() : global structure of your DataFrame
    explorer.psummary() to get the a global snapchot of the different stuff detected
    data_cleaned = explorer.basic_cleaning() to clean your data.
    """
    assert isinstance(data, pd.DataFrame)
    self.is_data_copy = copy
    self.data = data if not self.is_data_copy else data.copy()
    # if not self.label:
    #     print("""the label column is empty the data will be considered
    #             as a dataset of predictors""")
    # basic shape, cached once
    self._nrow = len(self.data.index)
    self._ncol = len(self.data.columns)
    # boolean masks / name lists of numeric and character columns
    self._dfnumi = (self.data.dtypes == float) | (
        self.data.dtypes == int)
    self._dfnum = cserie(self._dfnumi)
    self._dfchari = (self.data.dtypes == object)
    self._dfchar = cserie(self._dfchari)
    # lazily-filled caches used by the corresponding methods
    self._nacolcount = pd.DataFrame()
    self._narowcount = pd.DataFrame()
    self._count_unique = pd.DataFrame()
    self._constantcol = []
    self._dupcol = []
    self._nearzerovar = pd.DataFrame()
    self._corrcolumns = []
    self._dict_info = {}
    self._structure = pd.DataFrame()
    self._string_info = ""
    # lower-cased strings treated as alternative NA encodings
    self._list_other_na = {'unknown', 'na',
                           'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
    """
    Return True when the column's dtype is int or float, else False.

    Parameters
    ----------
    colname : str
        the name of the column of the self.data

    Notes
    ------
    df._get_numeric_data() is a primitive from pandas
    to get only numeric data
    """
    col_dtype = self.data.loc[:, colname].dtype
    return (col_dtype == int) or (col_dtype == float)
def is_int_factor(self, colname, threshold=0.1):
    """
    Return True when colname is an 'int factor': an int column whose
    number of unique values <= threshold * nrows.

    Parameters
    ----------
    colname : str
        the name of the column of the self.data
    threshold : float
        fraction of the row count used as the uniqueness cutoff
    """
    col = self.data.loc[:, colname]
    return bool(col.dtype == int and
                col.nunique() <= (threshold * self.data.shape[0]))
def to_lowercase(self):
    """Return a copy of the data with every string value lower-cased."""
    return self.data.applymap(lambda v: v.lower() if type(v) == str else v)
def where_numeric(self):
    """Boolean DataFrame: True where the cell holds an int or float instance."""
    return self.data.applymap(lambda cell: isinstance(cell, (int, float)))
def count_unique(self):
    """Serie with the number of unique values per column (cached)."""
    if not len(self._count_unique):
        self._count_unique = self.data.apply(lambda col: col.nunique(), axis=0)
    return self._count_unique
def sample_df(self, pct=0.05, nr=10, threshold=None):
    """Sample rows of the data: size = min(max(pct * nrow, nr), threshold)."""
    size = max(int(pct * float(len(self.data.index))), nr)
    if threshold:
        size = min(size, threshold)
    return self.data.loc[permutation(self.data.index)[:size], :]
def sign_summary(self, subset=None):
    """
    Returns the number and percentage of positive and negative values in
    a column, a subset of columns or all numeric columns of the dataframe.

    Parameters
    ----------
    subset : label or list
        Column name or list of column names to check.

    Returns
    -------
    summary : pandas.DataFrame
        Summary of the signs present in the subset. (Zeros are counted
        on both the positive and negative side, as in the original.)

    Raises
    ------
    NotNumericColumn
        when a requested column is not numeric.
    """
    if subset:
        # bug fix: `subset` was misspelled `subs` here, raising NameError
        # whenever the parameter was actually used
        subs = subset if isinstance(subset, list) else [subset]
        if sum(col not in self._dfnum for col in subs) > 0:
            raise NotNumericColumn('At least one of the columns you passed '
                                   'as argument are not numeric.')
    else:
        subs = self._dfnum
    summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
                                    'NumOfPositive', 'PctOfPositive'])
    summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
    summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
    summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
    summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
    return summary
@property
def total_missing(self):
""" Count the total number of missing values """
# return np.count_nonzero(self.data.isnull().values) # optimized for
# speed
return self.nacolcount().Nanumber.sum()
def nacolcount(self):
""" count the number of missing values per columns """
if len(self._nacolcount):
return self._nacolcount
self._nacolcount = self.data.isnull().sum(axis=0)
self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
self._nacolcount['Napercentage'] = self._nacolcount[
'Nanumber'] / (self._nrow)
return self._nacolcount
def narowcount(self):
""" count the number of missing values per columns """
if len(self._narowcount):
return self._narowcount
self._narowcount = self.data.isnull().sum(axis=1)
self._narowcount = pd.DataFrame(
self._narowcount, columns=['Nanumber'])
self._narowcount['Napercentage'] = self._narowcount[
'Nanumber'] / (self._ncol)
return self._narowcount
def detect_other_na(self, verbose=True, auto_replace=False):
""" Detect missing values encoded by the creator of the dataset
like 'Missing', 'N/A' ...
Parameters
----------
verbose : bool
True if you want to print some infos
auto_replace: bool
True if you want replace this value by np.nan, default False
Returns
-------
an DataFrame of boolean if not auto_replace else cleaned DataFrame with
self._list_other_na replaced by np.nan
Notes
------
* You can use na_values parameter in pandas.read_csv to specify the missing
values to convert to nan a priori
* Speed can be improved
"""
res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
print("We detected {} other type of missing values".format(res.sum().sum()))
if auto_replace:
return self.data.where((res == False), np.nan)
else:
return res
@property
def nacols_full(self):
""" Returns a list of columns with only missing values """
return cserie(self.nacolcount().Nanumber == self._nrow)
@property
def narows_full(self):
""" Returns an index of rows with only missing values """
return self.narowcount().Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
def manymissing(self, pct=0.9, axis=0):
""" identify columns of a dataframe with many missing values ( >= pct), if
row = True row either.
- the output is a list """
if axis == 1:
self._manymissingrow = self.narowcount()
self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
return self._manymissingrow
elif axis == 0:
self._manymissingcol = self.nacolcount()
self._manymissingcol = cserie(
self._manymissingcol['Napercentage'] >= pct)
return self._manymissingcol
else:
raise ValueError("Axis should be 1 for rows and o for columns")
def df_len_string(self, drop_num=False):
""" Return a Series with the max of the length of the string of string-type columns """
if drop_num:
return self.data.drop(self._dfnum, axis=1).apply(lambda x: np.max(x.str.len()), axis=0)
else:
return self.data.apply(lambda x: np.max(x.str.len()) if x.dtype.kind =='O' else np.nan , axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
""" identify id or key columns as an index if index_format = True or
as a list if index_format = False """
if not dropna:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: len(x.unique()) == len(x), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
else:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: x.nunique() == len(x.dropna()), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
def constantcol(self, **kwargs):
""" identify constant columns """
# sample to reduce computation time
if len(self._constantcol):
return self._constantcol
col_to_keep = self.sample_df(
**kwargs).apply(lambda x: len(x.unique()) == 1, axis=0)
if len(cserie(col_to_keep)) == 0:
return []
self._constantcol = cserie(self.data.loc[:, col_to_keep].apply(
lambda x: len(x.unique()) == 1, axis=0))
return self._constantcol
def constantcol2(self, **kwargs):
""" identify constant columns """
return cserie((self.data == self.data.ix[0]).all())
def factors(self, nb_max_levels=10, threshold_value=None, index=False):
""" return a list of the detected factor variable, detection is based on
ther percentage of unicity perc_unique = 0.05 by default.
We follow here the definition of R factors variable considering that a
factor variable is a character variable that take value in a list a levels
this is a bad implementation
Arguments
----------
nb_max_levels: the mac nb of levels you fix for a categorical variable
threshold_value : the nb of of unique value in percentage of the dataframe length
index : if you want the result as an index or a list
"""
if threshold_value:
max_levels = max(nb_max_levels, threshold_value * self._nrow)
else:
max_levels = nb_max_levels
def helper_factor(x, num_var=self._dfnum):
unique_value = set()
if x.name in num_var:
return False
else:
for e in x.values:
if len(unique_value) >= max_levels:
return False
else:
unique_value.add(e)
return True
if index:
return self.data.apply(lambda x: helper_factor(x))
else:
return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in xrange(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in xrange(nb_quantiles + 1)])
else:
raise("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
""" this function gives you a all the quantiles
of the numeric variables of the dataframe
only_numeric will calculate it only for numeric variables,
for only_numeric = False you will get NaN value for non numeric
variables """
binq = 1.0 / nb_quantiles
if only_numeric:
return self.data.loc[:, self._dfnumi].quantile([binq * i for i in xrange(nb_quantiles + 1)])
else:
return self.data.quantile([binq * i for i in xrange(nb_quantiles + 1)])
def numeric_summary(self):
""" provide a more complete sumary than describe, it is using only numeric
value """
df = self.data.loc[:, self._dfnumi]
func_list = [df.count(), df.min(), df.quantile(0.25),
df.quantile(0.5), df.mean(),
df.std(), df.mad(), df.skew(),
df.kurt(), df.quantile(0.75), df.max()]
results = [f for f in func_list]
return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
'Median', 'Mean', 'Std', 'Mad', 'Skewness',
'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
""" this function will try to infer the type of the columns of data"""
return self.data.apply(lambda x: pd.lib.infer_dtype(x.values))
    def structure(self, threshold_factor=10):
        """ this function return a summary of the structure of the pandas DataFrame
        data looking at the type of variables, the number of missing values, the
        number of unique values """
        # memoized: only computed once per instance
        if len(self._structure):
            return self._structure
        dtypes = self.data.dtypes
        nacolcount = self.nacolcount()
        nb_missing = nacolcount.Nanumber
        perc_missing = nacolcount.Napercentage
        nb_unique_values = self.count_unique()
        dtype_infer = self.infer_types()
        # R-like dtype classification: numeric / factor / character
        dtypes_r = self.data.apply(lambda x: "character")
        dtypes_r[self._dfnumi] = "numeric"
        # a character column with few distinct values is reclassified as a factor
        dtypes_r[(dtypes_r == 'character') & (
            nb_unique_values <= threshold_factor)] = 'factor'
        constant_columns = (nb_unique_values == 1)
        na_columns = (perc_missing == 1)
        # a column whose distinct count equals the row count is a candidate key
        is_key = nb_unique_values == self._nrow
        string_length = self.df_len_string(drop_num=False)
        # is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
        dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                    'nb_missing': nb_missing, 'is_key': is_key,
                    'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                    'constant_columns': constant_columns, 'na_columns': na_columns,
                    'dtype_infer': dtype_infer, 'string_length': string_length}
        self._structure = pd.concat(dict_str, axis=1)
        # enforce a stable column order for the report
        self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
                                                  'nb_unique_values', 'constant_columns',
                                                  'na_columns', 'is_key', 'dtype_infer', 'string_length']]
        return self._structure
def findupcol(self, threshold=100, **kwargs):
""" find duplicated columns and return the result as a list of list """
df_s = self.sample_df(threshold=100, **kwargs).T
dup_index_s = (df_s.duplicated()) | (
df_s.duplicated(keep='last'))
if len(cserie(dup_index_s)) == 0:
return []
df_t = (self.data.loc[:, dup_index_s]).T
dup_index = df_t.duplicated()
dup_index_complet = cserie(
(dup_index) | (df_t.duplicated(keep='last')))
l = []
for col in cserie(dup_index):
index_temp = self.data[dup_index_complet].apply(
lambda x: (x == self.data[col])).sum() == self._nrow
temp = list(self.data[dup_index_complet].columns[index_temp])
l.append(temp)
self._dupcol = l
return self._dupcol
def finduprow(self, subset=[]):
""" find duplicated rows and return the result a sorted dataframe of all the
duplicates
subset is a list of columns to look for duplicates from this specific subset .
"""
if sum(self.data.duplicated()) == 0:
print("there is no duplicated rows")
else:
if subset:
dup_index = (self.data.duplicated(subset=subset)) | (
self.data.duplicated(subset=subset, keep='last'))
else:
dup_index = (self.data.duplicated()) | (
self.data.duplicated(keep='last'))
if subset:
return self.data[dup_index].sort(subset)
else:
return self.data[dup_index].sort(self.data.columns[0])
    def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
        """ identify predictors with near-zero variance.
        freq_cut: cutoff ratio of frequency of most common value to second
        most common value.
        unique_cut: cutoff percentage of unique value over total number of
        samples.
        save_metrics: if False, print dataframe and return NON near-zero var
        col indexes, if True, returns the whole dataframe.
        """
        nb_unique_values = self.count_unique()
        percent_unique = 100 * nb_unique_values / self._nrow
        def helper_freq(x):
            # ratio most-common / second-most-common count; degenerate
            # columns (0 or 1 distinct value) get a fixed ratio
            if nb_unique_values[x.name] == 0:
                return 0.0
            elif nb_unique_values[x.name] == 1:
                return 1.0
            else:
                return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]
        freq_ratio = self.data.apply(helper_freq)
        # zero variance: an empty column or a single distinct value
        zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
        # near-zero variance: dominant value much more frequent than the
        # runner-up AND few unique values overall — or plain zero variance
        nzv = ((freq_ratio >= freq_cut) & (
            percent_unique <= unique_cut)) | (zerovar)
        if save_metrics:
            return pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio, 'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns)
        else:
            print(pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio,
                                'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns))
            return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
"""
implementation of the Recursive Pairwise Elimination.
The function finds the highest correlated pair and removes the most
highly correlated feature of the pair, then repeats the process
until the threshold 'cutoff' is reached.
will return a dataframe is 'data_frame' is set to True, and the list
of predictors to remove oth
Adaptation of 'findCorrelation' function in the caret package in R.
"""
res = []
df = self.data.copy(0)
cor = df.corr(method=method)
for col in cor.columns:
cor[col][col] = 0
max_cor = cor.max()
if print_mode:
print(max_cor.max())
while max_cor.max() > cutoff:
A = max_cor.idxmax()
B = cor[A].idxmax()
if cor[A].mean() > cor[B].mean():
cor.drop(A, 1, inplace=True)
cor.drop(A, 0, inplace=True)
res += [A]
else:
cor.drop(B, 1, inplace=True)
cor.drop(B, 0, inplace=True)
res += [B]
max_cor = cor.max()
if print_mode:
print(max_cor.max())
if data_frame:
return df.drop(res, 1)
else:
return res
self._corrcolumns = res
    def get_infos_consistency(self):
        """ Update self._dict_info and returns infos about duplicates rows and cols,
        constant col,narows and cols

        Each entry maps an issue name to a dict with the offending values,
        a severity level (ERROR/WARNING) and a suggested action/comment. """
        infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True), 'level': 'ERROR',
                                     'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
                 'dup_columns': {'value': self.findupcol(), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.findupcol())},
                 'constant_columns': {'value': self.constantcol(), 'level': 'WARNING',
                                      'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.constantcol())},
                 'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                                 'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
                 'nacols_full': {'value': self.nacols_full, 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.nacols_full)}
                 }
        # merge into the shared report dict before returning
        self._dict_info.update(infos)
        return infos
    def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
        """ Update self._dict_info and returns infos about missing values:
        total counts plus columns above the high threshold (manymissing_ph)
        and columns with few-but-some missing values (<= manymissing_pl) """
        nacolcount_p = self.nacolcount().Napercentage
        # NOTE(review): 'pct_total_missing' divides by _nrow only, not by the
        # total number of cells (_nrow * _ncol) — confirm intended
        infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
                 'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
                 'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
                 'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
                 }
        # merge into the shared report dict before returning
        self._dict_info.update(infos)
        return infos
    def print_infos(self, infos="consistency", print_empty=False):
        """ pprint of get_infos
        Parameters
        ----------
        print_empty: bool:
            False if you don't want print the empty infos (
            no missing colum for example)"""
        # NOTE(review): only the "consistency" report is implemented; any other
        # value of `infos` silently prints nothing — confirm intended
        if infos == "consistency":
            dict_infos = self.get_infos_consistency()
            if not print_empty:
                # drop entries whose value list is empty before printing
                dict_infos = {k: v for k, v in dict_infos.items() if len(v['value']) > 0}
            pprint(dict_infos)
    def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
                 threshold=100, string_threshold=40, dynamic=False):
        """
        This function will print you a summary of the dataset, based on function
        designed is this package
        - Output : python print
        It will store the string output and the dictionnary of results in private variables

        With dynamic=True each section is printed as it is computed; otherwise
        all results are gathered into self._dict_info / self._string_info and
        printed once at the end.
        """
        nacolcount_p = self.nacolcount().Napercentage
        if dynamic:
            print('there are {0} duplicated rows\n'.format(
                self.data.duplicated().sum()))
            print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                          cserie((nacolcount_p > manymissing_ph))))
            print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
                manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
            print('the detected keys of the dataset are:\n{0} \n'.format(
                self.detectkey()))
            print('the duplicated columns of the dataset are:\n{0}\n'.format(
                self.findupcol(threshold=100)))
            print('the constant columns of the dataset are:\n{0}\n'.format(
                self.constantcol()))
            print('the columns with nearzerovariance are:\n{0}\n'.format(
                list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
            print('the columns highly correlated to others to remove are:\n{0}\n'.format(
                self.findcorr(data_frame=False)))
            print('these columns contains big strings :\n{0}\n'.format(
                cserie(self.df_len_string() > string_threshold)))
        else:
            # gather every metric first …
            self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                               'many_missing_percentage': manymissing_ph,
                               'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                               'low_missing_percentage': manymissing_pl,
                               'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                               'keys_detected': self.detectkey(),
                               'dup_columns': self.findupcol(threshold=100),
                               'constant_columns': self.constantcol(),
                               'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                               'high_correlated_col': self.findcorr(data_frame=False),
                               'big_strings_col': cserie(self.df_len_string() > string_threshold)
                               }
            # … then render one report string from the collected dict
            self._string_info = u"""
        there are {nb_duplicated_rows} duplicated rows\n
        the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
        the columns with less than {low_missing_percentage:.2%}% manymissing values are :\n{lowmissing_columns} \n
        you should fill them with median or most common value\n
        the detected keys of the dataset are:\n{keys_detected} \n
        the duplicated columns of the dataset are:\n{dup_columns}\n
        the constant columns of the dataset are:\n{constant_columns}\n
        the columns with nearzerovariance are:\n{nearzerovar_columns}\n
        the columns highly correlated to others to remove are:\n{high_correlated_col}\n
        these columns contains big strings :\n{big_strings_col}\n
        """.format(**self._dict_info)
            print(self._string_info)
def metadata(self):
""" Return a dict/json full of infos about the dataset """
meta = {}
meta['mem_size'] = self.data.memory_usage(index=True).sum() # in bytes
meta['columns_name'] = self.data.columns.tolist()
meta['columns_name_n'] = [e.lower() for e in self.data.columns]
meta['nb_rows'] = self.data.shape[0]
meta['nb_columns'] = self.data.shape[1]
# drop dtype_p for mongodb compatibility
structure_data = self.structure().drop(labels='dtypes_p', axis=1)
structure_data = structure_data.to_dict('index')
meta['structure'] = structure_data
meta['numeric_summary'] = self.numeric_summary().to_dict('index')
return meta
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : File with all custom exceptions
"""
class NotNumericColumn(Exception):
    """ The column should be numeric """
    # raised e.g. by DataExploration.sign_summary when a requested column
    # is not numeric
    pass
class NumericError(Exception):
    """ The column should not be numeric """
    # NOTE(review): no raiser is visible in this file — confirm it is used
    # elsewhere in the package
    pass
# class NotFactor
|
{
"imported_by": [
"/autoc/__init__.py"
],
"imports": [
"/autoc/utils/helpers.py",
"/autoc/explorer.py",
"/autoc/exceptions.py"
]
}
|
ericfourrier/auto-clean
|
/test.py
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Automated test suites with unittest
run "python -m unittest -v test" in the module directory to run the tests
The clock decorator in utils will measure the run time of the test
"""
#########################################################
# Import Packages and helpers
#########################################################
import unittest
# internal helpers
# from autoc.utils.helpers import clock, create_test_df, removena_numpy, cserie
from autoc.utils.helpers import random_pmf, clock, create_test_df, cserie, simu, removena_numpy
from autoc.utils.getdata import get_dataset
from autoc.explorer import DataExploration
from autoc.naimputer import NaImputer
from autoc.outliersdetection import OutliersDetection
import pandas as pd
import numpy as np
def flatten_list(x):
    """Recursively flatten arbitrarily nested lists into one flat list.

    A non-list input is wrapped into a one-element list.
    (PEP 8 E731: a named `def` replaces the lambda assignment.)
    """
    return [y for l in x for y in flatten_list(l)] if isinstance(x, list) else [x]
# flatten_list = lambda x: [y for l in x for y in flatten_list(l)] if isinstance(x,list) else [x]
#########################################################
# Writing the tests
#########################################################
class TestDataExploration(unittest.TestCase):
    """Unit tests for DataExploration.

    BUGFIX: `test_count_unique` was defined twice; the second definition
    silently shadowed the first so only one of them ever ran. The second
    occurrence is renamed `test_count_unique_consistency` so both execute.
    """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls._test_df = create_test_df()
        cls._test_dc = DataExploration(data=cls._test_df)

    @clock
    def test_to_lowercase(self):
        df_lower = self._test_dc.to_lowercase()
        self.assertNotEqual(id(df_lower), id(self._test_dc.data))
        self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['c'] * 300) ==
                         df_lower.loc[:, 'character_variable_up1']).all())
        self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['d'] * 300) ==
                         df_lower.loc[:, 'character_variable_up2']).all())

    @clock
    def test_copy(self):
        exploration_copy = DataExploration(data=create_test_df(), copy=True)
        self.assertEqual(id(self._test_df), id(self._test_dc.data))
        self.assertNotEqual(id(self._test_df), id(exploration_copy.data))

    @clock
    def test_cserie(self):
        char_var = cserie(self._test_dc.data.dtypes == "object")
        self.assertIsInstance(char_var, list)
        self.assertIn('character_variable', char_var)

    @clock
    def test_removena_numpy(self):
        test_array = np.array([np.nan, 1, 2, np.nan])
        self.assertTrue((removena_numpy(test_array) == np.array([1, 2])).all())

    @clock
    def test_sample_df(self):
        self.assertEqual(len(self._test_dc.sample_df(pct=0.061)),
                         0.061 * float(self._test_dc.data.shape[0]))

    @clock
    def test_nrow(self):
        self.assertEqual(self._test_dc._nrow, self._test_dc.data.shape[0])

    @clock
    def test_col(self):
        self.assertEqual(self._test_dc._ncol, self._test_dc.data.shape[1])

    @clock
    def test_is_numeric(self):
        self.assertTrue(self._test_dc.is_numeric("num_variable"))
        self.assertTrue(self._test_dc.is_numeric("many_missing_70"))
        self.assertFalse(self._test_dc.is_numeric("character_variable"))

    @clock
    def test_is_int_factor(self):
        self.assertFalse(self._test_dc.is_int_factor("num_variable"))
        self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.01))
        self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.1))
        self.assertFalse(self._test_dc.is_int_factor("int_factor_10", 0.005))
        self.assertFalse(self._test_dc.is_int_factor("character_variable"))

    @clock
    def test_where_numeric(self):
        self.assertEqual(cserie(self._test_dc.where_numeric().all()), self._test_dc._dfnum)

    @clock
    def test_total_missing(self):
        self.assertEqual(self._test_dc.total_missing,
                         self._test_dc.data.isnull().sum().sum())

    @clock
    def test_None_count(self):
        nacolcount = self._test_dc.nacolcount()
        self.assertEqual(nacolcount.loc['None_100', 'Napercentage'], 0.1)
        self.assertEqual(nacolcount.loc['None_100', 'Nanumber'], 100)
        self.assertEqual(nacolcount.loc['None_na_200', 'Napercentage'], 0.2)
        self.assertEqual(nacolcount.loc['None_na_200', 'Nanumber'], 200)

    @clock
    def test_nacolcount_capture_na(self):
        nacolcount = self._test_dc.nacolcount()
        self.assertEqual(nacolcount.loc['na_col', 'Napercentage'], 1.0)
        self.assertEqual(nacolcount.loc['many_missing_70', 'Napercentage'], 0.7)

    @clock
    def test_nacolcount_is_type_dataframe(self):
        self.assertIsInstance(self._test_dc.nacolcount(),
                              pd.core.frame.DataFrame)

    @clock
    def test_narowcount_capture_na(self):
        narowcount = self._test_dc.narowcount()
        self.assertEqual(sum(narowcount['Nanumber'] > 0), self._test_dc._nrow)

    #
    # @clock
    # def test_detect_other_na(self):
    #     other_na = self._test_dc.detect_other_na()
    #     self.assertIsInstance(narowcount, pd.core.frame.DataFrame)

    @clock
    def test_narowcount_is_type_dataframe(self):
        narowcount = self._test_dc.narowcount()
        self.assertIsInstance(narowcount, pd.core.frame.DataFrame)

    @clock
    def test_manymissing_capture(self):
        manymissing = self._test_dc.manymissing(0.7)
        self.assertIsInstance(manymissing, list)
        self.assertIn('many_missing_70', manymissing)
        self.assertIn('na_col', manymissing)

    @clock
    def test_nacols_full(self):
        nacols_full = self._test_dc.nacols_full
        self.assertIsInstance(nacols_full, list)
        self.assertIn('na_col', nacols_full)

    @clock
    def test_narows_full(self):
        test_df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list('ABCD'))
        test_df.loc[99, :] = np.nan
        self.assertIn(99, DataExploration(test_df).narows_full)
        self.assertNotIn(1, test_df)

    @clock
    def test_constant_col_capture(self):
        constantcol = self._test_dc.constantcol()
        self.assertIsInstance(constantcol, list)
        self.assertIn('constant_col', constantcol)
        self.assertIn('constant_col_num', constantcol)
        self.assertIn('na_col', constantcol)

    @clock
    def test_count_unique(self):
        count_unique = self._test_dc.count_unique()
        self.assertIsInstance(count_unique, pd.Series)
        self.assertEqual(count_unique.id, 1000)
        self.assertEqual(count_unique.constant_col, 1)
        self.assertEqual(count_unique.character_factor, 7)

    @clock
    def test_dfchar_check_col(self):
        dfchar = self._test_dc._dfchar
        self.assertIsInstance(dfchar, list)
        self.assertNotIn('num_variable', dfchar)
        self.assertIn('character_factor', dfchar)
        self.assertIn('character_variable', dfchar)
        self.assertNotIn('many_missing_70', dfchar)

    @clock
    def test_dfnum_check_col(self):
        dfnum = self._test_dc._dfnum
        self.assertIsInstance(dfnum, list)
        self.assertIn('num_variable', dfnum)
        self.assertNotIn('character_factor', dfnum)
        self.assertNotIn('character_variable', dfnum)
        self.assertIn('many_missing_70', dfnum)

    @clock
    def test_factors_check_col(self):
        factors = self._test_dc.factors()
        self.assertIsInstance(factors, list)
        self.assertNotIn('num_factor', factors)
        self.assertNotIn('character_variable', factors)
        self.assertIn('character_factor', factors)

    @clock
    def test_detectkey_check_col(self):
        detectkey = self._test_dc.detectkey()
        self.assertIsInstance(detectkey, list)
        self.assertIn('id', detectkey)
        self.assertIn('member_id', detectkey)

    @clock
    def test_detectkey_check_col_dropna(self):
        detectkeyna = self._test_dc.detectkey(dropna=True)
        self.assertIn('id_na', detectkeyna)
        self.assertIn('id', detectkeyna)
        self.assertIn('member_id', detectkeyna)

    @clock
    def test_findupcol_check(self):
        findupcol = self._test_dc.findupcol()
        self.assertIn(['id', 'duplicated_column'], findupcol)
        self.assertNotIn('member_id', flatten_list(findupcol))

    @clock
    def test_count_unique_consistency(self):
        # renamed from the duplicate `test_count_unique` so it no longer
        # shadows the version above
        count_unique = self._test_dc.count_unique()
        self.assertIsInstance(count_unique, pd.Series)
        self.assertEqual(count_unique.id, len(self._test_dc.data.id))
        self.assertEqual(count_unique.constant_col, 1)
        self.assertEqual(count_unique.num_factor, len(
            pd.unique(self._test_dc.data.num_factor)))

    @clock
    def test_structure(self):
        structure = self._test_dc.structure()
        self.assertIsInstance(structure, pd.DataFrame)
        self.assertEqual(len(self._test_dc.data),
                         structure.loc['na_col', 'nb_missing'])
        self.assertEqual(len(self._test_dc.data), structure.loc[
            'id', 'nb_unique_values'])
        self.assertTrue(structure.loc['id', 'is_key'])

    @clock
    def test_nearzerovar(self):
        nearzerovar = self._test_dc.nearzerovar(save_metrics=True)
        self.assertIsInstance(nearzerovar, pd.DataFrame)
        self.assertIn('nearzerovar_variable', cserie(nearzerovar.nzv))
        self.assertIn('constant_col', cserie(nearzerovar.nzv))
        self.assertIn('na_col', cserie(nearzerovar.nzv))
class TestNaImputer(unittest.TestCase):
    """Unit tests for NaImputer.

    BUGFIX: `test_fillna_serie` was defined twice; the second definition
    silently shadowed the first so only one of them ever ran. The first
    occurrence is renamed `test_fillna_serie_with_serie` so both execute.
    """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls._test_na = NaImputer(data=create_test_df())

    @clock
    def test_fillna_serie_with_serie(self):
        # NOTE(review): this test was previously shadowed and never ran;
        # review the expectations if it fails.
        test_serie = pd.Series([1, 3, np.nan, 5])
        self.assertIsInstance(
            self._test_na.fillna_serie(test_serie), pd.Series)
        self.assertEqual(self._test_na.fillna_serie(test_serie)[2], 3.0)

    @clock
    def test_fillna_serie(self):
        test_char_variable = self._test_na.fillna_serie('character_variable_fillna')
        test_num_variable = self._test_na.fillna_serie('numeric_variable_fillna')
        self.assertTrue(test_char_variable.notnull().any())
        self.assertTrue(test_num_variable.notnull().any())
        self.assertTrue((pd.Series(
            ['A'] * 300 + ['B'] * 200 + ['C'] * 200 + ['A'] * 300) == test_char_variable).all())
        self.assertTrue(
            (pd.Series([1] * 400 + [3] * 400 + [2] * 200) == test_num_variable).all())

    @clock
    def test_fill_low_na(self):
        df_fill_low_na = self._test_na.basic_naimputation(columns_to_process=['character_variable_fillna',
                                                                              'numeric_variable_fillna'])
        df_fill_low_na_threshold = self._test_na.basic_naimputation(threshold=0.4)
        self.assertIsInstance(df_fill_low_na, pd.DataFrame)
        self.assertIsInstance(df_fill_low_na_threshold, pd.DataFrame)
        self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
            'A'] * 300) == df_fill_low_na.character_variable_fillna).all())
        self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
                         == df_fill_low_na.numeric_variable_fillna).all())
        self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
            'A'] * 300) == df_fill_low_na_threshold.character_variable_fillna).all())
        self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
                         == df_fill_low_na_threshold.numeric_variable_fillna).all())
        self.assertTrue(
            sum(pd.isnull(df_fill_low_na_threshold.many_missing_70)) == 700)
class TestOutliersDetection(unittest.TestCase):
    # exercises 1d outlier detection on the 'outlier' / 'outlier_na'
    # columns of the shared synthetic dataframe
    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls.data = create_test_df()
        cls.outlier_d = OutliersDetection(cls.data)
    @clock
    def test_outlier_detection_serie_1d(self):
        strong_cutoff = self.outlier_d.strong_cutoff
        df_outliers = self.outlier_d.outlier_detection_serie_1d('outlier', strong_cutoff)
        # rows 1 and 100 are planted outliers; 2 and 10 are ordinary values
        self.assertIn(1, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
        self.assertNotIn(10, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
        self.assertIn(100, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
        self.assertNotIn(2, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
    @clock
    def test_outlier_detection_serie_1d_with_na(self):
        strong_cutoff = self.outlier_d.strong_cutoff
        # same expectations on a column that also contains missing values
        df_outliers = self.outlier_d.outlier_detection_serie_1d('outlier_na', strong_cutoff)
        self.assertIn(1, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
        self.assertNotIn(10, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
        self.assertIn(100, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
        self.assertNotIn(2, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
class TestHelper(unittest.TestCase):
    """Unit tests for the helper functions random_pmf and simu."""

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls.data = create_test_df()

    @clock
    def test_random_pmf(self):
        # a pmf of length 10 that sums to one
        self.assertAlmostEqual(len(random_pmf(10)), 10)
        self.assertAlmostEqual(random_pmf(10).sum(), 1)

    @clock
    def test_simu(self):
        # degenerate pmf: 'B' has probability 1, so every draw must be 'B'
        # (removed an unused `pmf = random_pmf(4)` local)
        samples_unique = simu((np.array(['A', 'B']), np.array([0, 1])), 10)
        self.assertTrue((samples_unique == 'B').all())
# class TestGetData(unittest.TestCase):
#
# @clock
# def test_getdata_titanic(self):
# """ Test if downloading titanic data is working """
# titanic = get_dataset('titanic')
# self.assertIsInstance(titanic, pd.DataFrame)
# self.assertEqual(titanic.shape[0], 891)
# self.assertEqual(titanic.shape[1], 15)
# Adding new tests sets
# def suite():
# suite = unittest.TestSuite()
# suite.addTest(TestPandasPatch('test_default_size'))
# return suite
# Other solution than calling main
#suite = unittest.TestLoader().loadTestsFromTestCase(TestPandasPatch)
#unittest.TextTestRunner(verbosity = 1 ).run(suite)
if __name__ == "__main__":
    # exit=False: report results without raising SystemExit (friendlier
    # when the suite is run from an interactive session)
    unittest.main(exit=False)
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Create toolbox functions to use for the different pieces of code ot the package
"""
from numpy.random import normal
from numpy.random import choice
import time
import pandas as pd
import numpy as np
import functools
def print_section(section_name, width=120):
    """Print *section_name* centered on a line of '=' of length *width*.

    Used for section headers in DataExploration reports.
    """
    section_name = ' ' + section_name + ' '
    # BUGFIX: the original spec '{:=^{ }}' contains a nested replacement
    # field named ' ' (a single space), which str.format resolves as a
    # keyword lookup and raises KeyError; '{}' auto-numbers to `width`.
    print('{:=^{}}'.format(section_name, width))
# def get_dataset(name, *args, **kwargs):
# """Get a dataset from the online repo
# https://github.com/ericfourrier/autoc-datasets (requires internet).
#
# Parameters
# ----------
# name : str
# Name of the dataset 'name.csv'
# """
# path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
# return pd.read_csv(path, *args, **kwargs)
def flatten_list(x):
    """Recursively flatten nested lists; a non-list becomes a one-element list."""
    if not isinstance(x, list):
        return [x]
    return [item for sub in x for item in flatten_list(sub)]
def cserie(serie, index=False):
    """Return the index labels where a boolean serie is True.

    With index=True the pandas Index is returned; otherwise a plain list.
    """
    matches = serie[serie].index
    return matches if index else matches.tolist()
def removena_numpy(array):
    """Return *array* with every NaN entry removed."""
    nan_mask = np.isnan(array)
    return array[~nan_mask]
def common_cols(df1, df2):
    """ Return the intersection of commun columns name """
    shared = set(df1.columns).intersection(df2.columns)
    return list(shared)
def bootstrap_ci(x, n=300, ci=0.95):
    """
    Bootstrap percentile confidence interval for the mean of a numpy array.

    Arguments
    ---------
    x : a numpy ndarray
    n : the number of bootstrap samples
    ci : the confidence level (float) in ]0,1[

    Return
    -------
    a tuple (ci_inf, ci_up); (nan, nan) when x holds no finite values
    """
    tail = 100 * (1 - ci) / 2
    upper = 100 * ci + tail
    # strip NaN (inlined removena_numpy helper)
    x = x[~np.isnan(x)]
    if len(x) == 0:
        return (np.nan, np.nan)
    # one bootstrap mean per column: resample len(x) values, n times
    boot_means = choice(a=x, size=(len(x), n), replace=True).mean(axis=0)
    return np.percentile(boot_means, [tail, upper])
def clock(func):
    """ Decorator that prints the wall-clock duration (in ms) of each call;
    originally meant for timing unittest cases but works on any positional-arg
    function. """
    def clocked(*args):
        start = time.time()
        outcome = func(*args)
        duration_ms = (time.time() - start) * 1000
        print('elapsed : [{0:0.3f}ms]'.format(duration_ms))
        return outcome
    return clocked
def cached_property(fun):
    """A memoize decorator for class properties.

    The computed value is stored in a per-instance ``_cache`` dict keyed by
    the wrapped function object, so each property is evaluated at most once
    per instance.
    """
    @functools.wraps(fun)
    def get(self):
        try:
            cache = self._cache
        except AttributeError:
            cache = self._cache = {}
        if fun not in cache:
            cache[fun] = fun(self)
        return cache[fun]
    return property(get)
def create_test_df():
    """ Creating a test pandas DataFrame for the unittest suite """
    # NOTE: several columns draw from numpy's global RNG; the creation order
    # below therefore matters for reproducibility under a fixed seed.
    test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [
        10 * i for i in range(1, 1001)]})
    # fully-missing column
    test_df['na_col'] = np.nan
    # id column with a few NaN holes
    test_df['id_na'] = test_df.id
    test_df.loc[1:3, 'id_na'] = np.nan
    # constant columns (string and numeric flavours)
    test_df['constant_col'] = 'constant'
    test_df['constant_col_num'] = 0
    # categorical-like columns
    test_df['character_factor'] = [
        choice(list('ABCDEFG')) for _ in range(1000)]
    test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]
    # near-zero-variance column: a single row differs from the rest
    test_df['nearzerovar_variable'] = 'most_common_value'
    test_df.loc[0, 'nearzerovar_variable'] = 'one_value'
    test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]
    test_df['character_variable'] = [str(i) for i in range(1000)]
    test_df['duplicated_column'] = test_df.id
    # columns with controlled proportions of missing values
    test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700
    test_df['character_variable_fillna'] = ['A'] * \
        300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300
    test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200
    test_df['num_variable'] = 100.0
    test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]
    # outlier columns: extreme values injected into a standard normal sample
    test_df['outlier'] = normal(size=1000)
    test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]
    test_df['outlier_na'] = test_df['outlier']
    test_df.loc[[300, 500], 'outlier_na'] = np.nan
    test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')
    # None vs np.nan flavours of missingness
    test_df['None_100'] = [1] * 900 + [None] * 100
    test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100
    test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300
    test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300
    # strings that should be detected as alternative NA encodings
    test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \
        ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \
        ['Unknown'] * 100 + ['do_not_touch'] * 200
    return test_df
def simu(pmf, size):
    """ Draw samples from a discrete distribution.

    Parameters
    ----------
    pmf : tuple(ndarray, ndarray)
        (labels, probs), with probabilities in ascending order
    size : int
        number of samples to generate

    Returns
    -------
    ndarray of drawn labels (dtype follows the labels array)
    """
    labels = pmf[0]
    probs = pmf[1]
    cdf = probs.cumsum()
    draws = np.random.rand(size)
    # inverse-CDF sampling: first cdf entry >= draw gives the label position
    positions = (draws >= cdf[:, None]).argmin(axis=0)
    return labels[positions]
def shuffle_df(df, reindex=False):
    """ Return a row-shuffled copy of *df*; with reindex=True the old index
    becomes a column and a fresh RangeIndex is installed. """
    shuffled = df.sample(frac=1)
    if reindex:
        shuffled = shuffled.reset_index()
    return shuffled
def random_pmf(nb_labels):
    """ Return a random probability mass function over nb_labels outcomes
    (non-negative entries summing to 1). """
    raw = np.random.random(nb_labels)
    return raw / np.sum(raw)
def random_histogram(nb_labels, nb_observations):
    """ Return a random, normalized histogram with nb_labels bins whose raw
    counts are drawn uniformly from [0, nb_observations). """
    counts = np.random.choice(np.arange(0, nb_observations), nb_labels)
    return counts / np.sum(counts)
def keep_category(df, colname, pct=0.05, n=5):
    """ Sample row labels so that every level of a categorical column keeps
    at least one representative.

    Parameters
    ----------
    pct : float
        Keep at most pct of the observations of each category.
    n : int
        Keep at most n observations of each category (and always at least 1).

    Returns
    -------
    pandas.Index of the row labels to keep
    """
    per_group = df.groupby(colname).apply(lambda g: g.sample(
        max(1, min(g.shape[0], n, int(g.shape[0] * pct)))).index)
    kept = []
    for group_index in per_group:
        kept.extend(group_index.tolist())
    return pd.Index(kept)
# for k, i in df.groupby(colname).groups:
# to_keep += np.random.choice(i, max(1, min(g.shape[0], n, int(g.shape[0] * pct))), replace=False)
# return to_keep
#
def simulate_na_col(df, colname, n=None, pct=None, weights=None,
                    safety=True, *args, **kwargs):
    """ Simulate missing values, in place, in a categorical column of *df*.

    Parameters
    ----------
    colname : str, list or pandas.Index
        Column(s) to degrade; a collection recurses over each column.
    n : int
        Number of values to blank out (takes precedence over pct).
    pct : float
        Fraction of rows to blank out, used when n is None.
    weights : array-like, optional
        Probability mass over the column's levels; random when omitted.
    safety : bool
        True to first protect a small sample of every level (keep_category)
        so no level disappears entirely.

    Notes
    -----
    Fix issue with category variable
    """
    if (n is None) and (pct is not None):
        # be careful here especially if cols has a lot of missing values
        n = int(pct * df.shape[0])
    # bug fix: pd.core.index.Index no longer exists in modern pandas
    if isinstance(colname, (pd.Index, list)):
        for c in colname:
            simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights)
    else:
        col = df.loc[:, colname]
        if safety:
            # bug fix: `tokeep`/`col` were only defined inside the safety
            # branch, so safety=False raised NameError
            tokeep = keep_category(df, colname, *args, **kwargs)
            # we are not sampling from tokeep
            col = col.drop(tokeep)
        col = col.dropna()
        col_distribution = col.value_counts(normalize=True, sort=False)
        labels = col_distribution.index  # characters
        # generate a random pmf unless explicit weights were provided
        # (bug fix: `weights if weights` is ambiguous for array-likes)
        pmf_na = weights if weights is not None else random_pmf(len(labels))
        na_distribution = pd.Series(data=pmf_na, index=labels)
        # draw the rows to blank proportionally to their level's NA weight
        weights_na = col.apply(lambda x: na_distribution[x])
        weights_na /= weights_na.sum()
        index_to_replace = col.sample(
            n=n, weights=weights_na, replace=False).index
        df.loc[index_to_replace, colname] = np.nan
def get_test_df_complete():
    """ get the full test dataset from Lending Club open source database,
    the purpose of this fuction is to be used in a demo ipython notebook

    Downloads a zip over HTTP, parses the first CSV member and augments it
    with synthetic test columns (all-NaN, constant, duplicated, partly
    missing, and a 'bad' loan-status flag).
    """
    import requests
    from zipfile import ZipFile
    # bug fix: `import StringIO` is Python-2-only, and r.content is *bytes*,
    # so the buffer must be io.BytesIO
    from io import BytesIO
    zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
    r = requests.get(zip_to_download)
    zipfile = ZipFile(BytesIO(r.content))
    file_csv = zipfile.namelist()[0]
    # we are using the c parser for speed
    df = pd.read_csv(zipfile.open(file_csv), skiprows=[0], na_values=['n/a', 'N/A', ''],
                     parse_dates=['issue_d', 'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d'])
    zipfile.close()
    # the last two rows are summary/footer lines, not data
    df = df[:-2]
    nb_row = float(len(df.index))
    df['na_col'] = np.nan
    df['constant_col'] = 'constant'
    df['duplicated_column'] = df.id
    df['many_missing_70'] = np.nan
    df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1
    # bad = 1 unless the loan is in a healthy status
    df['bad'] = 1
    index_good = df['loan_status'].isin(
        ['Fully Paid', 'Current', 'In Grace Period'])
    df.loc[index_good, 'bad'] = 0
    return df
def kl(p, q):
    """
    Kullback-Leibler divergence for discrete distributions

    Parameters
    ----------
    p: ndarray
        probability mass function
    q: ndarray
        probability mass function

    Returns
    --------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i))
    Discrete probability distributions.
    """
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    # bug fix: np.where evaluated p*log(p/q) for every entry, emitting
    # divide-by-zero warnings and nan intermediates at p == 0; masking
    # restricts the computation to the support of p (0*log0 -> 0 by convention)
    support = p != 0
    return np.sum(p[support] * np.log(p[support] / q[support]))
def kl_series(serie1, serie2, dropna=True):
    """ Kullback-Leibler divergence between the empirical value distributions
    of two pandas Series (missing values dropped by default). """
    if dropna:
        serie1 = serie1.dropna()
        serie2 = serie2.dropna()
    p = serie1.value_counts(normalize=True).values
    q = serie2.value_counts(normalize=True).values
    return kl(p, q)
def plot_hist_na(df, colname):
    """Plot histograms of every numeric column of *df*, split by whether
    *colname* is missing — a quick visual check for "missing not at random"
    patterns. Relies on pandas' hist() plotting backend (side effect only)."""
    df_h = df.copy()
    na_name = "is_na_{}".format(colname)
    # 0/1 indicator of missingness used as the grouping key
    df_h[na_name] = df_h[colname].isnull().astype(int)
    measure_col = cserie((df.dtypes == int) | (df.dtypes == float))
    df_h.groupby(na_name)[measure_col].hist()
def psi(bench, target, group, print_df=True):
    """ Return the Population Stability Index, quantifying whether a numeric
    distribution is stable between two states.

    Parameters
    ----------
    bench : numpy array with the reference variable.
    target : numpy array of the new variable.
    group : number of quantile groups to consider.
    print_df : when True, return {'data': per-bin DataFrame, 'statistic': psi};
        otherwise return the bare statistic.
    """
    quantile_points = [(100.0 / group) * i for i in range(group + 1)]
    # bug fix / modernization: the `interpolation=` keyword of np.percentile
    # was renamed to `method=` (deprecated since numpy 1.22)
    labels_q = np.percentile(bench, quantile_points, method="nearest")
    # np.unique guards against duplicated cut points when the variable has
    # few distinct values
    bins = np.unique(labels_q)
    ben_pct = (pd.cut(bench, bins=bins,
                      include_lowest=True).value_counts()) / len(bench)
    target_pct = (pd.cut(target, bins=bins,
                         include_lowest=True).value_counts()) / len(target)
    target_pct = target_pct.sort_index()  # sort the index
    ben_pct = ben_pct.sort_index()  # sort the index
    # renamed local (was `psi`, shadowing the function name)
    stat = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))
    # Return the per-bin table as well for better understanding
    if print_df:
        results = pd.DataFrame({'ben_pct': ben_pct.values,
                                'target_pct': target_pct.values},
                               index=ben_pct.index)
        return {'data': results, 'statistic': stat}
    return stat
--- FILE SEPARATOR ---
"""
@author: efourrier
Purpose : This is a simple experimental class to detect outliers. This class
can be used to detect missing values encoded as outlier (-999, -1, ...)
"""
from autoc.explorer import DataExploration, pd
import numpy as np
#from autoc.utils.helpers import cserie
from exceptions import NotNumericColumn
def iqr(ndarray, dropna=True):
    """Interquartile range (Q3 - Q1) of the values, NaN stripped by default."""
    if dropna:
        ndarray = ndarray[~np.isnan(ndarray)]
    q75, q25 = np.percentile(ndarray, [75, 25])
    return q75 - q25
def z_score(ndarray, dropna=True):
    """Standard score of each value: (x - mean) / std, NaN stripped by default."""
    if dropna:
        ndarray = ndarray[~np.isnan(ndarray)]
    centered = ndarray - np.mean(ndarray)
    return centered / np.std(ndarray)
def iqr_score(ndarray, dropna=True):
    """Robust score: distance to the median scaled by the interquartile range."""
    if dropna:
        ndarray = ndarray[~np.isnan(ndarray)]
    # inlined iqr() helper, which strips NaN before taking the percentiles
    clean = ndarray[~np.isnan(ndarray)]
    spread = np.percentile(clean, 75) - np.percentile(clean, 25)
    return (ndarray - np.median(ndarray)) / spread
def mad_score(ndarray, dropna=True):
    """Robust score based on the median absolute deviation (MAD).

    Dividing the MAD by 0.6745 makes it consistent with the standard
    deviation under normality.
    """
    if dropna:
        ndarray = ndarray[~np.isnan(ndarray)]
    med = np.median(ndarray)
    mad = np.median(np.absolute(ndarray - med))
    return (ndarray - med) / (mad / 0.6745)
class OutliersDetection(DataExploration):
    """
    this class focuses on identifying outliers

    Parameters
    ----------
    data : DataFrame

    Examples
    --------
    * od = OutliersDetection(data = your_DataFrame)
    * od.structure() : global structure of your DataFrame
    """

    def __init__(self, *args, **kwargs):
        super(OutliersDetection, self).__init__(*args, **kwargs)
        # conservative thresholds: flag only extreme points
        self.strong_cutoff = {'cutoff_z': 6,
                              'cutoff_iqr': 6, 'cutoff_mad': 6}
        # standard textbook thresholds (e.g. |z| >= 3)
        self.basic_cutoff = {'cutoff_z': 3,
                             'cutoff_iqr': 2, 'cutoff_mad': 2}

    def check_negative_value_serie(self, colname):
        """ Count negative values in one numeric column.

        Raises NotNumericColumn for non-numeric columns.

        Note: renamed from the former one-argument ``check_negative_value``,
        which was shadowed by the aggregate method below, referenced an
        undefined ``serie`` variable, and built the exception without raising
        it. This is the helper the aggregate method actually calls.
        """
        if not self.is_numeric(colname):
            # bug fix: the exception was instantiated but never raised
            raise NotNumericColumn("The serie should be numeric values")
        serie = self.data.loc[:, colname]
        return sum(serie < 0)

    def outlier_detection_serie_1d(self, colname, cutoff_params, scores=[z_score, iqr_score, mad_score]):
        """ Per-row outlier scores for one numeric column.

        Returns a DataFrame with one column per score function plus an
        ``is_outlier`` 0/1 flag set when any score exceeds its cutoff.
        cutoff_params maps 'cutoff_z'/'cutoff_iqr'/'cutoff_mad' to floats
        (e.g. self.strong_cutoff or self.basic_cutoff).
        """
        if not self.is_numeric(colname):
            # bug fix: `raise "..."` raises TypeError (strings are not
            # exceptions); use a proper exception type
            raise ValueError(
                "auto-clean doesn't support outliers detection for Non numeric variable")
        keys = [str(func.__name__) for func in scores]
        df = pd.DataFrame(dict((key, func(self.data.loc[:, colname]))
                               for key, func in zip(keys, scores)))
        df['is_outlier'] = 0
        for s in keys:
            # 'z_score' -> 'cutoff_z', 'iqr_score' -> 'cutoff_iqr', ...
            cutoff_colname = "cutoff_{}".format(s.split('_')[0])
            index_outliers = np.absolute(df[s]) >= cutoff_params[cutoff_colname]
            df.loc[index_outliers, 'is_outlier'] = 1
        return df

    def check_negative_value(self):
        """ Negative-value count for each numeric variable of the DataFrame. """
        return self.data[self._dfnum].apply(lambda x: self.check_negative_value_serie(x.name))

    def outlier_detection_1d(self, cutoff_params, subset=None,
                             scores=[z_score, iqr_score, mad_score]):
        """ Run outlier_detection_serie_1d on every numeric column and return
        the concatenated score DataFrame, columns prefixed with the variable
        name.

        subset : columns to exclude before detection.
        """
        df = self.data.copy()
        numeric_var = self._dfnum
        if subset:
            df = df.drop(subset, axis=1)
        df = df.loc[:, numeric_var]  # take only numeric variable
        df_outlier = pd.DataFrame()
        for col in df:
            df_temp = self.outlier_detection_serie_1d(col, cutoff_params, scores)
            df_temp.columns = [col + '_' +
                               col_name for col_name in df_temp.columns]
            # bug fix: df_temp was overwritten at each iteration (the concat
            # was commented out), so only the last column's scores survived
            df_outlier = pd.concat([df_outlier, df_temp], axis=1)
        return df_outlier
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for Modeling with pandas, numpy and skicit-learn.
The Goal of this module is to rely on a dataframe structure for modelling g
"""
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result into a attributes
When you used a method the output will be stored in a instance attribute so you
don't have to compute the result again.
"""
    def __init__(self, data, copy=False):
        """
        Parameters
        ----------
        data : pandas.DataFrame
            the data you want explore
        copy: bool
            True if you want make a copy of DataFrame, default False

        Examples
        --------
        explorer = DataExploration(data = your_DataFrame)
        explorer.structure() : global structure of your DataFrame
        explorer.psummary() to get the a global snapchot of the different stuff detected
        data_cleaned = explorer.basic_cleaning() to clean your data.
        """
        assert isinstance(data, pd.DataFrame)
        self.is_data_copy = copy
        self.data = data if not self.is_data_copy else data.copy()
        # if not self.label:
        #     print("""the label column is empty the data will be considered
        #     as a dataset of predictors""")
        # cached dimensions
        self._nrow = len(self.data.index)
        self._ncol = len(self.data.columns)
        # boolean mask / name list of numeric columns
        self._dfnumi = (self.data.dtypes == float) | (
            self.data.dtypes == int)
        self._dfnum = cserie(self._dfnumi)
        # boolean mask / name list of object (character) columns
        self._dfchari = (self.data.dtypes == object)
        self._dfchar = cserie(self._dfchari)
        # lazily-filled caches for the expensive computations below
        self._nacolcount = pd.DataFrame()
        self._narowcount = pd.DataFrame()
        self._count_unique = pd.DataFrame()
        self._constantcol = []
        self._dupcol = []
        self._nearzerovar = pd.DataFrame()
        self._corrcolumns = []
        self._dict_info = {}
        self._structure = pd.DataFrame()
        self._string_info = ""
        # lowercase sentinel strings treated as alternative NA encodings
        self._list_other_na = {'unknown', 'na',
                               'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
Notes
------
df._get_numeric_data() is a primitive from pandas
to get only numeric data
"""
dtype_col = self.data.loc[:, colname].dtype
return (dtype_col == int) or (dtype_col == float)
def is_int_factor(self, colname, threshold=0.1):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
threshold : float
colname is an 'int_factor' if the number of
unique values < threshold * nrows
"""
dtype_col = self.data.loc[:, colname].dtype
if dtype_col == int and self.data.loc[:, colname].nunique() <= (threshold * self.data.shape[0]):
return True
else:
return False
def to_lowercase(self):
""" Returns a copy of dataset with data to lower """
return self.data.applymap(lambda x: x.lower() if type(x) == str else x)
def where_numeric(self):
""" Returns a Boolean Dataframe with True for numeric values False for other """
return self.data.applymap(lambda x: isinstance(x, (int, float)))
def count_unique(self):
""" Return a serie with the number of unique value per columns """
if len(self._count_unique):
return self._count_unique
self._count_unique = self.data.apply(lambda x: x.nunique(), axis=0)
return self._count_unique
def sample_df(self, pct=0.05, nr=10, threshold=None):
""" sample a number of rows of a dataframe = min(max(0.05*nrow(self,nr),threshold)"""
a = max(int(pct * float(len(self.data.index))), nr)
if threshold:
a = min(a, threshold)
return self.data.loc[permutation(self.data.index)[:a],:]
def sign_summary(self, subset=None):
"""
Returns the number and percentage of positive and negative values in
a column, a subset of columns or all numeric columns of the dataframe.
Parameters
----------
subset : label or list
Column name or list of column names to check.
Returns
-------
summary : pandas.Series or pandas.DataFrame
Summary of the signs present in the subset
"""
if subset:
subs = subs if isinstance(subs, list) else [subs]
if sum(col not in self._dfnum for col in subs) > 0:
raise NotNumericColumn('At least one of the columns you passed ' \
'as argument are not numeric.')
else:
subs = self._dfnum
summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
'NumOfPositive', 'PctOfPositive'])
summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
return summary
    @property
    def total_missing(self):
        """ Count the total number of missing values """
        # return np.count_nonzero(self.data.isnull().values) # optimized for
        # speed
        # delegates to the cached per-column count and sums it
        return self.nacolcount().Nanumber.sum()
def nacolcount(self):
""" count the number of missing values per columns """
if len(self._nacolcount):
return self._nacolcount
self._nacolcount = self.data.isnull().sum(axis=0)
self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
self._nacolcount['Napercentage'] = self._nacolcount[
'Nanumber'] / (self._nrow)
return self._nacolcount
def narowcount(self):
""" count the number of missing values per columns """
if len(self._narowcount):
return self._narowcount
self._narowcount = self.data.isnull().sum(axis=1)
self._narowcount = pd.DataFrame(
self._narowcount, columns=['Nanumber'])
self._narowcount['Napercentage'] = self._narowcount[
'Nanumber'] / (self._ncol)
return self._narowcount
def detect_other_na(self, verbose=True, auto_replace=False):
""" Detect missing values encoded by the creator of the dataset
like 'Missing', 'N/A' ...
Parameters
----------
verbose : bool
True if you want to print some infos
auto_replace: bool
True if you want replace this value by np.nan, default False
Returns
-------
an DataFrame of boolean if not auto_replace else cleaned DataFrame with
self._list_other_na replaced by np.nan
Notes
------
* You can use na_values parameter in pandas.read_csv to specify the missing
values to convert to nan a priori
* Speed can be improved
"""
res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
print("We detected {} other type of missing values".format(res.sum().sum()))
if auto_replace:
return self.data.where((res == False), np.nan)
else:
return res
    @property
    def nacols_full(self):
        """ Returns a list of columns with only missing values """
        # a column is fully missing when its NaN count equals the row count
        return cserie(self.nacolcount().Nanumber == self._nrow)
    @property
    def narows_full(self):
        """ Returns a boolean Series flagging rows made only of missing values """
        # a row is fully missing when its NaN count equals the column count
        return self.narowcount().Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
def manymissing(self, pct=0.9, axis=0):
""" identify columns of a dataframe with many missing values ( >= pct), if
row = True row either.
- the output is a list """
if axis == 1:
self._manymissingrow = self.narowcount()
self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
return self._manymissingrow
elif axis == 0:
self._manymissingcol = self.nacolcount()
self._manymissingcol = cserie(
self._manymissingcol['Napercentage'] >= pct)
return self._manymissingcol
else:
raise ValueError("Axis should be 1 for rows and o for columns")
def df_len_string(self, drop_num=False):
""" Return a Series with the max of the length of the string of string-type columns """
if drop_num:
return self.data.drop(self._dfnum, axis=1).apply(lambda x: np.max(x.str.len()), axis=0)
else:
return self.data.apply(lambda x: np.max(x.str.len()) if x.dtype.kind =='O' else np.nan , axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
""" identify id or key columns as an index if index_format = True or
as a list if index_format = False """
if not dropna:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: len(x.unique()) == len(x), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
else:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: x.nunique() == len(x.dropna()), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
def constantcol(self, **kwargs):
""" identify constant columns """
# sample to reduce computation time
if len(self._constantcol):
return self._constantcol
col_to_keep = self.sample_df(
**kwargs).apply(lambda x: len(x.unique()) == 1, axis=0)
if len(cserie(col_to_keep)) == 0:
return []
self._constantcol = cserie(self.data.loc[:, col_to_keep].apply(
lambda x: len(x.unique()) == 1, axis=0))
return self._constantcol
def constantcol2(self, **kwargs):
""" identify constant columns """
return cserie((self.data == self.data.ix[0]).all())
    def factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """ return a list of the detected factor variable, detection is based on
        ther percentage of unicity perc_unique = 0.05 by default.

        We follow here the definition of R factors variable considering that a
        factor variable is a character variable that take value in a list a levels

        this is a bad implementation


        Arguments
        ----------
        nb_max_levels: the mac nb of levels you fix for a categorical variable
        threshold_value : the nb of of unique value in percentage of the dataframe length
        index : if you want the result as an index or a list
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels

        def helper_factor(x, num_var=self._dfnum):
            # early-exit scan: stop as soon as more than max_levels distinct
            # values are seen, instead of computing a full nunique()
            unique_value = set()
            if x.name in num_var:
                # numeric columns are never considered factors
                return False
            else:
                for e in x.values:
                    if len(unique_value) >= max_levels:
                        return False
                    else:
                        unique_value.add(e)
                return True

        if index:
            return self.data.apply(lambda x: helper_factor(x))
        else:
            return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in xrange(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in xrange(nb_quantiles + 1)])
else:
raise("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
""" this function gives you a all the quantiles
of the numeric variables of the dataframe
only_numeric will calculate it only for numeric variables,
for only_numeric = False you will get NaN value for non numeric
variables """
binq = 1.0 / nb_quantiles
if only_numeric:
return self.data.loc[:, self._dfnumi].quantile([binq * i for i in xrange(nb_quantiles + 1)])
else:
return self.data.quantile([binq * i for i in xrange(nb_quantiles + 1)])
def numeric_summary(self):
""" provide a more complete sumary than describe, it is using only numeric
value """
df = self.data.loc[:, self._dfnumi]
func_list = [df.count(), df.min(), df.quantile(0.25),
df.quantile(0.5), df.mean(),
df.std(), df.mad(), df.skew(),
df.kurt(), df.quantile(0.75), df.max()]
results = [f for f in func_list]
return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
'Median', 'Mean', 'Std', 'Mad', 'Skewness',
'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
""" this function will try to infer the type of the columns of data"""
return self.data.apply(lambda x: pd.lib.infer_dtype(x.values))
    def structure(self, threshold_factor=10):
        """ this function return a summary of the structure of the pandas DataFrame
        data looking at the type of variables, the number of missing values, the
        number of unique values """
        # cached: computed once per instance
        if len(self._structure):
            return self._structure
        dtypes = self.data.dtypes
        nacolcount = self.nacolcount()
        nb_missing = nacolcount.Nanumber
        perc_missing = nacolcount.Napercentage
        nb_unique_values = self.count_unique()
        dtype_infer = self.infer_types()
        # coarse R-like typing: numeric / factor / character
        dtypes_r = self.data.apply(lambda x: "character")
        dtypes_r[self._dfnumi] = "numeric"
        dtypes_r[(dtypes_r == 'character') & (
            nb_unique_values <= threshold_factor)] = 'factor'
        constant_columns = (nb_unique_values == 1)
        na_columns = (perc_missing == 1)
        # a column is a candidate key when every row value is unique
        is_key = nb_unique_values == self._nrow
        string_length = self.df_len_string(drop_num=False)
        # is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
        dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                    'nb_missing': nb_missing, 'is_key': is_key,
                    'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                    'constant_columns': constant_columns, 'na_columns': na_columns,
                    'dtype_infer': dtype_infer, 'string_length': string_length}
        self._structure = pd.concat(dict_str, axis=1)
        # fix the column order for a stable report layout
        self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
                                                  'nb_unique_values', 'constant_columns',
                                                  'na_columns', 'is_key', 'dtype_infer', 'string_length']]
        return self._structure
def findupcol(self, threshold=100, **kwargs):
""" find duplicated columns and return the result as a list of list """
df_s = self.sample_df(threshold=100, **kwargs).T
dup_index_s = (df_s.duplicated()) | (
df_s.duplicated(keep='last'))
if len(cserie(dup_index_s)) == 0:
return []
df_t = (self.data.loc[:, dup_index_s]).T
dup_index = df_t.duplicated()
dup_index_complet = cserie(
(dup_index) | (df_t.duplicated(keep='last')))
l = []
for col in cserie(dup_index):
index_temp = self.data[dup_index_complet].apply(
lambda x: (x == self.data[col])).sum() == self._nrow
temp = list(self.data[dup_index_complet].columns[index_temp])
l.append(temp)
self._dupcol = l
return self._dupcol
def finduprow(self, subset=[]):
""" find duplicated rows and return the result a sorted dataframe of all the
duplicates
subset is a list of columns to look for duplicates from this specific subset .
"""
if sum(self.data.duplicated()) == 0:
print("there is no duplicated rows")
else:
if subset:
dup_index = (self.data.duplicated(subset=subset)) | (
self.data.duplicated(subset=subset, keep='last'))
else:
dup_index = (self.data.duplicated()) | (
self.data.duplicated(keep='last'))
if subset:
return self.data[dup_index].sort(subset)
else:
return self.data[dup_index].sort(self.data.columns[0])
    def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
        """ identify predictors with near-zero variance.
                freq_cut: cutoff ratio of frequency of most common value to second
                most common value.
                unique_cut: cutoff percentage of unique value over total number of
                samples.
                save_metrics: if False, print dataframe and return NON near-zero var
                col indexes, if True, returns the whole dataframe.
        """
        nb_unique_values = self.count_unique()
        percent_unique = 100 * nb_unique_values / self._nrow

        def helper_freq(x):
            # ratio of the most common value's frequency to the runner-up's;
            # degenerate columns (0 or 1 distinct value) get a fixed ratio
            if nb_unique_values[x.name] == 0:
                return 0.0
            elif nb_unique_values[x.name] == 1:
                return 1.0
            else:
                return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]

        freq_ratio = self.data.apply(helper_freq)
        # near-zero variance: a dominant value plus few unique values, or constant
        zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
        nzv = ((freq_ratio >= freq_cut) & (
            percent_unique <= unique_cut)) | (zerovar)
        if save_metrics:
            return pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio, 'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns)
        else:
            print(pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio,
                                'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns))
            return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
"""
implementation of the Recursive Pairwise Elimination.
The function finds the highest correlated pair and removes the most
highly correlated feature of the pair, then repeats the process
until the threshold 'cutoff' is reached.
will return a dataframe is 'data_frame' is set to True, and the list
of predictors to remove oth
Adaptation of 'findCorrelation' function in the caret package in R.
"""
res = []
df = self.data.copy(0)
cor = df.corr(method=method)
for col in cor.columns:
cor[col][col] = 0
max_cor = cor.max()
if print_mode:
print(max_cor.max())
while max_cor.max() > cutoff:
A = max_cor.idxmax()
B = cor[A].idxmax()
if cor[A].mean() > cor[B].mean():
cor.drop(A, 1, inplace=True)
cor.drop(A, 0, inplace=True)
res += [A]
else:
cor.drop(B, 1, inplace=True)
cor.drop(B, 0, inplace=True)
res += [B]
max_cor = cor.max()
if print_mode:
print(max_cor.max())
if data_frame:
return df.drop(res, 1)
else:
return res
self._corrcolumns = res
    def get_infos_consistency(self):
        """ Update self._dict_info and returns infos about duplicates rows and cols,
        constant col,narows and cols """
        # each entry: value = offending rows/cols, level = severity,
        # action/comment = suggested remediation
        infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True), 'level': 'ERROR',
                                     'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
                 'dup_columns': {'value': self.findupcol(), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.findupcol())},
                 'constant_columns': {'value': self.constantcol(), 'level': 'WARNING',
                                      'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.constantcol())},
                 'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                                 'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
                 'nacols_full': {'value': self.nacols_full, 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.nacols_full)}
                 }
        # update
        self._dict_info.update(infos)
        return infos
    def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
        """ Update self._dict_info and returns infos about duplicates rows and cols,
        constant col, narows and cols """
        nacolcount_p = self.nacolcount().Napercentage
        # ph = high threshold (mostly-missing columns), pl = low threshold
        # (sparsely-missing columns that are good imputation candidates)
        # NOTE(review): pct_total_missing divides by the row count only; a true
        # cell percentage would use nrow * ncol — confirm intent
        infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
                 'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
                 'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
                 'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
                 }
        # update
        self._dict_info.update(infos)
        return infos
    def print_infos(self, infos="consistency", print_empty=False):
        """ pprint of get_infos

        Parameters
        ----------
        print_empty: bool:
            False if you don't want print the empty infos (
            no missing colum for example)"""
        # only the "consistency" report is implemented; other values are a no-op
        if infos == "consistency":
            dict_infos = self.get_infos_consistency()
            if not print_empty:
                # drop entries whose 'value' is empty (nothing detected)
                dict_infos = {k: v for k, v in dict_infos.items() if len(v['value']) > 0}
            pprint(dict_infos)
    def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
                 threshold=100, string_threshold=40, dynamic=False):
        """
        Print a summary of the dataset's quality problems (duplicates,
        missing values, keys, constant/near-zero-variance columns,
        correlated columns, long strings).

        Parameters
        ----------
        manymissing_ph : float
            upper NA-rate threshold; columns above it are reported.
        manymissing_pl : float
            lower NA-rate threshold; columns in (0, pl] are reported as
            imputation candidates.
        nzv_freq_cut, nzv_unique_cut :
            forwarded to self.nearzerovar().
        threshold : int
            NOTE(review): accepted but never used — the findupcol() calls
            below hard-code threshold=100; confirm intent.
        string_threshold : int
            minimum length for the "big strings" report.
        dynamic : bool
            True -> print each section as it is computed;
            False -> build the whole report first, store it in
            self._dict_info / self._string_info, then print it once.
        """
        nacolcount_p = self.nacolcount().Napercentage
        if dynamic:
            # section-by-section output; nothing is stored on self
            print('there are {0} duplicated rows\n'.format(
                self.data.duplicated().sum()))
            print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                          cserie((nacolcount_p > manymissing_ph))))
            print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
                manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
            print('the detected keys of the dataset are:\n{0} \n'.format(
                self.detectkey()))
            print('the duplicated columns of the dataset are:\n{0}\n'.format(
                self.findupcol(threshold=100)))
            print('the constant columns of the dataset are:\n{0}\n'.format(
                self.constantcol()))
            print('the columns with nearzerovariance are:\n{0}\n'.format(
                list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
            print('the columns highly correlated to others to remove are:\n{0}\n'.format(
                self.findcorr(data_frame=False)))
            print('these columns contains big strings :\n{0}\n'.format(
                cserie(self.df_len_string() > string_threshold)))
        else:
            # cache the raw results so callers can reuse them programmatically
            self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                               'many_missing_percentage': manymissing_ph,
                               'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                               'low_missing_percentage': manymissing_pl,
                               'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                               'keys_detected': self.detectkey(),
                               'dup_columns': self.findupcol(threshold=100),
                               'constant_columns': self.constantcol(),
                               'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                               'high_correlated_col': self.findcorr(data_frame=False),
                               'big_strings_col': cserie(self.df_len_string() > string_threshold)
                               }
            # render the cached dict into the printable report
            self._string_info = u"""
        there are {nb_duplicated_rows} duplicated rows\n
        the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
        the columns with less than {low_missing_percentage:.2%}% manymissing values are :\n{lowmissing_columns} \n
        you should fill them with median or most common value\n
        the detected keys of the dataset are:\n{keys_detected} \n
        the duplicated columns of the dataset are:\n{dup_columns}\n
        the constant columns of the dataset are:\n{constant_columns}\n
        the columns with nearzerovariance are:\n{nearzerovar_columns}\n
        the columns highly correlated to others to remove are:\n{high_correlated_col}\n
        these columns contains big strings :\n{big_strings_col}\n
        """.format(**self._dict_info)
            print(self._string_info)
def metadata(self):
""" Return a dict/json full of infos about the dataset """
meta = {}
meta['mem_size'] = self.data.memory_usage(index=True).sum() # in bytes
meta['columns_name'] = self.data.columns.tolist()
meta['columns_name_n'] = [e.lower() for e in self.data.columns]
meta['nb_rows'] = self.data.shape[0]
meta['nb_columns'] = self.data.shape[1]
# drop dtype_p for mongodb compatibility
structure_data = self.structure().drop(labels='dtypes_p', axis=1)
structure_data = structure_data.to_dict('index')
meta['structure'] = structure_data
meta['numeric_summary'] = self.numeric_summary().to_dict('index')
return meta
--- FILE SEPARATOR ---
from autoc.explorer import DataExploration, pd
from autoc.utils.helpers import cserie
import seaborn as sns
import matplotlib.pyplot as plt
#from autoc.utils.helpers import cached_property
from autoc.utils.corrplot import plot_corrmatrix
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats.mstats import ks_2samp
def missing_map(df, nmax=100, verbose=True, yticklabels=False, figsize=(15, 11), *args, **kwargs):
    """ Returns missing map plot like in amelia 2 package in R.

    Parameters
    ----------
    df : pd.DataFrame
        frame whose missingness pattern is plotted.
    nmax : int
        maximum number of rows to plot; larger frames are sampled down.
    verbose : bool
        NOTE(review): accepted but currently unused — kept for interface
        compatibility.
    yticklabels, figsize, *args, **kwargs
        forwarded to seaborn / matplotlib.

    Bug fixed: previously ``df_s`` was only assigned inside the
    ``nmax < df.shape[0]`` branch, so small frames raised
    UnboundLocalError instead of being plotted.
    """
    f, ax = plt.subplots(figsize=figsize)
    # sample rows only if the dataframe is too big; otherwise plot it whole
    df_s = df.sample(n=nmax) if nmax < df.shape[0] else df
    return sns.heatmap(df_s.isnull(), yticklabels=yticklabels, vmax=1, *args, **kwargs)
# class ColumnNaInfo
class NaImputer(DataExploration):
    """DataExploration subclass for analysing and imputing missing values.

    On construction it precomputes ``self.data_isna``, a 0/1 frame with one
    ``is_na_<col>`` indicator column per column that contains missing values.

    Fixes over the previous version:
    - ``delete_narows`` referenced an undefined ``verbose`` name (NameError)
      even though its docstring documented the argument; it is now a real
      keyword argument defaulting to False.
    - ``basic_naimputation`` used a mutable default argument.
    - ``split_tt_na`` compared a boolean Series with ``== True`` / ``== False``.
    """

    def __init__(self, *args, **kwargs):
        super(NaImputer, self).__init__(*args, **kwargs)
        # precompute the missingness indicator frame
        self.get_data_isna()

    @property
    def nacols(self):
        """ Returns a list of column with at least one missing values """
        return cserie(self.nacolcount().Nanumber > 0)

    @property
    def nacols_i(self):
        """ Returns the index of column with at least one missing values """
        # NOTE(review): identical to `nacols`; presumably meant to return
        # positional indices — confirm intended behaviour.
        return cserie(self.nacolcount().Nanumber > 0)

    def get_overlapping_matrix(self, normalize=True):
        """ Look at missing values overlapping """
        arr = self.data_isna.astype('float').values
        # co-occurrence counts of missingness between column pairs
        arr = np.dot(arr.T, arr)
        if normalize:
            arr = arr / (arr.max(axis=1)[:, None])
        index = self.nacols
        return pd.DataFrame(index=index, data=arr, columns=index)

    def infos_na(self, na_low=0.05, na_high=0.90):
        """ Returns a dict with various infos about missing values """
        infos = {}
        infos['nacolcount'] = self.nacolcount()
        infos['narowcount'] = self.narowcount()
        infos['nb_total_na'] = self.total_missing
        infos['many_na_col'] = self.manymissing(pct=na_high)
        infos['low_na_col'] = cserie(self.nacolcount().Napercentage < na_low)
        infos['total_pct_na'] = self.nacolcount().Napercentage.mean()
        return infos

    def get_isna(self, col):
        """ Returns a dummy variable indicating in a observation of a specific col
        is na or not 0 -> not na , 1 -> na """
        return self.data.loc[:, col].isnull().astype(int)

    @property
    def data_isna_m(self):
        """ Returns merged dataframe (data, data_is_na) """
        return pd.concat((self.data, self.data_isna), axis=1)

    def get_data_isna(self, prefix="is_na_", filter_nna=True):
        """ Returns dataset with is_na columns from the a dataframe with missing values

        Parameters
        ----------
        prefix : str
            the name of the prefix that will be append to the column name.
        filter_nna: bool
            True if you want remove column without missing values.
        """
        cols_to_keep = self.nacols if filter_nna else self.data.columns
        data_isna = self.data.loc[:, cols_to_keep].isnull().astype(int)
        data_isna.columns = ["{}{}".format(prefix, c) for c in cols_to_keep]
        self.data_isna = data_isna
        return self.data_isna

    def get_corrna(self, *args, **kwargs):
        """ Get matrix of correlation of na """
        return self.data_isna.corr(*args, **kwargs)

    def corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna (deprecated alias) """
        print("This function is deprecated")
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna """
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_density_m(self, colname, subset=None, prefix="is_na_", size=6, *args, **kwargs):
        """ Plot conditionnal density plot from all columns or subset based on
        is_na_colname 0 or 1 """
        colname_na = prefix + colname
        density_columns = self.data.columns if subset is None else subset
        # keep only numeric columns, excluding the conditioning column itself
        density_columns = [c for c in density_columns if (
            c in self._dfnum and c != colname)]
        print(density_columns)
        for col in density_columns:
            g = sns.FacetGrid(data=self.data_isna_m, col=colname_na, hue=colname_na,
                              size=size, *args, **kwargs)
            g.map(sns.distplot, col)

    def get_isna_mean(self, colname, prefix="is_na_"):
        """ Returns empirical conditional expectatation, std, and sem of other numerical variable
        for a certain colname with 0:not_a_na 1:na """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        measure_var = [c for c in self.data.columns.tolist() if c != colname]
        functions = ['mean', 'std', 'sem']
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname)[measure_var].agg(functions).transpose()

    def get_isna_ttest_s(self, colname_na, colname, type_test="ks"):
        """ Returns a two-sample test (t-test or KS) of colname split on
        whether colname_na is missing """
        index_na = self.data.loc[:, colname_na].isnull()
        measure_var = self.data.loc[:, colname].dropna()  # drop na vars
        if type_test == "ttest":
            return ttest_ind(measure_var[index_na], measure_var[~index_na])
        elif type_test == "ks":
            return ks_2samp(measure_var[index_na], measure_var[~index_na])

    def get_isna_ttest(self, colname_na, type_test="ks"):
        """ Run get_isna_ttest_s against every other numeric column """
        res = pd.DataFrame()
        col_to_compare = [c for c in self._dfnum if c !=
                          colname_na]  # remove colname_na
        for col in col_to_compare:
            ttest = self.get_isna_ttest_s(colname_na, col, type_test=type_test)
            res.loc[col, 'pvalue'] = ttest[1]
            res.loc[col, 'statistic'] = ttest[0]
            res.loc[col, 'type_test'] = type_test
        return res

    def isna_summary(self, colname, prefix="is_na_"):
        """ Returns summary from one col with describe """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname).describe().transpose()

    def delete_narows(self, pct, index=False, verbose=False):
        """ Delete rows with more na percentage than > perc in data
        Return the index

        Arguments
        ---------
        pct : float
            percentage of missing values, rows with more na percentage
            than > perc are deleted
        index : bool, default False
            True if you want an index and not a Dataframe
        verbose : bool, default False
            True if you want to see percentage of data discarded
            (fix: this was previously documented but not an actual
            parameter, which made the function raise NameError)

        Returns
        --------
        - a pandas Dataframe with rows deleted if index=False, index of
        columns to delete either
        """
        index_missing = self.manymissing(pct=pct, axis=0, index=False)
        pct_missing = len(index_missing) / len(self.data.index)
        if verbose:
            print("There is {0:.2%} rows matching conditions".format(
                pct_missing))
        if not index:
            return self.data.loc[~index_missing, :]
        else:
            return index_missing

    def fillna_serie(self, colname, threshold_factor=0.1, special_value=None, date_method='ffill'):
        """ fill values in a serie default with the mean for numeric or the most common
        factor for categorical variable """
        if special_value is not None:
            # e.g. the literal string "Missing"
            return self.data.loc[:, colname].fillna(special_value)
        elif self.data.loc[:, colname].dtype == float:
            # fill with median
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].median())
        elif self.is_int_factor(colname, threshold_factor):
            # integer-coded factor: fill with the mode
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].mode()[0])
        elif self.data.loc[:, colname].dtype == '<M8[ns]':
            # datetime: fill with the method provided by pandas
            return self.data.loc[:, colname].fillna(method=date_method)
        else:
            # fill with most common value
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].value_counts().index[0])

    def basic_naimputation(self, columns_to_process=None, threshold=None):
        """ this function will return a dataframe with na value replaced int
        the columns selected by the mean or the most common value

        Arguments
        ---------
        - columns_to_process : list of columns name with na values you wish to fill
        with the fillna_serie function (default None == empty list; a mutable
        default was removed)
        - threshold : float, optional
        also process every column whose NA rate is below this threshold

        Returns
        --------
        - a pandas DataFrame with the columns_to_process filled with the fillena_serie
        """
        columns_to_process = [] if columns_to_process is None else list(columns_to_process)
        if threshold:
            columns_to_process = columns_to_process + cserie(self.nacolcount().Napercentage < threshold)
        self.data.loc[:, columns_to_process] = self.data.loc[
            :, columns_to_process].apply(lambda x: self.fillna_serie(colname=x.name))
        return self.data

    def split_tt_na(self, colname, index=False):
        """ Split the dataset on missingness of colname, returning
        (test, train) frames — or their boolean indexes if index=True """
        index_na = self.data.loc[:, colname].isnull()
        index_test = index_na        # rows where colname is missing
        index_train = ~index_na      # rows where colname is present
        if index:
            return index_test, index_train
        else:
            return self.data.loc[index_test, :], self.data.loc[index_train, :]
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Get data from https://github.com/ericfourrier/autoc-datasets
"""
import pandas as pd
def get_dataset(name, *args, **kwargs):
    """Get a dataset from the online repo
    https://github.com/ericfourrier/autoc-datasets (requires internet).

    Parameters
    ----------
    name : str
        Name of the dataset 'name.csv'
    """
    base_url = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv"
    return pd.read_csv(base_url.format(name), *args, **kwargs)
|
{
"imported_by": [],
"imports": [
"/autoc/utils/helpers.py",
"/autoc/outliersdetection.py",
"/autoc/explorer.py",
"/autoc/naimputer.py",
"/autoc/utils/getdata.py"
]
}
|
thinkAmi-sandbox/AWS_CDK-sample
|
/step_functions/app.py
|
#!/usr/bin/env python3
from aws_cdk import core
from step_functions.step_functions_stack import StepFunctionsStack
app = core.App()
# Pass the CloudFormation stack name as the second argument
StepFunctionsStack(app, 'step-functions')
app.synth()
|
import pathlib
from aws_cdk import core
from aws_cdk.aws_iam import PolicyStatement, Effect, ManagedPolicy, ServicePrincipal, Role
from aws_cdk.aws_lambda import AssetCode, LayerVersion, Function, Runtime
from aws_cdk.aws_s3 import Bucket
from aws_cdk.aws_stepfunctions import Task, StateMachine, Parallel
from aws_cdk.aws_stepfunctions_tasks import InvokeFunction, StartExecution
from settings import AWS_SCIPY_ARN
class StepFunctionsStack(core.Stack):
    """CDK stack wiring four Lambdas into a main + sub Step Functions
    state machine, with an S3 bucket the first Lambda writes into.

    Changes: f-strings without placeholders replaced by plain literals
    (identical values, ruff F541); comments translated to English.
    """

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # base directory holding one sub-directory per Lambda function
        self.lambda_path_base = pathlib.Path(__file__).parents[0].joinpath('lambda_function')
        self.bucket = self.create_s3_bucket()
        self.managed_policy = self.create_managed_policy()
        self.role = self.create_role()
        self.first_lambda = self.create_first_lambda()
        self.second_lambda = self.create_other_lambda('second')
        self.third_lambda = self.create_other_lambda('third')
        self.error_lambda = self.create_other_lambda('error')
        self.sub_state_machine = self.create_sub_state_machine()
        self.main_state_machine = self.create_main_state_machine()

    def create_s3_bucket(self):
        """Bucket written to by the first Lambda."""
        return Bucket(
            self,
            'S3 Bucket',
            bucket_name='sfn-bucket-by-aws-cdk',
        )

    def create_managed_policy(self):
        """Policy allowing PutObject into the stack's bucket."""
        statement = PolicyStatement(
            effect=Effect.ALLOW,
            actions=[
                "s3:PutObject",
            ],
            resources=[
                f'{self.bucket.bucket_arn}/*',
            ]
        )
        return ManagedPolicy(
            self,
            'Managed Policy',
            managed_policy_name='sfn_lambda_policy',
            statements=[statement],
        )

    def create_role(self):
        """Execution role assumed by the Lambda service."""
        service_principal = ServicePrincipal('lambda.amazonaws.com')
        return Role(
            self,
            'Role',
            assumed_by=service_principal,
            role_name='sfn_lambda_role',
            managed_policies=[self.managed_policy],
        )

    def create_first_lambda(self):
        """First Lambda: gets the scipy layer, the S3 role and the bucket env var."""
        function_path = str(self.lambda_path_base.joinpath('first'))
        code = AssetCode(function_path)
        scipy_layer = LayerVersion.from_layer_version_arn(
            self, 'sfn_scipy_layer_for_first', AWS_SCIPY_ARN)
        return Function(
            self,
            'id_first',
            # directory that contains the Lambda source code
            code=code,
            # handler name inside that source
            handler='lambda_function.lambda_handler',
            # runtime selection
            runtime=Runtime.PYTHON_3_7,
            # environment variables
            environment={'BUCKET_NAME': self.bucket.bucket_name},
            function_name='sfn_first_lambda',
            layers=[scipy_layer],
            memory_size=128,
            role=self.role,
            timeout=core.Duration.seconds(10),
        )

    def create_other_lambda(self, function_name):
        """Plain Lambda (no layer, default role) named sfn_<function_name>_lambda."""
        function_path = str(self.lambda_path_base.joinpath(function_name))
        return Function(
            self,
            f'id_{function_name}',
            code=AssetCode(function_path),
            handler='lambda_function.lambda_handler',
            runtime=Runtime.PYTHON_3_7,
            function_name=f'sfn_{function_name}_lambda',
            memory_size=128,
            timeout=core.Duration.seconds(10),
        )

    def create_sub_state_machine(self):
        """Sub state machine: second task -> third task, both with error catch."""
        error_task = Task(
            self,
            'Error Task',
            task=InvokeFunction(self.error_lambda),
        )
        # second task
        second_task = Task(
            self,
            'Second Task',
            task=InvokeFunction(self.second_lambda),
            # narrow the incoming items before passing them to the Lambda
            input_path="$['first_result', 'parallel_no', 'message', 'context_name', 'const_value']",
            # store the result under 'second_result'
            result_path='$.second_result',
            # narrow what is handed to the next task
            output_path="$['second_result', 'parallel_no']"
        )
        # add error handling
        second_task.add_catch(error_task, errors=['States.ALL'])
        # third task
        third_task = Task(
            self,
            'Third Task',
            task=InvokeFunction(self.third_lambda),
            # replace the state entirely with third_lambda's result
            result_path='$',
        )
        # error handling here as well
        third_task.add_catch(error_task, errors=['States.ALL'])
        # run the third task after the second
        definition = second_task.next(third_task)
        return StateMachine(
            self,
            'Sub StateMachine',
            definition=definition,
            state_machine_name='sfn_sub_state_machine',
        )

    def create_main_state_machine(self):
        """Main state machine: first task, then the parallel fan-out."""
        first_task = Task(
            self,
            'S3 Lambda Task',
            task=InvokeFunction(self.first_lambda, payload={'message': 'Hello world'}),
            comment='Main StateMachine',
        )
        parallel_task = self.create_parallel_task()
        # after the first task, run the parallel task (sub state machines)
        definition = first_task.next(parallel_task)
        return StateMachine(
            self,
            'Main StateMachine',
            definition=definition,
            state_machine_name='sfn_main_state_machine',
        )

    def create_parallel_task(self):
        """Parallel state with three branches, each starting the sub state machine."""
        parallel_task = Parallel(
            self,
            'Parallel Task',
        )
        for i in range(1, 4):
            sub_task = StartExecution(
                self.sub_state_machine,
                input={
                    'parallel_no': i,
                    'first_result.$': '$',
                    # 'message' from first_task's response
                    'message.$': '$.message',
                    # name taken from the context object
                    'context_name.$': '$$.State.Name',
                    # two constants (ignore_value is dropped by the task input filter)
                    'const_value': 'ham',
                    'ignore_value': 'ignore',
                },
            )
            invoke_sub_task = Task(
                self,
                f'Sub Task {i}',
                task=sub_task,
            )
            parallel_task.branch(invoke_sub_task)
        return parallel_task
|
{
"imported_by": [],
"imports": [
"/step_functions/step_functions/step_functions_stack.py"
]
}
|
greenmato/slackline-spots
|
/spots-api/map/api.py
|
from abc import ABC, ABCMeta, abstractmethod
from django.forms.models import model_to_dict
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from map.models import Spot
from map.models import Vote
from map.forms import SpotForm, VoteForm
class BaseApi(View):
    """Shared JSON envelope helpers for the map API views."""
    # Py2-style declaration, ignored by Python 3; kept for behavioural parity.
    __metaclass__ = ABCMeta

    def _response(self, body):
        """Wrap *body* in the {'data': ...} success envelope."""
        return JsonResponse({'data': body})

    def _error_response(self, status, error):
        """Wrap *error* in the {'error': ...} envelope with HTTP *status*."""
        return JsonResponse({'error': error}, status=status)
class BaseSpotsApi(BaseApi):
    """Adds Spot serialisation on top of BaseApi."""
    # Py2-style declaration, ignored by Python 3; kept for behavioural parity.
    __metaclass__ = ABCMeta

    def _spot_to_dict(self, spot):
        """Serialise a Spot plus its computed vote score."""
        serialised = model_to_dict(spot)
        serialised['score'] = spot.get_score()
        return serialised
# @method_decorator(csrf_exempt, name='dispatch')
class SpotsApi(BaseSpotsApi):
    """Collection endpoint: list all spots, create a new spot."""

    def get(self, request):
        # TODO: only retrieve nearest spots and make them dynamically load as the map moves
        nearby_spots = [self._spot_to_dict(spot) for spot in Spot.objects.all()]
        return self._response(nearby_spots)

    def post(self, request):
        form = SpotForm(request.POST)
        if form.is_valid():
            # Use the validated/coerced values (DecimalField etc.) instead of
            # the raw request.POST strings the previous version stored.
            new_spot = Spot(
                name=form.cleaned_data['name'],
                description=form.cleaned_data['description'],
                latitude=form.cleaned_data['latitude'],
                longitude=form.cleaned_data['longitude'],
            )
            new_spot.save()
            return self._response(self._spot_to_dict(new_spot))
        return self._error_response(422, 'Invalid input.')
class SpotApi(BaseSpotsApi):
    """Single-spot read endpoint."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        payload = self._spot_to_dict(spot)
        return self._response(payload)
# @method_decorator(csrf_exempt, name='dispatch')
class RatingsApi(BaseApi):
    """Per-spot ratings endpoint.

    The previous version raised NameError on GET (`Rating` was not imported
    and `rating_type` was undefined) and both methods returned None, which
    Django rejects as a view result.
    """

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        # Spot.get_ratings_dict() already aggregates average score per rating type.
        return self._response(spot.get_ratings_dict())

    def post(self, request, spot_id):
        # Validate that the spot exists; creation is not implemented yet.
        get_object_or_404(Spot, pk=spot_id)
        return self._error_response(501, 'Not implemented.')
# @method_decorator(csrf_exempt, name='dispatch')
class VotesApi(BaseApi):
    """Per-spot vote endpoint: read the net score, cast a vote."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        return self._response(spot.get_score())

    def post(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        form = VoteForm(request.POST)
        if form.is_valid():
            # Bug fix: request.POST['positive'] is a *string*, so even
            # "false" was truthy and every vote counted as positive.
            # form.cleaned_data['positive'] is coerced to a real bool.
            new_vote = Vote(spot=spot, positive=form.cleaned_data['positive'])
            new_vote.save()
            return self._response(model_to_dict(new_vote))
        return self._error_response(422, 'Invalid input.')
|
from django import forms
from django.forms import ModelForm, Textarea
from map.models import Spot, Rating, Vote
class SpotForm(ModelForm):
    # Validates spot creation; coordinates arrive from the map widget,
    # so they are hidden rather than user-typed.
    class Meta:
        model = Spot
        fields = ['name', 'description', 'latitude', 'longitude']
        widgets = {
            'latitude': forms.HiddenInput(),
            'longitude': forms.HiddenInput(),
        }
class RatingForm(ModelForm):
    # Validates a rating; spot and rating type are supplied by the page,
    # only the score is user-entered.
    class Meta:
        model = Rating
        fields = ['spot', 'rating_type', 'score']
        widgets = {
            'spot': forms.HiddenInput(),
            'rating_type': forms.HiddenInput(),
        }
class VoteForm(ModelForm):
    # Validates a single up/down vote; the boolean is set by the page.
    class Meta:
        model = Vote
        fields = ['positive']
        widgets = {
            'positive': forms.HiddenInput(),
        }
--- FILE SEPARATOR ---
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class Spot(models.Model):
    """A slackline spot with coordinates, votes and per-type ratings."""
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=500)
    latitude = models.DecimalField(max_digits=10, decimal_places=7)
    longitude = models.DecimalField(max_digits=10, decimal_places=7)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "Spot %s - %s: %s" % (self.id, self.name, self.description)

    def get_score(self):
        """Net vote score: +1 per positive vote, -1 per negative."""
        total = 0
        for vote in Vote.objects.filter(spot=self.id):
            total += 1 if vote.positive else -1
        return total

    def get_ratings_dict(self):
        """Average score per rating-type name, rounded to 2 decimals.

        NOTE(review): each type's sum is divided by the TOTAL number of
        ratings across all types — confirm this is intended rather than a
        per-type count.
        """
        ratings = Rating.objects.filter(spot=self.id)
        totals = {}
        for rating in ratings:
            type_name = rating.rating_type.name
            totals[type_name] = totals.get(type_name, 0) + rating.score
        return {t: round(s / ratings.count(), 2) for t, s in totals.items()}
class RatingType(models.Model):
    # Name of a rating category, referenced by Rating.
    name = models.CharField(max_length=50)

    def __str__(self):
        rating_type = self.name
        return rating_type
class Rating(models.Model):
    # A 1-10 score of one spot in one rating category.
    spot = models.ForeignKey(Spot, on_delete=models.CASCADE)
    rating_type = models.ForeignKey(RatingType, on_delete=models.CASCADE)
    score = models.IntegerField(
        validators=[
            MaxValueValidator(10),
            MinValueValidator(1)
        ]
    )
class Vote(models.Model):
    # A single up (positive=True) or down vote on a spot.
    spot = models.ForeignKey(Spot, on_delete=models.CASCADE)
    positive = models.BooleanField()
|
{
"imported_by": [
"/spots-api/map/urls.py"
],
"imports": [
"/spots-api/map/forms.py",
"/spots-api/map/models.py"
]
}
|
greenmato/slackline-spots
|
/spots-api/map/forms.py
|
from django import forms
from django.forms import ModelForm, Textarea
from map.models import Spot, Rating, Vote
class SpotForm(ModelForm):
    # Validates spot creation; coordinates arrive from the map widget,
    # so they are hidden rather than user-typed.
    class Meta:
        model = Spot
        fields = ['name', 'description', 'latitude', 'longitude']
        widgets = {
            'latitude': forms.HiddenInput(),
            'longitude': forms.HiddenInput(),
        }
class RatingForm(ModelForm):
    # Validates a rating; spot and rating type are supplied by the page,
    # only the score is user-entered.
    class Meta:
        model = Rating
        fields = ['spot', 'rating_type', 'score']
        widgets = {
            'spot': forms.HiddenInput(),
            'rating_type': forms.HiddenInput(),
        }
class VoteForm(ModelForm):
    # Validates a single up/down vote; the boolean is set by the page.
    class Meta:
        model = Vote
        fields = ['positive']
        widgets = {
            'positive': forms.HiddenInput(),
        }
|
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class Spot(models.Model):
    """A slackline spot with coordinates, votes and per-type ratings."""
    name = models.CharField(max_length=50)
    description = models.CharField(max_length=500)
    latitude = models.DecimalField(max_digits=10, decimal_places=7)
    longitude = models.DecimalField(max_digits=10, decimal_places=7)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "Spot %s - %s: %s" % (self.id, self.name, self.description)

    def get_score(self):
        """Net vote score: +1 per positive vote, -1 per negative."""
        total = 0
        for vote in Vote.objects.filter(spot=self.id):
            total += 1 if vote.positive else -1
        return total

    def get_ratings_dict(self):
        """Average score per rating-type name, rounded to 2 decimals.

        NOTE(review): each type's sum is divided by the TOTAL number of
        ratings across all types — confirm this is intended rather than a
        per-type count.
        """
        ratings = Rating.objects.filter(spot=self.id)
        totals = {}
        for rating in ratings:
            type_name = rating.rating_type.name
            totals[type_name] = totals.get(type_name, 0) + rating.score
        return {t: round(s / ratings.count(), 2) for t, s in totals.items()}
class RatingType(models.Model):
    # Name of a rating category, referenced by Rating.
    name = models.CharField(max_length=50)

    def __str__(self):
        rating_type = self.name
        return rating_type
class Rating(models.Model):
    # A 1-10 score of one spot in one rating category.
    spot = models.ForeignKey(Spot, on_delete=models.CASCADE)
    rating_type = models.ForeignKey(RatingType, on_delete=models.CASCADE)
    score = models.IntegerField(
        validators=[
            MaxValueValidator(10),
            MinValueValidator(1)
        ]
    )
class Vote(models.Model):
    # A single up (positive=True) or down vote on a spot.
    spot = models.ForeignKey(Spot, on_delete=models.CASCADE)
    positive = models.BooleanField()
|
{
"imported_by": [
"/spots-api/map/api.py"
],
"imports": [
"/spots-api/map/models.py"
]
}
|
greenmato/slackline-spots
|
/spots-api/map/urls.py
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from map.views import MapView
from map.api import SpotsApi, SpotApi, RatingsApi, VotesApi
app_name = 'map'

# Map page plus the JSON API endpoints for spots, ratings and votes.
urlpatterns = [
    path('', MapView.as_view(), name='index'),
    path('spots/', SpotsApi.as_view()),
    path('spots/<int:spot_id>/', SpotApi.as_view()),
    path('spots/<int:spot_id>/ratings/', RatingsApi.as_view()),
    path('spots/<int:spot_id>/votes/', VotesApi.as_view()),
]

# Serve media files directly only in development.
# (was `settings.DEBUG is True`: identity comparison with the True
# singleton; a plain truthiness test is the idiomatic, equivalent form)
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from abc import ABC, ABCMeta, abstractmethod
from django.forms.models import model_to_dict
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from map.models import Spot
from map.models import Vote
from map.forms import SpotForm, VoteForm
class BaseApi(View):
    """Shared JSON envelope helpers for the map API views."""
    # Py2-style declaration, ignored by Python 3; kept for behavioural parity.
    __metaclass__ = ABCMeta

    def _response(self, body):
        """Wrap *body* in the {'data': ...} success envelope."""
        return JsonResponse({'data': body})

    def _error_response(self, status, error):
        """Wrap *error* in the {'error': ...} envelope with HTTP *status*."""
        return JsonResponse({'error': error}, status=status)
class BaseSpotsApi(BaseApi):
    """Adds Spot serialisation on top of BaseApi."""
    # Py2-style declaration, ignored by Python 3; kept for behavioural parity.
    __metaclass__ = ABCMeta

    def _spot_to_dict(self, spot):
        """Serialise a Spot plus its computed vote score."""
        serialised = model_to_dict(spot)
        serialised['score'] = spot.get_score()
        return serialised
# @method_decorator(csrf_exempt, name='dispatch')
class SpotsApi(BaseSpotsApi):
    """Collection endpoint: list all spots, create a new spot."""

    def get(self, request):
        # TODO: only retrieve nearest spots and make them dynamically load as the map moves
        nearby_spots = [self._spot_to_dict(spot) for spot in Spot.objects.all()]
        return self._response(nearby_spots)

    def post(self, request):
        form = SpotForm(request.POST)
        if form.is_valid():
            # Use the validated/coerced values (DecimalField etc.) instead of
            # the raw request.POST strings the previous version stored.
            new_spot = Spot(
                name=form.cleaned_data['name'],
                description=form.cleaned_data['description'],
                latitude=form.cleaned_data['latitude'],
                longitude=form.cleaned_data['longitude'],
            )
            new_spot.save()
            return self._response(self._spot_to_dict(new_spot))
        return self._error_response(422, 'Invalid input.')
class SpotApi(BaseSpotsApi):
    """Single-spot read endpoint."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        payload = self._spot_to_dict(spot)
        return self._response(payload)
# @method_decorator(csrf_exempt, name='dispatch')
class RatingsApi(BaseApi):
    """Per-spot ratings endpoint.

    The previous version raised NameError on GET (`Rating` was not imported
    and `rating_type` was undefined) and both methods returned None, which
    Django rejects as a view result.
    """

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        # Spot.get_ratings_dict() already aggregates average score per rating type.
        return self._response(spot.get_ratings_dict())

    def post(self, request, spot_id):
        # Validate that the spot exists; creation is not implemented yet.
        get_object_or_404(Spot, pk=spot_id)
        return self._error_response(501, 'Not implemented.')
# @method_decorator(csrf_exempt, name='dispatch')
class VotesApi(BaseApi):
    """Per-spot vote endpoint: read the net score, cast a vote."""

    def get(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        return self._response(spot.get_score())

    def post(self, request, spot_id):
        spot = get_object_or_404(Spot, pk=spot_id)
        form = VoteForm(request.POST)
        if form.is_valid():
            # Bug fix: request.POST['positive'] is a *string*, so even
            # "false" was truthy and every vote counted as positive.
            # form.cleaned_data['positive'] is coerced to a real bool.
            new_vote = Vote(spot=spot, positive=form.cleaned_data['positive'])
            new_vote.save()
            return self._response(model_to_dict(new_vote))
        return self._error_response(422, 'Invalid input.')
--- FILE SEPARATOR ---
from django.shortcuts import render
from django.views import View
class MapView(View):
    """Render the main slackline map page."""

    def get(self, request):
        template_name = 'map/index.html'
        return render(request, template_name)
|
{
"imported_by": [],
"imports": [
"/spots-api/map/api.py",
"/spots-api/map/views.py"
]
}
|
katrii/ohsiha
|
/ohjelma/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from ohjelma.models import Song
from ohjelma.models import Track
import json
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
def index(request):
    """Plain-text landing view."""
    greeting = 'Welcome.'
    return HttpResponse(greeting)
class SongList(ListView):
    # Read-only list of all Song records.
    model = Song
class SongView(DetailView):
    # Detail page for a single Song.
    model = Song
class SongCreate(CreateView):
    # Creation form for Song; redirects to the song list on success.
    model = Song
    fields = ['song_name', 'song_artist', 'release_year']
    success_url = reverse_lazy('song_list')
class SongUpdate(UpdateView):
    # Edit form for Song; redirects to the song list on success.
    model = Song
    fields = ['song_name', 'song_artist', 'release_year']
    success_url = reverse_lazy('song_list')
class SongDelete(DeleteView):
    # Delete confirmation for Song; redirects to the song list on success.
    model = Song
    success_url = reverse_lazy('song_list')
#Formatting the duration time
def MsFormat(milliseconds):
    """Convert a duration in milliseconds to a "m:ss" string.

    Seconds are always zero-padded to two digits (e.g. 65000 -> "1:05").

    Bug fixed: the previous version took the minutes modulo 60, so any
    duration of an hour or more wrapped around (3661000 ms rendered as
    "1:01" instead of "61:01"). Results for durations under an hour are
    unchanged.
    """
    total_seconds = int(milliseconds / 1000)
    minutes, seconds = divmod(total_seconds, 60)
    return "{}:{:02d}".format(minutes, seconds)
def TrackView(request, tracksyear):
    """Fetch the top Spotify tracks for *tracksyear*, store their audio
    features as Track rows, and render them with yearly averages.

    NOTE(security): the Spotify client id/secret are hardcoded below and
    committed to the repository — they should be moved to settings or
    environment variables and the exposed keys rotated.
    """
    Track.objects.all().delete() #Clear old info
    query = 'year:{}'.format(tracksyear)
    #Spotify developer keys
    cid = '8f91d5aff7b54e1e93daa49f123d9ee9'
    secret = 'f23421ee54b144cabeab9e2dbe9104a7'
    client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
    sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
    #Lists for counting year averages
    l_dance = []
    l_en = []
    l_aco = []
    l_val = []
    # Two pages of 50 results each (the API caps limit at 50)
    for i in range(0,100,50):
        track_results = sp.search(q=query, type='track', limit=50,offset=i)
        # NOTE: the inner loop reuses the name `i`, shadowing the page offset;
        # harmless here because `i` is not read again after the inner loop.
        for i, t in enumerate(track_results['tracks']['items']):
            # NOTE: `id` shadows the builtin within this loop body
            id = t['id']
            artist = t['artists'][0]['name']
            song = t['name']
            dur_ms = t['duration_ms']
            pop = t['popularity']
            dur = MsFormat(dur_ms)
            # One extra API call per track for its audio features
            trackinfo = sp.audio_features(id)
            dance = trackinfo[0]['danceability']
            en = trackinfo[0]['energy']
            key = trackinfo[0]['key']
            loud = trackinfo[0]['loudness']
            spee = trackinfo[0]['speechiness']
            aco = trackinfo[0]['acousticness']
            inst = trackinfo[0]['instrumentalness']
            live = trackinfo[0]['liveness']
            val = trackinfo[0]['valence']
            temp = trackinfo[0]['tempo']
            l_dance.append(dance)
            l_en.append(en)
            l_aco.append(aco)
            l_val.append(val)
            Track.objects.create(track_id = id, track_artist = artist,
            track_name = song, track_duration = dur, track_popularity = pop,
            track_danceability = dance, track_energy = en, track_key = key,
            track_loudness = loud, track_speechiness = spee,
            track_acousticness = aco, track_instrumentalness = inst,
            track_liveness = live, track_valence = val, track_tempo = temp)
    # Percentages for display (features are 0..1 in the API)
    avgdance = calculate_average(l_dance)*100
    avgene = calculate_average(l_en)*100
    avgaco = calculate_average(l_aco)*100
    avgval = calculate_average(l_val)*100
    alltracks = Track.objects.all()
    context = {'alltracks': alltracks, 'year': tracksyear, 'avgdance': avgdance, 'avgene': avgene, 'avgaco': avgaco, 'avgval': avgval}
    return render(request, 'tracks.html', context)
#View for each track detailed information
class Analysis(DetailView):
    # Detail view showing one Track's stored audio features.
    model = Track
#Takes a list (of numbers) as parameter, returns the average
def calculate_average(num):
    """Return the arithmetic mean of *num*.

    Uses the built-in sum() instead of the previous hand-rolled
    accumulation loop. Like before, an empty sequence raises
    ZeroDivisionError (callers always pass non-empty lists).
    """
    return sum(num) / len(num)
#View for analytics
def YearAnalysis(request):
    """Render analytics over 1980-2019 chart tracks.

    For each year, fetches 100 tracks (two pages of 50) from the Spotify
    search API, accumulates per-year feature averages and tracks the
    all-time most popular / danceable / energetic / acoustic / happiest
    songs, then renders analysis.html. Performs many network calls; slow.
    """
    # Spotify developer keys
    # SECURITY NOTE(review): credentials are hard-coded and committed; they
    # should come from settings / environment variables instead.
    cid = '8f91d5aff7b54e1e93daa49f123d9ee9'
    secret = 'f23421ee54b144cabeab9e2dbe9104a7'
    client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
    sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
    # Lists for saving yearly averages
    dance = []
    en = []
    aco = []
    val = []
    years = []
    most_populars = []
    most_danceable = ""
    best_dance = 0
    happiest = ""
    best_val = 0
    most_acoustic = ""
    best_aco = 0
    most_energetic = ""
    best_en = 0
    for year in range(1980, 2020):
        bestpop = 0
        mostpop = ""
        l_dance = []
        l_en = []
        l_aco = []
        l_val = []
        # Two result pages of 50 tracks each (offsets 0 and 50).
        for offset in range(0, 100, 50):
            query = 'year:{}'.format(year)
            track_results = sp.search(q=query, type='track', limit=50, offset=offset)
            # FIX: the original shadowed the page-offset loop variable `i`
            # with an unused enumerate index; the index is dropped entirely.
            for t in track_results['tracks']['items']:
                # Popularity check
                pop = t['popularity']
                if pop > bestpop:
                    mostpop = "{} by {}. Popularity: {}.".format(t['name'], t['artists'][0]['name'], pop)
                    bestpop = pop
                elif pop == bestpop:
                    mostpop = mostpop + " AND {} by {}. Popularity: {}.".format(t['name'], t['artists'][0]['name'], pop)
                # FIX: renamed `id` -> `t_id` to stop shadowing the builtin.
                t_id = t['id']
                trackinfo = sp.audio_features(t_id)
                d = trackinfo[0]['danceability']
                e = trackinfo[0]['energy']
                a = trackinfo[0]['acousticness']
                v = trackinfo[0]['valence']
                l_dance.append(d)
                l_en.append(e)
                l_aco.append(a)
                l_val.append(v)
                if d > best_dance:
                    most_danceable = "{} by {}. ({}) Danceability: {}.".format(t['name'], t['artists'][0]['name'], year, d)
                    best_dance = d
                elif d == best_dance:
                    most_danceable = most_danceable + " AND {} by {}. ({}) Danceability: {}.".format(t['name'], t['artists'][0]['name'], year, d)
                if e > best_en:
                    most_energetic = "{} by {}. ({}) Energy: {}.".format(t['name'], t['artists'][0]['name'], year, e)
                    best_en = e
                elif e == best_en:
                    most_energetic = most_energetic + " AND {} by {}. ({}) Energy: {}.".format(t['name'], t['artists'][0]['name'], year, e)
                if a > best_aco:
                    most_acoustic = "{} by {}. ({}) Acousticness: {}.".format(t['name'], t['artists'][0]['name'], year, a)
                    best_aco = a
                elif a == best_aco:
                    most_acoustic = most_acoustic + " AND {} by {}. ({}) Acousticness: {}.".format(t['name'], t['artists'][0]['name'], year, a)
                if v > best_val:
                    happiest = "{} by {}. ({}) Valence: {}.".format(t['name'], t['artists'][0]['name'], year, v)
                    best_val = v
                elif v == best_val:
                    happiest = happiest + " AND {} by {}. ({}) Valence: {}.".format(t['name'], t['artists'][0]['name'], year, v)
        # Calculate year averages and add to lists
        dance.append(calculate_average(l_dance))
        en.append(calculate_average(l_en))
        aco.append(calculate_average(l_aco))
        val.append(calculate_average(l_val))
        years.append(year)
        most_populars.append(mostpop)
    # Zip year and most popular song to a list of 2-valued tuples
    yearly_populars = zip(years, most_populars)
    context = {"years": years, "danceability": dance, "energy": en,
               "acousticness": aco, "valence": val, "yearly_populars": yearly_populars,
               "most_acoustic": most_acoustic, "most_energetic": most_energetic,
               "most_danceable": most_danceable, "happiest": happiest}
    return render(request, 'analysis.html', context)
|
from django.db import models
from django.urls import reverse
class Question(models.Model):
    """A poll question with its publication date."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('Date published')

    def __str__(self):
        # Consistency: Song and Track in this module define __str__ for a
        # readable admin/shell representation; Question lacked one.
        return self.question_text
class Choice(models.Model):
    """One selectable answer for a Question, with its vote count."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        # Consistency: matches the __str__ convention of Song and Track.
        return self.choice_text
class Song(models.Model):
    """A user-entered song: name, artist and release year."""
    song_name = models.CharField(max_length=200)
    song_artist = models.CharField(max_length = 200)
    release_year = models.IntegerField(default=2000)

    def __str__(self):
        return self.song_name

    def get_absolute_url(self):
        # NOTE(review): presumably used by generic edit views to redirect
        # after save — verify the 'song_edit' URL name exists in urls.py.
        return reverse('song_edit', kwargs={'pk': self.pk})
class Track(models.Model):
    """A Spotify track and its audio features, cached locally."""
    track_id = models.CharField(max_length=30)
    track_name = models.CharField(max_length=500)
    track_artist = models.CharField(max_length=500)
    track_duration = models.CharField(max_length=10)
    track_popularity = models.IntegerField(default=100)
    # FIX: max_length is only meaningful on CharField; Django's system
    # checks flag it on IntegerField (fields.W122) and it is ignored on
    # FloatField, so the spurious max_length arguments are removed.
    track_danceability = models.FloatField()
    track_energy = models.FloatField()
    track_key = models.IntegerField()
    track_loudness = models.FloatField()
    track_speechiness = models.FloatField()
    track_acousticness = models.FloatField()
    track_instrumentalness = models.FloatField()
    track_liveness = models.FloatField()
    track_valence = models.FloatField()
    track_tempo = models.FloatField()

    def __str__(self):
        return self.track_name
|
{
"imported_by": [],
"imports": [
"/ohjelma/models.py"
]
}
|
lukasld/Flask-Video-Editor
|
/app/api/VideoProcessing.py
|
from werkzeug.utils import secure_filename
from functools import partial
import subprocess as sp
import time
import skvideo.io
import numpy as np
import threading
import ffmpeg
import shlex
import cv2
import re
from PIL import Image
from werkzeug.datastructures import FileStorage as FStorage
from .. import VIDEO_EXTENSION, VIDEO_WIDTH, VIDEO_HEIGHT, \
VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION
from . import utils
from . errors import IncorrectVideoFormat, InvalidFilterParams, InvalidAPIUsage
from . decorators import exception_handler
# Bytes in one raw bgr24 frame (3 bytes per pixel).
FRAME_SIZE = VIDEO_WIDTH * VIDEO_HEIGHT * 3
FRAME_WH = (VIDEO_WIDTH, VIDEO_HEIGHT)
# Decode stdin to raw bgr24 frames on stdout; no audio/subtitles, quiet log.
FFMPEG_COMMAND = 'ffmpeg -i pipe: -f rawvideo -pix_fmt bgr24 -an -sn pipe: -loglevel quiet'
# Length of the generated session/video identifiers.
ID_LEN = 32
class Frame:
    """Single-frame access helpers for a stored video identified by id."""

    def __init__(self, id=None):
        # id: session/video identifier used to locate files on disk.
        self.id = id

    @exception_handler(ex=IncorrectVideoFormat, type=2)
    def from_bytes(self, in_bytes: bytes) -> np.ndarray:
        """Decode one raw bgr24 buffer into an RGB ndarray.

        The decorator converts any failure (e.g. a buffer that does not
        reshape to (VIDEO_HEIGHT, VIDEO_WIDTH, 3)) into
        IncorrectVideoFormat(2).
        """
        frame_arr = np.frombuffer(in_bytes, np.uint8)
        f_arr = frame_arr.reshape([VIDEO_HEIGHT, VIDEO_WIDTH, 3])
        return utils.bgr_to_rgb(f_arr)

    def f_save(self, frame: np.ndarray, frame_id: str) -> None:
        """Save *frame* as an image under the frames upload path."""
        upload_path = utils.create_frame_path(frame_id)
        if utils.is_rgb(frame):
            Image.fromarray(frame).save(upload_path)
            return
        # 2-D (greyscale) frames are wrapped into a PIL image by a helper.
        utils.img_from_greyscale(frame).save(upload_path)
        return

    def get_by_idx(self, frame_idx):
        """Return frame *frame_idx* of this video as a BGR ndarray."""
        vid = utils.create_vid_path(self.id)
        cap = cv2.VideoCapture(vid)
        cap.set(1, frame_idx)  # 1 == cv2.CAP_PROP_POS_FRAMES
        _, frame = cap.read()
        return frame
class VideoUploader(Frame):
    """Pipes an uploaded byte stream through ffmpeg, re-encoding the video
    to disk and saving the first frame as a thumbnail."""

    def __init__(self):
        # A fresh random id names the stored video and its thumbnail.
        id = utils.id_generator(ID_LEN)
        super().__init__(id)
        self.frame_count = 0

    def upload_from_bytestream(self, byte_stream: FStorage):
        """Stream *byte_stream* into ffmpeg while reading decoded frames back.

        A writer thread feeds ffmpeg's stdin so that reading stdout here
        cannot deadlock against a full pipe buffer.
        """
        video_f_path = utils.create_vid_path(self.id)
        sk_writer = utils.create_sk_video_writer(video_f_path)
        sh_command = shlex.split(FFMPEG_COMMAND)
        process = sp.Popen(sh_command, stdin=sp.PIPE, stdout=sp.PIPE, bufsize=10**8)
        thread = threading.Thread(target=self._writer, args=(process, byte_stream, ))
        thread.start()
        while True:
            # One raw bgr24 frame per read; empty read means ffmpeg is done.
            in_bytes = process.stdout.read(FRAME_SIZE)
            if not in_bytes: break
            frame = self.from_bytes(in_bytes)
            self.frame_count += 1
            # The first decoded frame doubles as the preview thumbnail.
            if self.frame_count == 1: self.f_save(frame, self.id)
            sk_writer.writeFrame(frame)
        thread.join()
        sk_writer.close()

    def _writer(self, process, byte_stream):
        # Pump the upload into ffmpeg's stdin in 1 KiB chunks.
        for chunk in iter(partial(byte_stream.read, 1024), b''):
            process.stdin.write(chunk)
        try:
            process.stdin.close()
        except (BrokenPipeError):
            # ffmpeg may already have exited; closing is best-effort.
            pass
class Filter:
    """Applies a named OpenCV filter to self.img, dispatched via filter_map."""

    def __init__(self, img=None):
        self.img = img

    def applyCanny(self, params):
        """Canny edge detection; requires 'thresh1' and 'thresh2' params."""
        if 'thresh1' in params and 'thresh2' in params:
            gs_img = self.applyGreyScale(params)
            return cv2.Canny(gs_img,
                             int(params['thresh1']),
                             int(params['thresh2']))
        raise InvalidFilterParams(3, 'canny')

    def applyGauss(self, params):
        """Gaussian blur; requires odd 'ksize_x' and 'ksize_y' params.

        BUG FIX: the original condition `'ksize_x' and 'ksize_y' in params`
        only tested membership of 'ksize_y' (the literal 'ksize_x' is always
        truthy), so a missing 'ksize_x' escaped as a KeyError instead of
        InvalidFilterParams; it also applied `% 2` to the raw (possibly
        string) values. Both keys are now checked and cast before the
        odd-kernel validation (cv2.GaussianBlur requires odd sizes).
        """
        if 'ksize_x' in params and 'ksize_y' in params and \
                int(params['ksize_x']) % 2 != 0 and \
                int(params['ksize_y']) % 2 != 0:
            g_img = self.img.copy()
            if np.ndim(g_img) == 3: g_img = utils.bgr_to_rgb(g_img)
            return cv2.GaussianBlur(g_img,
                                    (int(params["ksize_x"]), int(params["ksize_y"])), 0)
        raise InvalidFilterParams(3, 'gauss')

    def applyGreyScale(self, _):
        """Convert self.img to a single-channel greyscale image."""
        c_img = self.img.copy()
        return cv2.cvtColor(c_img, cv2.COLOR_RGB2GRAY)

    def applyLaplacian(self, params):
        """Laplacian edge filter applied to the greyscale image."""
        gs_img = self.applyGreyScale(params)
        return cv2.Laplacian(gs_img, cv2.CV_8U)

    def run_func(self, params):
        """Dispatch on params['type']; unknown names raise InvalidFilterParams(2)."""
        if params["type"] in self.filter_map:
            # filter_map stores plain functions; __get__ binds them to self.
            func = self.filter_map[params["type"]].__get__(self, type(self))
            return func(params)
        raise InvalidFilterParams(2)

    def _default(self, _):
        # Empty type: no filtering, just BGR -> RGB conversion.
        return utils.bgr_to_rgb(self.img)

    # Maps the JSON 'type' value to the (unbound) filter implementation.
    filter_map = {'canny': applyCanny,
                  'gauss': applyGauss,
                  'greyscale': applyGreyScale,
                  'laplacian': applyLaplacian,
                  '': _default}
class VideoDownloader(Frame, Filter):
    """Re-encodes a stored video with a filter applied, optionally only
    inside a [min, max] frame range."""

    def __init__(self, fps, vid_range=None):
        Frame.__init__(self)
        Filter.__init__(self)
        self.fps = fps
        # vid_range: optional (min_frame, max_frame) pair limiting filtering.
        self.vid_range = vid_range
        self.curr_f_frame = None
        if vid_range:
            self.range_min = vid_range[0]
            self.range_max = vid_range[1]

    def download(self, s_id, tot_video_frames, params):
        """Write the filtered video to disk; return its file name (no ext)."""
        f_vid_name = f'{s_id}_{params["type"]}'
        video_f_path = utils.create_vid_path(f_vid_name)
        local_vid = cv2.VideoCapture(utils.create_vid_path(s_id))
        vid_writer = utils.create_sk_video_writer(video_f_path, self.fps)
        for i in range(tot_video_frames-1):
            # Progress is published to the cache so /status/ can report it.
            utils.set_cache_f_count(s_id, 'd', i)
            _, curr_frame = local_vid.read()
            if curr_frame is None: break
            self.img = curr_frame
            f_frame = self._filter_apply(i, params)
            vid_writer.writeFrame(f_frame)
        vid_writer.close()
        return f_vid_name

    def _filter_apply(self, i, params):
        """
        we simply check if a range is given,
        then if we get a gs-img from the filter we add three dimensions
        """
        if self.vid_range:
            if(i >= self.vid_range[0] and
               i <= self.vid_range[1]):
                f_frame = self.run_func(params)
                if not utils.is_rgb(f_frame):
                    # Stack the greyscale plane into 3 channels for the writer.
                    return np.dstack(3*[f_frame])
                return f_frame
            else:
                # Outside the requested range: pass the frame through unfiltered.
                return self.run_func({"type":""})
        else:
            return self.run_func(params)
|
from flask import request, jsonify
from functools import wraps
from .errors import InvalidAPIUsage, InvalidFilterParams, IncorrectVideoFormat
"""
Almost like an Architect - makes decorations
"""
def decorator_maker(func):
    """Build a parameterized decorator around validator *func*.

    *func* runs before the wrapped endpoint; when does_return is truthy,
    the endpoint is called with func's result instead of its own arguments.
    """
    def param_decorator(fn=None, does_return=None, req_c_type=None, req_type=None, arg=None, session=None):
        def deco(fn):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                # Validator may raise (aborting the request) or produce a
                # value to forward to the endpoint.
                result = func(does_return, req_c_type, req_type, arg, session)
                if does_return:
                    return fn(result)
                return fn(*args, **kwargs)
            return wrapper
        # Supports both bare @decorator and @decorator(...) usage.
        if callable(fn): return deco(fn)
        return deco
    return param_decorator
"""
Checks if user input is not out of bounds, and also Content-Type
"""
def wrap_param_check(does_return, req_c_type, req_type, arg, session):
    """Validator: enforce Content-Type, then validate the filter params.

    Returns check_correct_filter_params' clamped [min_f, max_f] (or None).
    """
    check_content_type(req_c_type)
    return check_correct_filter_params(session)
def check_content_type(req_c_type):
    """400 when the request Content-Type doesn't start with *req_c_type*."""
    if not request.content_type.startswith(req_c_type):
        raise InvalidAPIUsage(f'Content-Type should be of type: {req_c_type}', 400)
def check_correct_filter_params(session):
    """Validate the JSON body's filter parameters.

    Returns a sorted, bounds-clamped [min_f, max_f] pair when a frame range
    is supplied, otherwise None.

    Raises:
        InvalidFilterParams: when required keys are missing or malformed.
    """
    if request.data:
        data = request.get_json()
        # BUG FIX: the original subscripted data['filter_params'] BEFORE
        # testing 'filter_params' in data, so a missing key surfaced as an
        # unhandled KeyError instead of InvalidFilterParams(1).
        if 'filter_params' not in data:
            raise InvalidFilterParams(1)
        f_params = data['filter_params']
        if 'type' not in f_params:
            raise InvalidFilterParams(1)
        if 'download' in request.url:
            # Download requests additionally require an output frame rate.
            if 'fps' not in data:
                raise InvalidFilterParams(1)
        if 'max_f' in f_params and 'min_f' in f_params:
            max_fr = session['video_frame_count']
            min_f_raw = f_params['min_f']
            max_f_raw = f_params['max_f']
            # Empty strings mean "use the full range".
            if min_f_raw == "": min_f_raw = 0
            if max_f_raw == "": max_f_raw = max_fr
            # Type-validate; raises InvalidFilterParams(4) on failure.
            # (The original bound the results to unused locals.)
            _check_for_req_type(int, min_f_raw, 4)
            _check_for_req_type(int, max_f_raw, 4)
            a = check_bounds(min_f_raw, max_fr)
            b = check_bounds(max_f_raw, max_fr)
            return sorted([a, b])
def _check_for_req_type(req_type, val, ex):
try:
req_type(val)
except Exception:
raise InvalidFilterParams(ex)
return val
# Endpoint decorator: Content-Type + filter-parameter validation.
parameter_check = decorator_maker(wrap_param_check)
"""
Checks if user input is not out of bounds, and also Content-Type
"""
def wrap_url_arg_check(does_return, req_c_type, req_type, arg, session):
    """Validator: type-check URL argument *arg*, then clamp it to the
    session's stored frame count."""
    check_arg_urls(req_type, arg)
    frame_idx = request.view_args[arg]
    return check_bounds(frame_idx, session['video_frame_count'])
def check_arg_urls(req_type, arg):
    """400 when URL argument *arg* can't be converted to *req_type*."""
    try:
        req_type(request.view_args[arg])
    except ValueError:
        raise InvalidAPIUsage(f'Content-Type should be of type: {req_type.__name__}', 400)
def check_bounds(frame_idx, max_frames):
    """Clamp *frame_idx* into the valid frame range.

    Indices above *max_frames* are pulled back to max_frames - 50 (a margin
    before the end of the video); indices below 1 snap to the first frame.
    """
    upper = int(max_frames)
    idx = int(frame_idx)
    if idx > upper:
        return upper - 50
    if idx < 1:
        return 1
    return idx
# Endpoint decorator: URL-argument type and bounds validation.
url_arg_check = decorator_maker(wrap_url_arg_check)
"""
Checks Video Metadata
"""
def wrap_metadata_check(does_return, req_c_type, req_type, arg, session):
    """Validator: ensure the uploaded file's Content-Type equals *req_type*."""
    check_metadata(req_type)
def check_metadata(req_type):
    """Raise IncorrectVideoFormat(1) unless the uploaded file is *req_type*."""
    byteStream = request.files['file']
    # Reads the part's own Content-Type header from the FileStorage object.
    vid_type = byteStream.__dict__['headers'].get('Content-Type')
    if vid_type != req_type:
        raise IncorrectVideoFormat(1)
# Endpoint decorator: uploaded-file Content-Type validation.
metadata_check = decorator_maker(wrap_metadata_check)
"""
Excpetion Handler for non-Endpoints
"""
def exception_handler(fn=None, ex=None, type=None, pas=False):
    """Decorator for non-endpoint helpers: convert any exception raised by
    the wrapped function into ex(type), or swallow it when pas=True.

    Usable bare (@exception_handler) or with arguments.
    """
    def deco(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # BUG FIX: the original called fn twice on success (once in the
            # try, once in the trailing return), duplicating side effects;
            # and with pas=True a failing fn was re-invoked after `pass`,
            # so the exception escaped anyway. Call exactly once.
            try:
                return fn(*args, **kwargs)
            except Exception:
                if not pas:
                    raise ex(type)
                return None
        return wrapper
    if callable(fn): return deco(fn)
    return deco
--- FILE SEPARATOR ---
import sys
import traceback
from flask import jsonify, request
from . import api
class InvalidAPIUsage(Exception):
    """Base API error: carries message, request path and HTTP status."""
    status_code = 400

    def __init__(self, message='', status_code=None):
        super().__init__()
        self.message = message
        self.path = request.path
        # BUG FIX: the original only assigned self.status_code when the
        # argument was None, so an explicit status_code (e.g. 404) was
        # silently ignored and the class default (400) always won.
        if status_code is None:
            status_code = InvalidAPIUsage.status_code
        self.status_code = status_code

    def to_dict(self):
        """Serialize for jsonify() in the error handler."""
        rv = {}
        rv['path'] = self.path
        rv['status'] = self.status_code
        rv['message'] = self.message
        return rv
class IncorrectVideoFormat(InvalidAPIUsage):
    """Raised for uploads with the wrong container type or dimensions."""

    def __init__(self, message_id):
        super().__init__()
        # msg is the class-level table below; attribute lookup at runtime.
        self.message = self.msg[message_id]

    # message_id -> canned error text.
    msg = {1:'Incorrect video type: only RGB - Type=video/mp4 allowed',
           2:'Incorrect video dimensions: only 720p supported (1280*720)'}
class InvalidFilterParams(InvalidAPIUsage):
    """Raised for missing/unknown/invalid filter or download parameters."""

    def __init__(self, message_id, filter_name=''):
        super().__init__()
        self.message = self.msg(message_id, filter_name)

    def msg(self, id, filter_name):
        """Return the canned message for *id*, interpolating *filter_name*."""
        # TODO:Lukas [07252021] messges could be stored in static files as JSON
        avail_msg = {1:'Incorrect filter parameters: should be {"fps": "<fps: float>", "filter_params":{"type":"<filter: str>"}} \
or for default preview, {"filter_params":{"type":""}}',
                     2:f'Incorrect filter parameters: filter does not exist, for more go to /api/v1/help/filters/',
                     3:f'Incorrect filter parameters: required parameters are missing or invalid, for more go to /api/v1/help/filters/{filter_name}/',
                     4:f'Incorrect download parameters: for more go to /api/v1/help/download/',
                    }
        return avail_msg[id]
@api.errorhandler(InvalidAPIUsage)
def invalid_api_usage(e):
    # Blueprint-wide handler: serialize any InvalidAPIUsage as JSON + 400.
    return jsonify(e.to_dict()), 400
|
{
"imported_by": [
"/app/api/videoApi.py"
],
"imports": [
"/app/api/decorators.py",
"/app/api/errors.py"
]
}
|
lukasld/Flask-Video-Editor
|
/app/api/decorators.py
|
from flask import request, jsonify
from functools import wraps
from .errors import InvalidAPIUsage, InvalidFilterParams, IncorrectVideoFormat
"""
Almost like an Architect - makes decorations
"""
def decorator_maker(func):
    """Build a parameterized decorator around validator *func*.

    *func* runs before the wrapped endpoint; when does_return is truthy,
    the endpoint is called with func's result instead of its own arguments.
    """
    def param_decorator(fn=None, does_return=None, req_c_type=None, req_type=None, arg=None, session=None):
        def deco(fn):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                # Validator may raise (aborting the request) or produce a
                # value to forward to the endpoint.
                result = func(does_return, req_c_type, req_type, arg, session)
                if does_return:
                    return fn(result)
                return fn(*args, **kwargs)
            return wrapper
        # Supports both bare @decorator and @decorator(...) usage.
        if callable(fn): return deco(fn)
        return deco
    return param_decorator
"""
Checks if user input is not out of bounds, and also Content-Type
"""
def wrap_param_check(does_return, req_c_type, req_type, arg, session):
    """Validator: enforce Content-Type, then validate the filter params.

    Returns check_correct_filter_params' clamped [min_f, max_f] (or None).
    """
    check_content_type(req_c_type)
    return check_correct_filter_params(session)
def check_content_type(req_c_type):
    """400 when the request Content-Type doesn't start with *req_c_type*."""
    if not request.content_type.startswith(req_c_type):
        raise InvalidAPIUsage(f'Content-Type should be of type: {req_c_type}', 400)
def check_correct_filter_params(session):
    """Validate the JSON body's filter parameters.

    Returns a sorted, bounds-clamped [min_f, max_f] pair when a frame range
    is supplied, otherwise None.

    Raises:
        InvalidFilterParams: when required keys are missing or malformed.
    """
    if request.data:
        data = request.get_json()
        # BUG FIX: the original subscripted data['filter_params'] BEFORE
        # testing 'filter_params' in data, so a missing key surfaced as an
        # unhandled KeyError instead of InvalidFilterParams(1).
        if 'filter_params' not in data:
            raise InvalidFilterParams(1)
        f_params = data['filter_params']
        if 'type' not in f_params:
            raise InvalidFilterParams(1)
        if 'download' in request.url:
            # Download requests additionally require an output frame rate.
            if 'fps' not in data:
                raise InvalidFilterParams(1)
        if 'max_f' in f_params and 'min_f' in f_params:
            max_fr = session['video_frame_count']
            min_f_raw = f_params['min_f']
            max_f_raw = f_params['max_f']
            # Empty strings mean "use the full range".
            if min_f_raw == "": min_f_raw = 0
            if max_f_raw == "": max_f_raw = max_fr
            # Type-validate; raises InvalidFilterParams(4) on failure.
            # (The original bound the results to unused locals.)
            _check_for_req_type(int, min_f_raw, 4)
            _check_for_req_type(int, max_f_raw, 4)
            a = check_bounds(min_f_raw, max_fr)
            b = check_bounds(max_f_raw, max_fr)
            return sorted([a, b])
def _check_for_req_type(req_type, val, ex):
try:
req_type(val)
except Exception:
raise InvalidFilterParams(ex)
return val
# Endpoint decorator: Content-Type + filter-parameter validation.
parameter_check = decorator_maker(wrap_param_check)
"""
Checks if user input is not out of bounds, and also Content-Type
"""
def wrap_url_arg_check(does_return, req_c_type, req_type, arg, session):
    """Validator: type-check URL argument *arg*, then clamp it to the
    session's stored frame count."""
    check_arg_urls(req_type, arg)
    frame_idx = request.view_args[arg]
    return check_bounds(frame_idx, session['video_frame_count'])
def check_arg_urls(req_type, arg):
    """400 when URL argument *arg* can't be converted to *req_type*."""
    try:
        req_type(request.view_args[arg])
    except ValueError:
        raise InvalidAPIUsage(f'Content-Type should be of type: {req_type.__name__}', 400)
def check_bounds(frame_idx, max_frames):
    """Clamp *frame_idx* into the valid frame range.

    Indices above *max_frames* are pulled back to max_frames - 50 (a margin
    before the end of the video); indices below 1 snap to the first frame.
    """
    upper = int(max_frames)
    idx = int(frame_idx)
    if idx > upper:
        return upper - 50
    if idx < 1:
        return 1
    return idx
# Endpoint decorator: URL-argument type and bounds validation.
url_arg_check = decorator_maker(wrap_url_arg_check)
"""
Checks Video Metadata
"""
def wrap_metadata_check(does_return, req_c_type, req_type, arg, session):
    """Validator: ensure the uploaded file's Content-Type equals *req_type*."""
    check_metadata(req_type)
def check_metadata(req_type):
    """Raise IncorrectVideoFormat(1) unless the uploaded file is *req_type*."""
    byteStream = request.files['file']
    # Reads the part's own Content-Type header from the FileStorage object.
    vid_type = byteStream.__dict__['headers'].get('Content-Type')
    if vid_type != req_type:
        raise IncorrectVideoFormat(1)
# Endpoint decorator: uploaded-file Content-Type validation.
metadata_check = decorator_maker(wrap_metadata_check)
"""
Excpetion Handler for non-Endpoints
"""
def exception_handler(fn=None, ex=None, type=None, pas=False):
    """Decorator for non-endpoint helpers: convert any exception raised by
    the wrapped function into ex(type), or swallow it when pas=True.

    Usable bare (@exception_handler) or with arguments.
    """
    def deco(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # BUG FIX: the original called fn twice on success (once in the
            # try, once in the trailing return), duplicating side effects;
            # and with pas=True a failing fn was re-invoked after `pass`,
            # so the exception escaped anyway. Call exactly once.
            try:
                return fn(*args, **kwargs)
            except Exception:
                if not pas:
                    raise ex(type)
                return None
        return wrapper
    if callable(fn): return deco(fn)
    return deco
|
import sys
import traceback
from flask import jsonify, request
from . import api
class InvalidAPIUsage(Exception):
    """Base API error: carries message, request path and HTTP status."""
    status_code = 400

    def __init__(self, message='', status_code=None):
        super().__init__()
        self.message = message
        self.path = request.path
        # BUG FIX: the original only assigned self.status_code when the
        # argument was None, so an explicit status_code (e.g. 404) was
        # silently ignored and the class default (400) always won.
        if status_code is None:
            status_code = InvalidAPIUsage.status_code
        self.status_code = status_code

    def to_dict(self):
        """Serialize for jsonify() in the error handler."""
        rv = {}
        rv['path'] = self.path
        rv['status'] = self.status_code
        rv['message'] = self.message
        return rv
class IncorrectVideoFormat(InvalidAPIUsage):
    """Raised for uploads with the wrong container type or dimensions."""

    def __init__(self, message_id):
        super().__init__()
        # msg is the class-level table below; attribute lookup at runtime.
        self.message = self.msg[message_id]

    # message_id -> canned error text.
    msg = {1:'Incorrect video type: only RGB - Type=video/mp4 allowed',
           2:'Incorrect video dimensions: only 720p supported (1280*720)'}
class InvalidFilterParams(InvalidAPIUsage):
    """Raised for missing/unknown/invalid filter or download parameters."""

    def __init__(self, message_id, filter_name=''):
        super().__init__()
        self.message = self.msg(message_id, filter_name)

    def msg(self, id, filter_name):
        """Return the canned message for *id*, interpolating *filter_name*."""
        # TODO:Lukas [07252021] messges could be stored in static files as JSON
        avail_msg = {1:'Incorrect filter parameters: should be {"fps": "<fps: float>", "filter_params":{"type":"<filter: str>"}} \
or for default preview, {"filter_params":{"type":""}}',
                     2:f'Incorrect filter parameters: filter does not exist, for more go to /api/v1/help/filters/',
                     3:f'Incorrect filter parameters: required parameters are missing or invalid, for more go to /api/v1/help/filters/{filter_name}/',
                     4:f'Incorrect download parameters: for more go to /api/v1/help/download/',
                    }
        return avail_msg[id]
@api.errorhandler(InvalidAPIUsage)
def invalid_api_usage(e):
    # Blueprint-wide handler: serialize any InvalidAPIUsage as JSON + 400.
    return jsonify(e.to_dict()), 400
|
{
"imported_by": [
"/app/api/VideoProcessing.py"
],
"imports": [
"/app/api/errors.py"
]
}
|
lukasld/Flask-Video-Editor
|
/app/api/videoApi.py
|
import os
from flask import Flask, request, redirect, \
url_for, session, jsonify, send_from_directory, make_response, send_file
from . import api
from . import utils
from .. import VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION, VIDEO_EXTENSION, CACHE
from . VideoProcessing import Frame, VideoUploader, VideoDownloader, Filter
from . decorators import parameter_check, url_arg_check, metadata_check
from . errors import InvalidAPIUsage
@api.route('/upload/', methods=['POST'])
@parameter_check(does_return=False, req_c_type='multipart/form-data')
@metadata_check(does_return=False, req_type='video/mp4')
def upload_video():
    """
    uploads the video
    """
    # Stream the file through ffmpeg; VideoUploader assigns a fresh id.
    byteStream = request.files['file']
    vu = VideoUploader()
    vu.upload_from_bytestream(byteStream)
    # Persist id / frame count in the session for later preview & download.
    session['s_id'] = vu.id
    f_c = utils.framecount_from_vid_id(vu.id)
    session['video_frame_count'] = f_c
    session['is_uploaded'] = True
    return jsonify({'status' : '201',
                    'message' : 'video uploaded!'}), 201
@api.route('/preview/', defaults={'frame_idx':1}, methods=['GET'])
@api.route('/preview/<frame_idx>/', methods=['GET', 'POST'])
@parameter_check(does_return=False, req_c_type='application/json')
@url_arg_check(does_return=True, req_type=int, arg='frame_idx', session=session)
def preview_thumbnail(frame_idx):
    """
    Preview a frame by index, given filter parameters
    """
    # frame_idx arrives already type-checked and bounds-clamped by the
    # url_arg_check decorator.
    if session.get('is_uploaded'):
        data = request.get_json()
        filter_params = data['filter_params']
        session['filter_params'] = filter_params
        frame = Frame(session['s_id'])
        frame_i = frame.get_by_idx(frame_idx)
        filter_frame = Filter(frame_i).run_func(filter_params)
        # Overwrites the stored thumbnail for this session id.
        frame.f_save(filter_frame, session['s_id'])
        return send_from_directory(directory=f'{FRAMES_UPLOAD_PATH}',
                                   path=f'{session["s_id"]}{IMG_EXTENSION}',
                                   as_attachment=True), 200
    raise InvalidAPIUsage('Invalid usage: please upload a video first')
@api.route('/download/', methods=['POST'])
@parameter_check(does_return=True, req_c_type='application/json', session=session)
def download_video(vid_range):
    """
    Download a video given filter parameters
    """
    # vid_range is the validated [min_f, max_f] pair (or None) produced by
    # parameter_check and injected in place of the view's own arguments.
    if session.get('is_uploaded'):
        data = request.get_json()
        fps = data['fps']
        filter_params = data['filter_params']
        frame_count = session['video_frame_count']
        vd = VideoDownloader(fps, vid_range)
        filter_vid = vd.download(session['s_id'], frame_count, filter_params)
        session['is_downloaded'] = True
        return send_from_directory(directory=f'{VIDEO_UPLOAD_PATH}',
                                   path=f'{filter_vid}{VIDEO_EXTENSION}',
                                   as_attachment=True), 200
    raise InvalidAPIUsage('Invalid usage: please upload a video first')
@api.route('/status/', methods=['GET'])
@parameter_check(req_c_type='application/json')
def status():
    """
    The progress of the user, uploaded, download / frames
    """
    resp = {}
    try:
        if session['is_uploaded']:
            resp["upload"] = "done"
        # Download progress is published to the cache by VideoDownloader.
        if CACHE.get(f"{session['s_id']}_d"):
            d_status = CACHE.get(f"{session['s_id']}_d")
            resp["downloaded_frames"] = f'{d_status}/{session["video_frame_count"]}'
        if session["is_downloaded"]:
            resp["is_downloaded"] = True
    except KeyError:
        # Missing session keys just mean that stage hasn't happened yet.
        pass
    return jsonify({"status" : resp}), 200
|
from werkzeug.utils import secure_filename
from functools import partial
import subprocess as sp
import time
import skvideo.io
import numpy as np
import threading
import ffmpeg
import shlex
import cv2
import re
from PIL import Image
from werkzeug.datastructures import FileStorage as FStorage
from .. import VIDEO_EXTENSION, VIDEO_WIDTH, VIDEO_HEIGHT, \
VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION
from . import utils
from . errors import IncorrectVideoFormat, InvalidFilterParams, InvalidAPIUsage
from . decorators import exception_handler
# Bytes in one raw bgr24 frame (3 bytes per pixel).
FRAME_SIZE = VIDEO_WIDTH * VIDEO_HEIGHT * 3
FRAME_WH = (VIDEO_WIDTH, VIDEO_HEIGHT)
# Decode stdin to raw bgr24 frames on stdout; no audio/subtitles, quiet log.
FFMPEG_COMMAND = 'ffmpeg -i pipe: -f rawvideo -pix_fmt bgr24 -an -sn pipe: -loglevel quiet'
# Length of the generated session/video identifiers.
ID_LEN = 32
class Frame:
    """Single-frame access helpers for a stored video identified by id."""

    def __init__(self, id=None):
        # id: session/video identifier used to locate files on disk.
        self.id = id

    @exception_handler(ex=IncorrectVideoFormat, type=2)
    def from_bytes(self, in_bytes: bytes) -> np.ndarray:
        """Decode one raw bgr24 buffer into an RGB ndarray.

        The decorator converts any failure (e.g. a buffer that does not
        reshape to (VIDEO_HEIGHT, VIDEO_WIDTH, 3)) into
        IncorrectVideoFormat(2).
        """
        frame_arr = np.frombuffer(in_bytes, np.uint8)
        f_arr = frame_arr.reshape([VIDEO_HEIGHT, VIDEO_WIDTH, 3])
        return utils.bgr_to_rgb(f_arr)

    def f_save(self, frame: np.ndarray, frame_id: str) -> None:
        """Save *frame* as an image under the frames upload path."""
        upload_path = utils.create_frame_path(frame_id)
        if utils.is_rgb(frame):
            Image.fromarray(frame).save(upload_path)
            return
        # 2-D (greyscale) frames are wrapped into a PIL image by a helper.
        utils.img_from_greyscale(frame).save(upload_path)
        return

    def get_by_idx(self, frame_idx):
        """Return frame *frame_idx* of this video as a BGR ndarray."""
        vid = utils.create_vid_path(self.id)
        cap = cv2.VideoCapture(vid)
        cap.set(1, frame_idx)  # 1 == cv2.CAP_PROP_POS_FRAMES
        _, frame = cap.read()
        return frame
class VideoUploader(Frame):
    """Pipes an uploaded byte stream through ffmpeg, re-encoding the video
    to disk and saving the first frame as a thumbnail."""

    def __init__(self):
        # A fresh random id names the stored video and its thumbnail.
        id = utils.id_generator(ID_LEN)
        super().__init__(id)
        self.frame_count = 0

    def upload_from_bytestream(self, byte_stream: FStorage):
        """Stream *byte_stream* into ffmpeg while reading decoded frames back.

        A writer thread feeds ffmpeg's stdin so that reading stdout here
        cannot deadlock against a full pipe buffer.
        """
        video_f_path = utils.create_vid_path(self.id)
        sk_writer = utils.create_sk_video_writer(video_f_path)
        sh_command = shlex.split(FFMPEG_COMMAND)
        process = sp.Popen(sh_command, stdin=sp.PIPE, stdout=sp.PIPE, bufsize=10**8)
        thread = threading.Thread(target=self._writer, args=(process, byte_stream, ))
        thread.start()
        while True:
            # One raw bgr24 frame per read; empty read means ffmpeg is done.
            in_bytes = process.stdout.read(FRAME_SIZE)
            if not in_bytes: break
            frame = self.from_bytes(in_bytes)
            self.frame_count += 1
            # The first decoded frame doubles as the preview thumbnail.
            if self.frame_count == 1: self.f_save(frame, self.id)
            sk_writer.writeFrame(frame)
        thread.join()
        sk_writer.close()

    def _writer(self, process, byte_stream):
        # Pump the upload into ffmpeg's stdin in 1 KiB chunks.
        for chunk in iter(partial(byte_stream.read, 1024), b''):
            process.stdin.write(chunk)
        try:
            process.stdin.close()
        except (BrokenPipeError):
            # ffmpeg may already have exited; closing is best-effort.
            pass
class Filter:
    """Applies a named OpenCV filter to self.img, dispatched via filter_map."""

    def __init__(self, img=None):
        self.img = img

    def applyCanny(self, params):
        """Canny edge detection; requires 'thresh1' and 'thresh2' params."""
        if 'thresh1' in params and 'thresh2' in params:
            gs_img = self.applyGreyScale(params)
            return cv2.Canny(gs_img,
                             int(params['thresh1']),
                             int(params['thresh2']))
        raise InvalidFilterParams(3, 'canny')

    def applyGauss(self, params):
        """Gaussian blur; requires odd 'ksize_x' and 'ksize_y' params.

        BUG FIX: the original condition `'ksize_x' and 'ksize_y' in params`
        only tested membership of 'ksize_y' (the literal 'ksize_x' is always
        truthy), so a missing 'ksize_x' escaped as a KeyError instead of
        InvalidFilterParams; it also applied `% 2` to the raw (possibly
        string) values. Both keys are now checked and cast before the
        odd-kernel validation (cv2.GaussianBlur requires odd sizes).
        """
        if 'ksize_x' in params and 'ksize_y' in params and \
                int(params['ksize_x']) % 2 != 0 and \
                int(params['ksize_y']) % 2 != 0:
            g_img = self.img.copy()
            if np.ndim(g_img) == 3: g_img = utils.bgr_to_rgb(g_img)
            return cv2.GaussianBlur(g_img,
                                    (int(params["ksize_x"]), int(params["ksize_y"])), 0)
        raise InvalidFilterParams(3, 'gauss')

    def applyGreyScale(self, _):
        """Convert self.img to a single-channel greyscale image."""
        c_img = self.img.copy()
        return cv2.cvtColor(c_img, cv2.COLOR_RGB2GRAY)

    def applyLaplacian(self, params):
        """Laplacian edge filter applied to the greyscale image."""
        gs_img = self.applyGreyScale(params)
        return cv2.Laplacian(gs_img, cv2.CV_8U)

    def run_func(self, params):
        """Dispatch on params['type']; unknown names raise InvalidFilterParams(2)."""
        if params["type"] in self.filter_map:
            # filter_map stores plain functions; __get__ binds them to self.
            func = self.filter_map[params["type"]].__get__(self, type(self))
            return func(params)
        raise InvalidFilterParams(2)

    def _default(self, _):
        # Empty type: no filtering, just BGR -> RGB conversion.
        return utils.bgr_to_rgb(self.img)

    # Maps the JSON 'type' value to the (unbound) filter implementation.
    filter_map = {'canny': applyCanny,
                  'gauss': applyGauss,
                  'greyscale': applyGreyScale,
                  'laplacian': applyLaplacian,
                  '': _default}
class VideoDownloader(Frame, Filter):
    """Re-encodes a stored video with a filter applied, optionally only
    inside a [min, max] frame range."""

    def __init__(self, fps, vid_range=None):
        Frame.__init__(self)
        Filter.__init__(self)
        self.fps = fps
        # vid_range: optional (min_frame, max_frame) pair limiting filtering.
        self.vid_range = vid_range
        self.curr_f_frame = None
        if vid_range:
            self.range_min = vid_range[0]
            self.range_max = vid_range[1]

    def download(self, s_id, tot_video_frames, params):
        """Write the filtered video to disk; return its file name (no ext)."""
        f_vid_name = f'{s_id}_{params["type"]}'
        video_f_path = utils.create_vid_path(f_vid_name)
        local_vid = cv2.VideoCapture(utils.create_vid_path(s_id))
        vid_writer = utils.create_sk_video_writer(video_f_path, self.fps)
        for i in range(tot_video_frames-1):
            # Progress is published to the cache so /status/ can report it.
            utils.set_cache_f_count(s_id, 'd', i)
            _, curr_frame = local_vid.read()
            if curr_frame is None: break
            self.img = curr_frame
            f_frame = self._filter_apply(i, params)
            vid_writer.writeFrame(f_frame)
        vid_writer.close()
        return f_vid_name

    def _filter_apply(self, i, params):
        """
        we simply check if a range is given,
        then if we get a gs-img from the filter we add three dimensions
        """
        if self.vid_range:
            if(i >= self.vid_range[0] and
               i <= self.vid_range[1]):
                f_frame = self.run_func(params)
                if not utils.is_rgb(f_frame):
                    # Stack the greyscale plane into 3 channels for the writer.
                    return np.dstack(3*[f_frame])
                return f_frame
            else:
                # Outside the requested range: pass the frame through unfiltered.
                return self.run_func({"type":""})
        else:
            return self.run_func(params)
--- FILE SEPARATOR ---
import sys
import traceback
from flask import jsonify, request
from . import api
class InvalidAPIUsage(Exception):
    """Base API error: carries message, request path and HTTP status."""
    status_code = 400

    def __init__(self, message='', status_code=None):
        super().__init__()
        self.message = message
        self.path = request.path
        # BUG FIX: the original only assigned self.status_code when the
        # argument was None, so an explicit status_code (e.g. 404) was
        # silently ignored and the class default (400) always won.
        if status_code is None:
            status_code = InvalidAPIUsage.status_code
        self.status_code = status_code

    def to_dict(self):
        """Serialize for jsonify() in the error handler."""
        rv = {}
        rv['path'] = self.path
        rv['status'] = self.status_code
        rv['message'] = self.message
        return rv
class IncorrectVideoFormat(InvalidAPIUsage):
    """Raised for uploads with the wrong container type or dimensions."""

    def __init__(self, message_id):
        super().__init__()
        # msg is the class-level table below; attribute lookup at runtime.
        self.message = self.msg[message_id]

    # message_id -> canned error text.
    msg = {1:'Incorrect video type: only RGB - Type=video/mp4 allowed',
           2:'Incorrect video dimensions: only 720p supported (1280*720)'}
class InvalidFilterParams(InvalidAPIUsage):
    """Raised for missing/unknown/invalid filter or download parameters."""

    def __init__(self, message_id, filter_name=''):
        super().__init__()
        self.message = self.msg(message_id, filter_name)

    def msg(self, id, filter_name):
        """Return the canned message for *id*, interpolating *filter_name*."""
        # TODO:Lukas [07252021] messges could be stored in static files as JSON
        avail_msg = {1:'Incorrect filter parameters: should be {"fps": "<fps: float>", "filter_params":{"type":"<filter: str>"}} \
or for default preview, {"filter_params":{"type":""}}',
                     2:f'Incorrect filter parameters: filter does not exist, for more go to /api/v1/help/filters/',
                     3:f'Incorrect filter parameters: required parameters are missing or invalid, for more go to /api/v1/help/filters/{filter_name}/',
                     4:f'Incorrect download parameters: for more go to /api/v1/help/download/',
                    }
        return avail_msg[id]
@api.errorhandler(InvalidAPIUsage)
def invalid_api_usage(e):
    # Blueprint-wide handler: serialize any InvalidAPIUsage as JSON + 400.
    return jsonify(e.to_dict()), 400
|
{
"imported_by": [],
"imports": [
"/app/api/VideoProcessing.py",
"/app/api/errors.py"
]
}
|
junprog/contrastive-baseline
|
/linear_eval.py
|
import os
import argparse
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
from datasets.cifar10 import get_simsiam_dataset
from models.create_linear_eval_model import LinearEvalModel
from utils.visualizer import AccLossGraphPloter
from utils.logger import setlogger
args = None
def parse_args():
    """Parse command-line options for linear evaluation."""
    p = argparse.ArgumentParser(description='Test ')
    p.add_argument('--save-dir', default='/mnt/hdd02/contrastive-learn/0113-193048',
                   help='model directory')
    p.add_argument('--device', default='0', help='assign device')
    p.add_argument('--arch', default='vgg19', help='model architecture')
    p.add_argument('--max-epoch', default=100, type=int, help='train epoch')
    p.add_argument('--crop-size', default=224, type=int, help='input size')
    p.add_argument('--batch-size', default=512, type=int, help='input size')
    p.add_argument('--lr', default=1e-1, type=float, help='learning rate')
    p.add_argument('--momentum', default=0.9, type=float, help='momentum')
    return p.parse_args()
if __name__ == '__main__':
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip()  # set vis gpu
    plotter = AccLossGraphPloter(args.save_dir)
    setlogger(os.path.join(args.save_dir, 'eval.log'))  # set logger

    datasets = {x: get_simsiam_dataset(args, x) for x in ['linear_train', 'linear_val']}
    dataloaders = {x: DataLoader(datasets[x],
                                 batch_size=(args.batch_size),
                                 shuffle=(True if x == 'linear_train' else False),
                                 num_workers=8,
                                 pin_memory=(True if x == 'linear_train' else False)) for x in ['linear_train', 'linear_val']}
    device = torch.device('cuda')

    # Frozen pretrained encoder + trainable linear head.
    model = LinearEvalModel(arch=args.arch)
    model.weight_init(args.save_dir, device, args.arch)  ## initialize & freeze

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[40, 60, 80], gamma=0.1)

    ## Training & Test Loop
    model.to(device)
    for epoch in range(args.max_epoch):
        # ---- train ----
        model.train()
        losses, acc, step, total = 0., 0., 0., 0.
        for data, target in dataloaders['linear_train']:
            data, target = data.to(device), target.to(device)
            logits = model(data)
            optimizer.zero_grad()
            loss = criterion(logits, target)
            loss.backward()
            losses += loss.item()
            optimizer.step()
            pred = F.softmax(logits, dim=-1).max(-1)[1]
            acc += pred.eq(target).sum().item()
            step += 1
            total += target.size(0)
        # BUG FIX: scheduler.step() used to run once per *batch*, so the
        # MultiStepLR milestones [40, 60, 80] (epochs, given --max-epoch
        # 100) were exhausted within the first epoch. Per the PyTorch
        # convention, step the scheduler once per epoch.
        scheduler.step()
        tr_loss = losses / step
        tr_acc = acc / total * 100.
        logging.info('[Train Epoch: {0:2d}], loss: {1:.3f}, acc: {2:.3f}'.format(epoch, tr_loss, tr_acc))

        # ---- validate ----
        model.eval()
        losses, acc, step, total = 0., 0., 0., 0.
        with torch.no_grad():
            for data, target in dataloaders['linear_val']:
                data, target = data.to(device), target.to(device)
                logits = model(data)
                loss = criterion(logits, target)
                losses += loss.item()
                pred = F.softmax(logits, dim=-1).max(-1)[1]
                acc += pred.eq(target).sum().item()
                step += 1
                total += target.size(0)
        vl_loss = losses / step
        vl_acc = acc / total * 100.
        logging.info('[Test Epoch: {0:2d}], loss: {1:.3f} acc: {2:.2f}'.format(epoch, vl_loss, vl_acc))
        plotter(epoch, tr_acc, vl_acc, tr_loss, vl_loss, args.arch)
|
import os
from collections import OrderedDict
import torch
import torch.nn as nn
import torchvision.models as models
class LinearEvalModel(nn.Module):
    """Frozen pretrained encoder + trainable linear classifier, for linear
    evaluation of a contrastive (SimSiam-style) checkpoint."""

    def __init__(self, arch='vgg19', dim=512, num_classes=10):
        super().__init__()
        # Single if/elif chain; the original used two separate `if`s for
        # the vgg variants, which worked but obscured mutual exclusivity.
        # NOTE(review): an unknown arch leaves self.features unset and
        # fails later with AttributeError — consider raising here.
        if arch == 'vgg19':
            self.features = models.vgg19().features
        elif arch == 'vgg19_bn':
            self.features = models.vgg19_bn().features
        elif arch == 'resnet18':
            resnet18 = models.resnet18(pretrained=False)
            self.features = nn.Sequential(*list(resnet18.children())[:-1])
        self.avg_pool = nn.AdaptiveAvgPool2d((1,1))
        self.fc = nn.Linear(dim, num_classes)

    def weight_init(self, weight_path, device, arch):
        """Load encoder weights from <weight_path>/best_model.pth and freeze them.

        Keys are remapped from the checkpoint's 'encoder.' prefix to this
        module's 'features' submodule.
        """
        state_dict = torch.load(os.path.join(weight_path, 'best_model.pth'), device)
        new_state_dict = OrderedDict()
        if 'resnet' in arch:
            for k, v in state_dict.items():
                if 'encoder' in k:
                    k = k.replace('encoder.', '')
                    new_state_dict[k] = v
            self.features.load_state_dict(new_state_dict)
        elif 'vgg' in arch:
            for k, v in state_dict.items():
                if 'encoder' in k:
                    k = k.replace('encoder.0.', '')
                    new_state_dict[k] = v
            self.features.load_state_dict(new_state_dict)
        # Freeze the encoder: only self.fc remains trainable.
        for m in self.features.parameters():
            m.requires_grad = False

    def forward(self, x):
        x = self.features(x)
        x = self.avg_pool(x)
        # BUG FIX: x.squeeze() also removed the batch dimension when
        # batch_size == 1, breaking the linear layer; flatten keeps dim 0.
        x = torch.flatten(x, 1)
        out = self.fc(x)
        return out
--- FILE SEPARATOR ---
import os
import numpy as np
from PIL import Image
import torch
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
### Takes a torch tensor batch and renders it according to args.div_num
# ImageNet normalization statistics, used to undo input normalization.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def invnorm(img, N):
    """Undo ImageNet normalization for sample N of a batch; returns an HWC numpy array."""
    sample = img[N, :, :, :].to('cpu').detach().numpy().copy()
    sample = sample.transpose(1, 2, 0)  # CHW -> HWC for plotting
    return sample * std + mean
class ImageDisplayer:
    """Renders the first N (img1, img2) pairs of a batch side by side and
    saves the figure under <save_dir>/images/.

    Inputs are assumed to be ImageNet-normalized tensors; `invnorm` maps
    them back to displayable RGB before plotting.
    """
    def __init__(self, args, save_fir):
        # N is number of batch to display
        self.args = args
        self.save_dir = save_fir
        self.N = args.visual_num
    @torch.no_grad()
    def __call__(self, epoch, prefix, img1, img2, target):
        """Denormalize the first self.N samples of both batches and plot them.

        `target` may be None, in which case no per-pair titles are drawn.
        """
        imgs1 = []
        imgs2 = []
        targets = []
        for n in range(self.N):
            imgs1.append(invnorm(img1,n))
            imgs2.append(invnorm(img2,n))
            if target is not None:
                targets.append(target[n].item())
            else:
                # no labels supplied: collapse to None so display_images skips titles
                targets = None
        self.display_images(epoch, prefix, imgs1, imgs2, targets)
    def display_images(self, epoch, prefix, images1: list, images2: list, targets,
        columns=2, width=8, height=8, label_wrap_length=50, label_font_size=8):
        """Lay pairs out in an N-row x 2-column grid and save as a PNG."""
        if not (images1 and images2):
            print("No images to display.")
            return
        # grow the figure height proportionally to the number of rows
        height = max(height, int(len(images1)/columns) * height)
        plt.figure(figsize=(width, height))
        i = 1
        if targets is not None:
            for (im1, im2, tar) in zip(images1, images2, targets):
                # images arrive as float arrays in [0, 1]; scale to uint8 RGB
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im2)
                i += 1
        else:
            for (im1, im2) in zip(images1, images2):
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.imshow(im2)
                i += 1
        plt.tight_layout()
        output_img_name = 'imgs_{}_{}.png'.format(prefix, epoch)
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
class EmbeddingDisplayer:
    """Scatter-plots 2-D embeddings colored by CIFAR-10 class and saves a PNG."""
    def __init__(self, args, save_fir):
        self.args = args
        self.save_dir = save_fir
        self.cifar10_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        # matplotlib "tab10" palette, one color per class
        self.colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                       '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                       '#bcbd22', '#17becf']
    @torch.no_grad()
    def __call__(self, embeddings, targets, epoch, prefix, xlim=None, ylim=None):
        """Save a scatter plot of `embeddings` (N, 2) grouped by `targets` (N,)."""
        emb_np = embeddings.to('cpu').detach().numpy().copy()
        tgt_np = targets.to('cpu').detach().numpy().copy()
        plt.figure(figsize=(10, 10))
        for cls_idx, color in enumerate(self.colors):
            member_rows = np.where(tgt_np == cls_idx)[0]
            plt.scatter(emb_np[member_rows, 0], emb_np[member_rows, 1],
                        alpha=0.5, color=color)
        if xlim:
            plt.xlim(xlim[0], xlim[1])
        if ylim:
            plt.ylim(ylim[0], ylim[1])
        plt.legend(self.cifar10_classes)
        plt.savefig(os.path.join(self.save_dir, 'images',
                                 'emb_{}_{}.png'.format(prefix, epoch)))
        plt.close()
class LossGraphPloter:
    """Accumulates per-epoch losses and re-saves the loss curve as an SVG."""
    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.epochs = []
        self.losses = []
    def __call__(self, epoch, loss, prefix):
        """Record (epoch, loss) and overwrite '<prefix>_loss.svg'."""
        self.epochs.append(epoch)
        self.losses.append(loss)
        plt.plot(self.epochs, self.losses)
        plt.title('Loss')
        out_path = os.path.join(self.save_dir, 'images', '{}_loss.svg'.format(prefix))
        plt.savefig(out_path)
        plt.close()
class AccLossGraphPloter:
    """Tracks train/val accuracy and loss per epoch and saves a two-panel SVG."""
    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.tr_accs = []
        self.vl_accs = []
        self.tr_losses = []
        self.vl_losses = []
        self.epochs = []
    def __call__(self, epoch, tr_acc, vl_acc, tr_loss, vl_loss, prefix):
        """Append this epoch's metrics and overwrite '<prefix>_eval.svg'."""
        self.tr_accs.append(tr_acc)
        self.vl_accs.append(vl_acc)
        self.tr_losses.append(tr_loss)
        self.vl_losses.append(vl_loss)
        self.epochs.append(epoch)
        # left panel: accuracy, right panel: loss
        fig, (acc_ax, loss_ax) = plt.subplots(ncols=2, figsize=(10, 4))
        acc_ax.plot(self.epochs, self.tr_accs, label='train')
        acc_ax.plot(self.epochs, self.vl_accs, label='val')
        acc_ax.set_title('Top-1 Accuracy')
        acc_ax.set_xlabel('epoch')
        acc_ax.set_ylabel('acc [%]')
        acc_ax.legend(loc="lower right")
        loss_ax.plot(self.epochs, self.tr_losses, label='train')
        loss_ax.plot(self.epochs, self.vl_losses, label='val')
        loss_ax.set_title('Loss')
        loss_ax.set_xlabel('epoch')
        loss_ax.set_ylabel('loss')
        loss_ax.legend(loc="upper right")
        plt.savefig(os.path.join(self.save_dir, 'images', '{}_eval.svg'.format(prefix)))
        plt.close()
--- FILE SEPARATOR ---
from typing import Callable, Optional
import random
from PIL import Image
import numpy as np
import torch
import torchvision
from torchvision import transforms
from torchvision.datasets import CIFAR10
np.random.seed(765)
random.seed(765)
class SupervisedPosNegCifar10(torch.utils.data.Dataset):
    """Label-aware pair dataset over CIFAR-10.

    The input dataset is randomly split 80/20 into anchors and pos/neg
    candidates. __getitem__ returns (anchor, posneg, target, label), where
    target == 1 when the partner shares the anchor's class and 0 otherwise.
    """
    def __init__(self, dataset, phase):
        # split by some thresholds here 80% anchors, 20% for posnegs
        lengths = [int(len(dataset)*0.8), int(len(dataset)*0.2)]
        self.anchors, self.posnegs = torch.utils.data.random_split(dataset, lengths)
        if phase == 'train':
            # training: resize + random crop/flip augmentation on both streams
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                                                        transforms.RandomHorizontalFlip(0.5),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                                                        transforms.RandomHorizontalFlip(0.5),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        else:
            # eval: deterministic resize only
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    def __len__(self):
        # dataset length is the number of anchors (80% split)
        return len(self.anchors)
    def __getitem__(self, index):
        anchor, label = self.anchors[index]
        if self.anchor_transform is not None:
            anchor = self.anchor_transform(anchor)
        # now pair this up with an image from the same class in the second stream
        if random.random() > 0.5:
            # positive: candidate indices whose class equals the anchor's label,
            # restricted to indices that belong to the posneg split
            A = np.where(np.array(self.posnegs.dataset.targets) == label)[0]
            posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
            # NOTE(review): `indices == posneg_idx` relies on `indices` being a
            # numpy array for elementwise comparison — confirm with the torch
            # version in use (random_split may return a plain list).
            posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
            target = torch.tensor([1]).long()
        else:
            # negative: candidate indices with a class different from the anchor's
            A = np.where(np.array(self.posnegs.dataset.targets) != label)[0]
            posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
            # NOTE(review): `label` is overwritten with the partner's class here,
            # so negative pairs return the partner's label, not the anchor's —
            # confirm downstream consumers expect this.
            posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
            target = torch.tensor([0]).long()
        if self.posneg_transform is not None:
            posneg = self.posneg_transform(posneg)
        return anchor, posneg, target, label
class PosNegCifar10(torch.utils.data.Dataset):
    """Pair dataset over CIFAR-10 without a held-out partner split.

    Positives are the same image as the anchor (the two independent transform
    pipelines yield two different augmented views); negatives are a random
    sample at a different index.
    """
    def __init__(self, dataset, phase):
        # unlike SupervisedPosNegCifar10, no split: pairs are drawn from the full dataset
        self.dataset = dataset
        if phase == 'train':
            # training: resize + random crop/flip augmentation on both streams
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                                                        transforms.RandomHorizontalFlip(0.5),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                                                        transforms.RandomHorizontalFlip(0.5),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        else:
            # eval: deterministic resize only
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, index):
        anchor, label = self.dataset[index]
        # now pair this up with an image from the same class in the second stream
        if random.random() > 0.5:
            # positive: same underlying image; the two transforms applied below
            # produce two distinct augmented views
            posneg = anchor
            target = torch.tensor([1]).long()
        else:
            # negative: any index other than the anchor's.
            # NOTE(review): only the *index* is guaranteed to differ — the
            # sampled image may still share the anchor's class, so some
            # "negative" pairs are actually same-class. Confirm this is intended.
            while True:
                neg_idx = random.randint(0, len(self.dataset)-1)
                if neg_idx != index:
                    break
            posneg, label = self.dataset[neg_idx]
            target = torch.tensor([0]).long()
        if self.anchor_transform is not None:
            anchor = self.anchor_transform(anchor)
        if self.posneg_transform is not None:
            posneg = self.posneg_transform(posneg)
        return anchor, posneg, target, label
### Simple Siamese code
imagenet_mean_std = [[0.485, 0.456, 0.406],[0.229, 0.224, 0.225]]
class SimSiamTransform():
    """Two-view augmentation for SimSiam: __call__ returns (x1, x2)."""
    def __init__(self, image_size, train, mean_std=imagenet_mean_std):
        self.train = train
        if not self.train:
            # eval: deterministic resize (256/224 ratio) + center crop
            self.transform = transforms.Compose([
                transforms.Resize(int(image_size*(8/7)), interpolation=Image.BICUBIC),  # 224 -> 256
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                transforms.Normalize(*mean_std)
            ])
        else:
            if image_size is None:
                image_size = 224  # by default simsiam uses image size 224
            # Gaussian blur applied with 50% probability (the SimCLR setting;
            # the paper didn't specify a value), and disabled entirely for
            # cifar-sized (<= 32px) inputs.
            blur_p = 0.5 if image_size > 32 else 0
            self.transform = transforms.Compose([
                transforms.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomApply([transforms.ColorJitter(0.4,0.4,0.4,0.1)], p=0.8),
                transforms.RandomGrayscale(p=0.2),
                transforms.RandomApply(
                    [transforms.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))],
                    p=blur_p),
                transforms.ToTensor(),
                transforms.Normalize(*mean_std)
            ])
    def __call__(self, x):
        """Apply the pipeline twice independently, producing two random views."""
        first_view = self.transform(x)
        second_view = self.transform(x)
        return first_view, second_view
def get_simsiam_dataset(args, phase, download=True, debug_subset_size=None):
    """Build a CIFAR-10 dataset for SimSiam pretraining or linear evaluation.

    Args:
        args: namespace providing `crop_size`.
        phase: one of 'train' / 'val' (two-view SimSiam transforms) or
            'linear_train' / 'linear_val' (standard single-view transforms).
        download: download CIFAR-10 if it is not present locally.
        debug_subset_size: if set, return only the first N samples
            (useful for overfitting a single batch while debugging).

    Raises:
        ValueError: if `phase` is not one of the four known phases.
    """
    if phase == 'train':
        train = True
        transform = SimSiamTransform(args.crop_size, train)
    elif phase == 'val':
        train = False
        transform = SimSiamTransform(args.crop_size, train)
    elif phase == 'linear_train':
        train = True
        transform = transforms.Compose([
            transforms.RandomResizedCrop(args.crop_size, scale=(0.08, 1.0), ratio=(3.0/4.0,4.0/3.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(*imagenet_mean_std)
        ])
    elif phase == 'linear_val':
        train = False
        transform = transforms.Compose([
            transforms.Resize(int(args.crop_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256
            transforms.CenterCrop(args.crop_size),
            transforms.ToTensor(),
            transforms.Normalize(*imagenet_mean_std)
        ])
    else:
        # previously an unknown phase fell through to a NameError on `train`;
        # fail loudly with a clear message instead
        raise ValueError("unknown phase: {!r}".format(phase))
    dataset = torchvision.datasets.CIFAR10(root="CIFAR10_Dataset", train=train, transform=transform, download=download)
    if debug_subset_size is not None:
        dataset = torch.utils.data.Subset(dataset, range(0, debug_subset_size)) # take only one batch
        # Subset hides the underlying dataset's attributes; re-expose the ones callers use
        dataset.classes = dataset.dataset.classes
        dataset.targets = dataset.dataset.targets
    return dataset
|
{
"imported_by": [],
"imports": [
"/models/create_linear_eval_model.py",
"/utils/visualizer.py",
"/datasets/cifar10.py"
]
}
|
junprog/contrastive-baseline
|
/train.py
|
from utils.contrastive_trainer import CoTrainer
from utils.simsiam_trainer import SimSiamTrainer
import argparse
import os
import math
import torch
args = None
def parse_args():
    """Build and parse the command-line arguments for training."""
    p = argparse.ArgumentParser(description='Train ')
    # paths / data
    p.add_argument('--data-dir', default='/mnt/hdd02/process-ucf',
                   help='training data directory')
    p.add_argument('--save-dir', default='D:/exp_results',
                   help='directory to save models.')
    p.add_argument('--cifar10', action='store_true',
                   help='use cifar10 dataset')
    # model / method selection
    p.add_argument('--SimSiam', action='store_true',
                   help='try Simple Siamese Net')
    p.add_argument('--arch', type=str, default='vgg19',
                   help='the model architecture [vgg19, vgg19_bn, resnet18]')
    p.add_argument('--pattern-feature', type=str, default='conv-512x1x1',
                   help='the feature to contrast [conv-512x1x1, fc-4096]')
    p.add_argument('--projection', action='store_true',
                   help='use MLP projection')
    p.add_argument('--prediction', action='store_true',
                   help='use MLP prediction')
    p.add_argument('--mlp-bn', action='store_true',
                   help='use MLP Batch Normalization')
    # optimization
    p.add_argument('--lr', type=float, default=1e-2,
                   help='the initial learning rate')
    p.add_argument('--weight-decay', type=float, default=1e-4,
                   help='the weight decay')
    p.add_argument('--momentum', type=float, default=0.9,
                   help='the momentum')
    # patch division / augmentation / loss
    p.add_argument('--div-row', type=int, default=3,
                   help='one side`s number of pathes')
    p.add_argument('--div-col', type=int, default=3,
                   help='one side`s number of pathes')
    p.add_argument('--aug', action='store_true',
                   help='the weight decay')
    p.add_argument('--margin', type=float, default=1.0,
                   help='the margin of loss function')
    # checkpointing / schedule
    p.add_argument('--resume', default='',
                   help='the path of resume training model')
    p.add_argument('--max-model-num', type=int, default=30,
                   help='max models num to save ')
    p.add_argument('--check_point', type=int, default=100,
                   help='milestone of save model checkpoint')
    p.add_argument('--max-epoch', type=int, default=300,
                   help='max training epoch')
    p.add_argument('--val-epoch', type=int, default=10,
                   help='the num of steps to log training information')
    p.add_argument('--val-start', type=int, default=0,
                   help='the epoch start to val')
    # runtime
    p.add_argument('--batch-size', type=int, default=8,
                   help='train batch size')
    p.add_argument('--device', default='0', help='assign device')
    p.add_argument('--num-workers', type=int, default=8,
                   help='the num of training process')
    p.add_argument('--crop-size', type=int, default=224,
                   help='the crop size of the train image')
    p.add_argument('--visual-num', type=int, default=4,
                   help='the number of visualize images')
    return p.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # let cudnn benchmark conv algorithms (fast for fixed input sizes)
    torch.backends.cudnn.benchmark = True
    # NOTE(review): strip('-') only removes leading/trailing dashes from the
    # device string (e.g. '-0-' -> '0') — presumably to tolerate dash-wrapped
    # ids on the command line; confirm the intended CLI format.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip('-')  # set vis gpu
    # choose the SimSiam trainer or the plain contrastive trainer
    if args.SimSiam:
        trainer = SimSiamTrainer(args)
    else:
        trainer = CoTrainer(args)
    trainer.setup()
    trainer.train()
|
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets
from models.siamese_net import SiameseNetwork
from models.l2_contrastive_loss import L2ContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, EmbeddingDisplayer
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10
class CoTrainer(Trainer):
    """Siamese training loop with L2 contrastive loss.

    Trains a SiameseNetwork on (anchor, posneg, target) pairs, visualizes the
    first batch of every epoch, checkpoints each epoch, and keeps the best
    validation-loss weights as best_model.pth.
    """
    def setup(self):
        """initialize the datasets, model, loss and optimizer"""
        args = self.args
        self.vis = ImageDisplayer(args, self.save_dir)
        self.emb = EmbeddingDisplayer(args, self.save_dir)
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")
        if args.cifar10:
            # Download and create datasets (transform=None: the pair wrapper applies its own)
            or_train = datasets.CIFAR10(root="CIFAR10_Dataset", train=True, transform=None, download=True)
            or_val = datasets.CIFAR10(root="CIFAR10_Dataset", train=False, transform=None, download=True)
            # splits CIFAR10 into two streams
            self.datasets = {x: PosNegCifar10((or_train if x == 'train' else or_val),
                                              phase=x) for x in ['train', 'val']}
        else:
            self.datasets = {x: SpatialDataset(os.path.join(args.data_dir, x),
                                               args.crop_size,
                                               args.div_num,
                                               args.aug) for x in ['train', 'val']}
        self.dataloaders = {x: DataLoader(self.datasets[x],
                                          batch_size=args.batch_size,
                                          shuffle=(True if x == 'train' else False),
                                          num_workers=args.num_workers*self.device_count,
                                          pin_memory=(True if x == 'train' else False),
                                          worker_init_fn=worker_init_fn) for x in ['train', 'val']}
        # Define model, loss, optim
        self.model = SiameseNetwork(models.__dict__[args.arch], pattern_feature = args.pattern_feature)
        self.model.to(self.device)
        self.criterion = L2ContrastiveLoss(args.margin)
        self.criterion.to(self.device)
        self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        # milestones are epoch indices; the scheduler is stepped once per epoch
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
        self.start_epoch = 0
        self.best_loss = np.inf
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                # full checkpoint: restore model, optimizer and epoch counter
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                # weights-only checkpoint
                self.model.load_state_dict(torch.load(args.resume, self.device))
        self.save_list = Save_Handle(max_num=args.max_model_num)
    def train(self):
        """training process"""
        args = self.args
        for epoch in range(self.start_epoch, args.max_epoch):
            logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
            self.epoch = epoch
            self.train_epoch(epoch)
            if epoch % args.val_epoch == 0 and epoch >= args.val_start:
                self.val_epoch(epoch)
    def train_epoch(self, epoch):
        """Run one training epoch, visualize the first batch, save a checkpoint."""
        epoch_loss = AverageMeter()
        epoch_start = time.time()
        self.model.train()  # Set model to training mode
        for step, (input1, input2, target, label) in enumerate(self.dataloaders['train']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            target = target.to(self.device)
            with torch.set_grad_enabled(True):
                output1, output2 = self.model(input1, input2)
                loss = self.criterion(output1, output2, target)
                epoch_loss.update(loss.item(), input1.size(0))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            # visualize
            if step == 0:
                self.vis(epoch, 'train', input1, input2, target)
                self.emb(output1, label, epoch, 'train')
        # BUGFIX: MultiStepLR milestones count *epochs*, so the schedule must
        # advance once per epoch. It was previously stepped inside the batch
        # loop, decaying the learning rate after ~80 iterations instead of
        # 80 epochs.
        self.scheduler.step()
        logging.info('Epoch {} Train, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
        model_state_dic = self.model.state_dict()
        save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
        torch.save({
            'epoch': self.epoch,
            'optimizer_state_dict': self.optimizer.state_dict(),
            'model_state_dict': model_state_dic
        }, save_path)
        self.save_list.append(save_path)  # control the number of saved models
    def val_epoch(self, epoch):
        """Evaluate on the validation split and keep the best-loss model."""
        epoch_start = time.time()
        self.model.eval()  # Set model to evaluate mode
        epoch_loss = AverageMeter()
        for step, (input1, input2, target, label) in enumerate(self.dataloaders['val']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            target = target.to(self.device)
            with torch.set_grad_enabled(False):
                output1, output2 = self.model(input1, input2)
                loss = self.criterion(output1, output2, target)
                epoch_loss.update(loss.item(), input1.size(0))
            # visualize
            if step == 0:
                self.vis(epoch, 'val', input1, input2, target)
                self.emb(output1, label, epoch, 'val')
        logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
        model_state_dic = self.model.state_dict()
        if self.best_loss > epoch_loss.get_avg():
            self.best_loss = epoch_loss.get_avg()
            logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
            torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
--- FILE SEPARATOR ---
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets
from models.simple_siamese_net import SiameseNetwork
from models.cosine_contrastive_loss import CosineContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, LossGraphPloter
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10, get_simsiam_dataset
class SimSiamTrainer(Trainer):
    """SimSiam training loop: two augmented views per image, cosine LR schedule.

    Checkpoints every `check_point` epochs and keeps the best validation-loss
    weights as best_model.pth.
    """
    def setup(self):
        """initialize the datasets, model, loss and optimizer"""
        args = self.args
        self.vis = ImageDisplayer(args, self.save_dir)
        self.tr_graph = LossGraphPloter(self.save_dir)
        self.vl_graph = LossGraphPloter(self.save_dir)
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")
        if args.cifar10:
            self.datasets = {x: get_simsiam_dataset(args, x) for x in ['train', 'val']}
        else:
            self.datasets = {x: SpatialDataset(x,
                                               args.data_dir,
                                               args.crop_size,
                                               (args.div_row, args.div_col),
                                               args.aug) for x in ['train', 'val']}
        self.dataloaders = {x: DataLoader(self.datasets[x],
                                          batch_size=args.batch_size,
                                          shuffle=(True if x == 'train' else False),
                                          num_workers=args.num_workers*self.device_count,
                                          pin_memory=(True if x == 'train' else False),
                                          worker_init_fn=worker_init_fn) for x in ['train', 'val']}
        # Define model, loss, optim
        self.model = SiameseNetwork(args)
        self.model.to(self.device)
        self.criterion = CosineContrastiveLoss()
        self.criterion.to(self.device)
        self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        #self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
        # T_max counts epochs; the scheduler is stepped once per epoch
        self.scheduler = lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=args.max_epoch)
        self.start_epoch = 0
        self.best_loss = np.inf
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                # full checkpoint: restore model, optimizer and epoch counter
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                # weights-only checkpoint
                self.model.load_state_dict(torch.load(args.resume, self.device))
        self.save_list = Save_Handle(max_num=args.max_model_num)
    def train(self):
        """training process"""
        args = self.args
        for epoch in range(self.start_epoch, args.max_epoch):
            logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
            self.epoch = epoch
            self.train_epoch(epoch)
            if epoch % args.val_epoch == 0 and epoch >= args.val_start:
                self.val_epoch(epoch)
    def train_epoch(self, epoch):
        """One SimSiam training epoch with periodic checkpointing."""
        epoch_loss = AverageMeter()
        epoch_start = time.time()
        self.model.train()  # Set model to training mode
        for step, ((input1, input2), label) in enumerate(self.dataloaders['train']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            with torch.set_grad_enabled(True):
                (z1, z2), (p1, p2) = self.model(input1, input2)
                loss = self.criterion(z1, z2, p1, p2)
                epoch_loss.update(loss.item(), input1.size(0))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            # visualize
            if step == 0:
                self.vis(epoch, 'train', input1, input2, label)
        # BUGFIX: CosineAnnealingLR was stepped per *batch* while T_max is the
        # number of epochs, which completed the cosine cycle within the first
        # epochs. Step once per epoch instead.
        self.scheduler.step()
        logging.info('Epoch {} Train, Loss: {:.5f}, lr: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), self.optimizer.param_groups[0]['lr'], time.time()-epoch_start))
        self.tr_graph(self.epoch, epoch_loss.get_avg(), 'tr')
        if epoch % self.args.check_point == 0:
            model_state_dic = self.model.state_dict()
            save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
            torch.save({
                'epoch': self.epoch,
                'optimizer_state_dict': self.optimizer.state_dict(),
                'model_state_dict': model_state_dic
            }, save_path)
            self.save_list.append(save_path)  # control the number of saved models
    def val_epoch(self, epoch):
        """Evaluate, update the loss graph, and keep the best-loss model."""
        epoch_start = time.time()
        self.model.eval()  # Set model to evaluate mode
        epoch_loss = AverageMeter()
        for step, ((input1, input2), label) in enumerate(self.dataloaders['val']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            with torch.set_grad_enabled(False):
                (z1, z2), (p1, p2) = self.model(input1, input2)
                loss = self.criterion(z1, z2, p1, p2)
                epoch_loss.update(loss.item(), input1.size(0))
            # visualize
            if step == 0:
                self.vis(epoch, 'val', input1, input2, label)
        logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
        self.vl_graph(self.epoch, epoch_loss.get_avg(), 'vl')
        model_state_dic = self.model.state_dict()
        if self.best_loss > epoch_loss.get_avg():
            self.best_loss = epoch_loss.get_avg()
            logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
            torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
|
{
"imported_by": [],
"imports": [
"/utils/contrastive_trainer.py",
"/utils/simsiam_trainer.py"
]
}
|
junprog/contrastive-baseline
|
/utils/contrastive_trainer.py
|
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets
from models.siamese_net import SiameseNetwork
from models.l2_contrastive_loss import L2ContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, EmbeddingDisplayer
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10
class CoTrainer(Trainer):
    """Siamese training loop with L2 contrastive loss.

    Trains a SiameseNetwork on (anchor, posneg, target) pairs, visualizes the
    first batch each epoch, checkpoints each epoch, and keeps the best
    validation-loss weights as best_model.pth.
    """
    def setup(self):
        """initialize the datasets, model, loss and optimizer"""
        args = self.args
        self.vis = ImageDisplayer(args, self.save_dir)
        self.emb = EmbeddingDisplayer(args, self.save_dir)
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            logging.info('using {} gpus'.format(self.device_count))
        else:
            raise Exception("gpu is not available")
        if args.cifar10:
            # Download and create datasets (transform=None: the pair wrapper applies its own)
            or_train = datasets.CIFAR10(root="CIFAR10_Dataset", train=True, transform=None, download=True)
            or_val = datasets.CIFAR10(root="CIFAR10_Dataset", train=False, transform=None, download=True)
            # splits CIFAR10 into two streams
            self.datasets = {x: PosNegCifar10((or_train if x == 'train' else or_val),
                                              phase=x) for x in ['train', 'val']}
        else:
            self.datasets = {x: SpatialDataset(os.path.join(args.data_dir, x),
                                               args.crop_size,
                                               args.div_num,
                                               args.aug) for x in ['train', 'val']}
        self.dataloaders = {x: DataLoader(self.datasets[x],
                                          batch_size=args.batch_size,
                                          shuffle=(True if x == 'train' else False),
                                          num_workers=args.num_workers*self.device_count,
                                          pin_memory=(True if x == 'train' else False),
                                          worker_init_fn=worker_init_fn) for x in ['train', 'val']}
        # Define model, loss, optim
        self.model = SiameseNetwork(models.__dict__[args.arch], pattern_feature = args.pattern_feature)
        self.model.to(self.device)
        self.criterion = L2ContrastiveLoss(args.margin)
        self.criterion.to(self.device)
        self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
        self.start_epoch = 0
        self.best_loss = np.inf
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                # full checkpoint: restore model, optimizer and epoch counter
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                # weights-only checkpoint
                self.model.load_state_dict(torch.load(args.resume, self.device))
        self.save_list = Save_Handle(max_num=args.max_model_num)
    def train(self):
        """training process"""
        args = self.args
        for epoch in range(self.start_epoch, args.max_epoch):
            logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
            self.epoch = epoch
            self.train_epoch(epoch)
            if epoch % args.val_epoch == 0 and epoch >= args.val_start:
                self.val_epoch(epoch)
    def train_epoch(self, epoch):
        """Run one training epoch, visualize the first batch, save a checkpoint."""
        epoch_loss = AverageMeter()
        epoch_start = time.time()
        self.model.train()  # Set model to training mode
        for step, (input1, input2, target, label) in enumerate(self.dataloaders['train']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            target = target.to(self.device)
            with torch.set_grad_enabled(True):
                output1, output2 = self.model(input1, input2)
                loss = self.criterion(output1, output2, target)
                epoch_loss.update(loss.item(), input1.size(0))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # NOTE(review): scheduler.step() runs once per *batch*, but the
                # MultiStepLR milestones ([80, 120, ...]) look like epoch counts
                # given max_epoch=300 — the LR will decay after ~80 iterations.
                # This likely belongs once per epoch; confirm and fix.
                self.scheduler.step()
            # visualize
            if step == 0:
                self.vis(epoch, 'train', input1, input2, target)
                self.emb(output1, label, epoch, 'train')
        logging.info('Epoch {} Train, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
        model_state_dic = self.model.state_dict()
        save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
        torch.save({
            'epoch': self.epoch,
            'optimizer_state_dict': self.optimizer.state_dict(),
            'model_state_dict': model_state_dic
        }, save_path)
        self.save_list.append(save_path)  # control the number of saved models
    def val_epoch(self, epoch):
        """Evaluate on the validation split and keep the best-loss model."""
        epoch_start = time.time()
        self.model.eval()  # Set model to evaluate mode
        epoch_loss = AverageMeter()
        for step, (input1, input2, target, label) in enumerate(self.dataloaders['val']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            target = target.to(self.device)
            with torch.set_grad_enabled(False):
                output1, output2 = self.model(input1, input2)
                loss = self.criterion(output1, output2, target)
                epoch_loss.update(loss.item(), input1.size(0))
            # visualize
            if step == 0:
                self.vis(epoch, 'val', input1, input2, target)
                self.emb(output1, label, epoch, 'val')
        logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
        model_state_dic = self.model.state_dict()
        if self.best_loss > epoch_loss.get_avg():
            self.best_loss = epoch_loss.get_avg()
            logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
            torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
|
import os
import numpy as np
import torch
def worker_init_fn(worker_id):
    """Give each DataLoader worker a distinct, reproducible numpy seed."""
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
class Save_Handle(object):
    """Keeps at most `max_num` checkpoint paths, deleting the oldest file on overflow."""
    def __init__(self, max_num):
        self.save_list = []
        self.max_num = max_num
    def append(self, save_path):
        """Track `save_path`; when over capacity, drop and delete the oldest file."""
        if len(self.save_list) >= self.max_num:
            oldest = self.save_list.pop(0)
            if os.path.exists(oldest):
                os.remove(oldest)
        self.save_list.append(save_path)
class AverageMeter(object):
    """Computes and stores the average and current value."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = self.avg = self.sum = self.count = 0
    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / float(self.count)
    def get_avg(self):
        """Return the running average of all recorded values."""
        return self.avg
    def get_count(self):
        """Return the total number of recorded observations."""
        return self.count
## cannot be used during training (runs under no_grad)
@torch.no_grad()
def accuracy(meter, output1, output2, target):
    """Computes the accuracy over the predictions of both heads and records each in `meter`."""
    batch = float(target.size()[0])
    labels = target.squeeze().long().data
    for logit in [output1, output2]:
        hits = (torch.max(logit, 1)[1].data == labels).sum()
        meter.update(float(hits) / batch)
    return meter
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
import torch.nn.functional as F
class L2ContrastiveLoss(nn.Module):
    """
    Contrastive loss
    Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise
    Args :
        output1 & output2 : [N, dim]
        target : [N]
    """
    def __init__(self, margin=1.0):
        super().__init__()
        self.margin = margin
        self.eps = 1e-9  # keeps sqrt() differentiable at zero distance
    def forward(self, output1, output2, target, size_average=True):
        """Return the mean (or summed) contrastive loss over the batch."""
        labels = target.squeeze().float()
        sq_dist = (output2 - output1).pow(2).sum(1)  # squared L2 distances
        # similar pairs (label 1) are pulled together; dissimilar pairs
        # (label 0) are pushed at least `margin` apart
        pull = labels * sq_dist
        push = (1.0 - labels) * F.relu(self.margin - (sq_dist + self.eps).sqrt()).pow(2)
        losses = 0.5 * (pull + push)
        return losses.mean() if size_average else losses.sum()
--- FILE SEPARATOR ---
import os
import numpy as np
from PIL import Image
import torch
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
### Receives a torch tensor (batch) and draws it according to args.div_num
# ImageNet normalization statistics matching the dataset transforms.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def invnorm(img, N):
    """Undo ImageNet normalization for sample ``N`` of a batch tensor.

    Returns an HxWxC float numpy image suitable for matplotlib/PIL.
    """
    img = img[N,:,:,:].to('cpu').detach().numpy().copy()
    img = img.transpose(1,2,0)  # CHW -> HWC
    img = img*std+mean          # inverse of (x - mean) / std
    return img
class ImageDisplayer:
    """Render the first N (denormalized) samples of two batches side by side."""
    def __init__(self, args, save_fir):
        # N is number of batch to display
        self.args = args
        self.save_dir = save_fir
        self.N = args.visual_num  # number of batch samples to draw
    @torch.no_grad()
    def __call__(self, epoch, prefix, img1, img2, target):
        """Collect the first N samples of each batch and plot them.

        ``target`` may be None (e.g. SimSiam training); then no titles are
        drawn.
        """
        imgs1 = []
        imgs2 = []
        targets = []
        for n in range(self.N):
            imgs1.append(invnorm(img1,n))
            imgs2.append(invnorm(img2,n))
            if target is not None:
                targets.append(target[n].item())
            else:
                targets = None
        self.display_images(epoch, prefix, imgs1, imgs2, targets)
    def display_images(self, epoch, prefix, images1: [Image], images2: [Image], targets,
                    columns=2, width=8, height=8, label_wrap_length=50, label_font_size=8):
        """Draw each (im1, im2) pair on one subplot row and save the figure
        as images/imgs_<prefix>_<epoch>.png under the save directory."""
        if not (images1 and images2):
            print("No images to display.")
            return
        height = max(height, int(len(images1)/columns) * height)
        plt.figure(figsize=(width, height))
        i = 1
        if targets is not None:
            for (im1, im2, tar) in zip(images1, images2, targets):
                # images arrive as float arrays in [0, 1]; convert to uint8 PIL
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im2)
                i += 1
        else:
            for (im1, im2) in zip(images1, images2):
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.imshow(im2)
                i += 1
        plt.tight_layout()
        output_img_name = 'imgs_{}_{}.png'.format(prefix, epoch)
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
class EmbeddingDisplayer:
    """Scatter-plot 2-d embeddings colored by their CIFAR-10 class."""
    def __init__(self, args, save_fir):
        self.args = args
        self.save_dir = save_fir
        self.cifar10_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        # one fixed color per class (matplotlib default color cycle)
        self.colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                    '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                    '#bcbd22', '#17becf']
    @torch.no_grad()
    def __call__(self, embeddings, targets, epoch, prefix, xlim=None, ylim=None):
        """Save a scatter plot to images/emb_<prefix>_<epoch>.png.

        Only the first two embedding columns are plotted — assumes a 2-d
        embedding space (TODO confirm with the model's output size).
        """
        embeddings = embeddings.to('cpu').detach().numpy().copy()
        targets = targets.to('cpu').detach().numpy().copy()
        plt.figure(figsize=(10,10))
        for i in range(10):
            inds = np.where(targets==i)[0]
            plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=self.colors[i])
        if xlim:
            plt.xlim(xlim[0], xlim[1])
        if ylim:
            plt.ylim(ylim[0], ylim[1])
        plt.legend(self.cifar10_classes)
        output_img_name = 'emb_{}_{}.png'.format(prefix, epoch)
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
class LossGraphPloter:
    """Accumulate per-epoch losses and redraw <prefix>_loss.svg on each call."""
    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.epochs = []  # epochs seen so far (x axis)
        self.losses = []  # matching loss values (y axis)
    def __call__(self, epoch, loss, prefix):
        """Append one (epoch, loss) point and save the updated curve."""
        self.epochs.append(epoch)
        self.losses.append(loss)
        output_img_name = '{}_loss.svg'.format(prefix)
        plt.plot(self.epochs, self.losses)
        plt.title('Loss')
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
class AccLossGraphPloter:
    """Track train/val accuracy and loss per epoch; save both curves side by side."""
    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.tr_accs = []
        self.vl_accs = []
        self.tr_losses = []
        self.vl_losses = []
        self.epochs = []
    def __call__(self, epoch, tr_acc, vl_acc, tr_loss, vl_loss, prefix):
        """Append this epoch's metrics and redraw <prefix>_eval.svg
        (left panel: top-1 accuracy, right panel: loss)."""
        self.tr_accs.append(tr_acc)
        self.vl_accs.append(vl_acc)
        self.tr_losses.append(tr_loss)
        self.vl_losses.append(vl_loss)
        self.epochs.append(epoch)
        output_img_name = '{}_eval.svg'.format(prefix)
        fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10,4))
        axL.plot(self.epochs, self.tr_accs, label='train')
        axL.plot(self.epochs, self.vl_accs, label='val')
        axL.set_title('Top-1 Accuracy')
        axL.set_xlabel('epoch')
        axL.set_ylabel('acc [%]')
        axL.legend(loc="lower right")
        axR.plot(self.epochs, self.tr_losses, label='train')
        axR.plot(self.epochs, self.vl_losses, label='val')
        axR.set_title('Loss')
        axR.set_xlabel('epoch')
        axR.set_ylabel('loss')
        axR.legend(loc="upper right")
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
--- FILE SEPARATOR ---
# in : original image
# out : cropped img1 (anchor)
# cropped img2 (compete)
# target (positive img1 - img2 : 1, negative img1 - img2 : 0)
import os
from glob import glob
import random
import numpy as np
from PIL import Image
from PIL import ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms.functional as F
from torchvision import transforms
random.seed(765)
def divide_patches(img, row, col):
    """Split a PIL image into a row x col grid of equally sized patches.

    Remainder pixels on the right/bottom edge are discarded. Patches are
    returned in row-major order.
    """
    pw = img.size[0] // col  # patch width
    ph = img.size[1] // row  # patch height
    patches = []
    for r, top in enumerate(range(0, img.size[1], ph)):
        if r == row:
            break
        for c, left in enumerate(range(0, img.size[0], pw)):
            if c == col:
                break
            patches.append(img.crop((left, top, left + pw, top + ph)))
    return patches
def create_pos_pair(patches):
    """Pick one random patch and pair it with itself (positive pair, label 1)."""
    choice = random.randint(0, len(patches) - 1)
    return patches[choice], patches[choice], np.array([1])
def create_neg_pair(patches):
    """Pick two distinct random patches (negative pair, label 0).

    Bug fix: the sampling range previously stopped at ``len(patches) - 1``
    (exclusive), so the last patch could never be drawn and a 2-patch list
    raised ValueError; sample over the full index range instead.
    """
    idx = random.sample(range(len(patches)), k=2)
    return patches[idx[0]], patches[idx[1]], np.array([0])
def random_crop(im_h, im_w, crop_h, crop_w):
    """Return (top, left, crop_h, crop_w) for a random crop inside the image."""
    top = random.randint(0, im_h - crop_h)
    left = random.randint(0, im_w - crop_w)
    return top, left, crop_h, crop_w
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
    def __init__(self, sigma=[.1, 2.]):
        self.sigma = sigma  # [min, max] blur radius range
    def __call__(self, x):
        """Blur PIL image ``x`` with a radius drawn uniformly from the range."""
        radius = random.uniform(self.sigma[0], self.sigma[1])
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
class PosNegSpatialDataset(data.Dataset):
    """Patch-pair dataset: positive = the same patch twice, negative = two patches.

    Each image is divided into a ``divide_num`` grid; with probability 0.5 a
    positive pair (identical patch, target 1) or a negative pair (two
    different patches, target 0) is drawn; both views are augmented
    independently.
    """
    # divide_num : 3 -> 3x3= 9 paches
    def __init__(self, data_path, crop_size, divide_num=(3,3), aug=True):
        self.data_path = data_path
        self.im_list = sorted(glob(os.path.join(self.data_path, '*.jpg')))
        self.c_size = crop_size
        self.d_row = divide_num[0]
        self.d_col = divide_num[1]
        if aug:
            # SimCLR-style augmentation on top of a center crop
            self.aug = transforms.Compose([
                transforms.CenterCrop(self.c_size),
                transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
                transforms.RandomGrayscale(p=0.2),
                transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
                transforms.RandomHorizontalFlip()
            ])
        else:
            self.aug = transforms.CenterCrop(self.c_size)
        # PIL -> normalized tensor (ImageNet statistics)
        self.trans = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    def __len__(self):
        return len(self.im_list)
    def __getitem__(self, index):
        """Return (img1, img2, target, None); target is 1 (pos) or 0 (neg).

        NOTE(review): the trailing ``None`` cannot be batched by the default
        DataLoader collate function — confirm callers use a custom collate.
        """
        img_path = self.im_list[index]
        img = Image.open(img_path).convert('RGB')
        patches = divide_patches(img, self.d_row, self.d_col)
        if random.random() > 0.5:
            img1, img2, target = create_pos_pair(patches)
        else:
            img1, img2, target = create_neg_pair(patches)
        img1 = self.aug(img1)
        img2 = self.aug(img2)
        target = torch.from_numpy(target).long()
        img1 = self.trans(img1)
        img2 = self.trans(img2)
        return img1, img2, target, None
class SpatialDataset(data.Dataset):
    """Positive-pair patch dataset driven by a ``{phase}.txt`` image list.

    Always samples a positive pair (the same grid patch twice), applies one
    shared random crop window and then two independent augmentations.
    """
    # divide_num : 3 -> 3x3= 9 paches
    def __init__(self, phase, data_path, crop_size, divide_num=(3,3), aug=True):
        # NOTE(review): ``aug`` is accepted but never used — the augmentation
        # pipeline below is always applied; confirm whether a no-aug path was
        # intended.
        with open(os.path.join(data_path, '{}.txt'.format(phase)), 'r') as f:
            im_list = f.readlines()
        self.im_list = [im_name.replace('\n', '') for im_name in im_list]
        self.c_size = crop_size
        self.d_row = divide_num[0]
        self.d_col = divide_num[1]
        self.trans = transforms.Compose([
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    def __len__(self):
        return len(self.im_list)
    def __getitem__(self, index):
        """Return ((img1, img2), label); label is always np.array([1])."""
        img_path = self.im_list[index]
        img = Image.open(img_path).convert('RGB')
        patches = divide_patches(img, self.d_row, self.d_col)
        img1, img2, label = create_pos_pair(patches)
        assert img1.size == img2.size
        wd, ht = img1.size
        # one crop window shared by both views
        i, j, h, w = random_crop(ht, wd, self.c_size, self.c_size)
        img1 = F.crop(img1, i, j, h, w)
        img2 = F.crop(img2, i, j, h, w)
        img1 = self.trans(img1)
        img2 = self.trans(img2)
        imgs = (img1, img2)
        return imgs, label
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
class SiameseNetwork(nn.Module):
    """Twin-branch network: one shared encoder applied to two inputs.

    Args:
        model: torchvision-style constructor used when ``simple_model`` is False.
        pretrained: load pretrained weights and graft a 2-way head (non-simple path).
        simple_model: use the small built-in conv net instead of ``model``.
    """
    def __init__(self, model, pretrained=False, simple_model=False):
        super(SiameseNetwork, self).__init__()
        self.simple_model = simple_model
        if simple_model:
            # small LeNet-like encoder with a 2-d output head
            self.features = nn.Sequential(
                nn.Conv2d(3, 32, 5), nn.PReLU(),
                nn.MaxPool2d(2, stride=2),
                nn.Conv2d(32, 64, 5), nn.PReLU(),
                nn.MaxPool2d(2, stride=2),
                nn.Conv2d(64, 64, 5), nn.PReLU(),
                nn.MaxPool2d(2, stride=2),
            )
            self.classifier = nn.Sequential(
                nn.Linear(64 * 4 * 4, 256), nn.PReLU(),
                nn.Linear(256, 256), nn.PReLU(),
                nn.Linear(256, 2),
            )
        elif pretrained:
            self.encoder = model(pretrained=True)
            # keep the first six classifier layers, then add a 2-way output
            self.encoder.classifier = nn.Sequential(*[self.encoder.classifier[i] for i in range(6)])
            self.encoder.classifier.add_module('out', nn.Linear(4096, 2))
        else:
            self.encoder = model(num_classes=2)
    def forward_once(self, x):
        """Encode a single input batch."""
        if not self.simple_model:
            return self.encoder(x)
        feat = self.features(x)
        return self.classifier(feat.view(feat.size()[0], -1))
    def forward(self, input1, input2):
        """Run both inputs through the shared encoder; return both outputs."""
        return self.forward_once(input1), self.forward_once(input2)
--- FILE SEPARATOR ---
from typing import Callable, Optional
import random
from PIL import Image
import numpy as np
import torch
import torchvision
from torchvision import transforms
from torchvision.datasets import CIFAR10
np.random.seed(765)
random.seed(765)
class SupervisedPosNegCifar10(torch.utils.data.Dataset):
    """Label-aware CIFAR-10 pair dataset.

    Splits the base dataset into 80% anchors and 20% pos/neg candidates.
    Each item pairs an anchor with a same-class partner (target 1) or a
    different-class partner (target 0) drawn from the candidate split.
    """
    def __init__(self, dataset, phase):
        # split by some thresholds here 80% anchors, 20% for posnegs
        lengths = [int(len(dataset)*0.8), int(len(dataset)*0.2)]
        self.anchors, self.posnegs = torch.utils.data.random_split(dataset, lengths)
        if phase == 'train':
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                            transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                            transforms.RandomHorizontalFlip(0.5),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                            transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                            transforms.RandomHorizontalFlip(0.5),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        else:
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    def __len__(self):
        return len(self.anchors)
    def __getitem__(self, index):
        """Return (anchor, posneg, target, label).

        NOTE(review): ``label`` is reassigned to the partner's class below,
        so the returned label belongs to ``posneg``, not the anchor — verify
        that is what callers expect.
        """
        anchor, label = self.anchors[index]
        if self.anchor_transform is not None:
            anchor = self.anchor_transform(anchor)
        # now pair this up with an image from the same class in the second stream
        if random.random() > 0.5:
            # base-dataset indices of samples sharing the anchor's class,
            # restricted to indices that landed in the posneg split
            A = np.where(np.array(self.posnegs.dataset.targets) == label)[0]
            posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
            posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
            target = torch.tensor([1]).long()
        else:
            A = np.where(np.array(self.posnegs.dataset.targets) != label)[0]
            posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
            posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
            target = torch.tensor([0]).long()
        if self.posneg_transform is not None:
            posneg = self.posneg_transform(posneg)
        return anchor, posneg, target, label
class PosNegCifar10(torch.utils.data.Dataset):
    """Label-free CIFAR-10 pair dataset.

    Positive pairs are the same image twice (augmented independently);
    negative pairs are two different indices — note there is no label check,
    so a "negative" may coincidentally share the anchor's class.
    """
    def __init__(self, dataset, phase):
        # split by some thresholds here 80% anchors, 20% for posnegs
        self.dataset = dataset
        if phase == 'train':
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                            transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                            transforms.RandomHorizontalFlip(0.5),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                            transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                            transforms.RandomHorizontalFlip(0.5),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        else:
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, index):
        """Return (anchor, posneg, target, label); target 1 = positive pair."""
        anchor, label = self.dataset[index]
        # pair the anchor with itself (positive) or any other index (negative)
        if random.random() > 0.5:
            posneg = anchor
            target = torch.tensor([1]).long()
        else:
            # resample until we get an index different from the anchor's
            while True:
                neg_idx = random.randint(0, len(self.dataset)-1)
                if neg_idx != index:
                    break
            posneg, label = self.dataset[neg_idx]
            target = torch.tensor([0]).long()
        if self.anchor_transform is not None:
            anchor = self.anchor_transform(anchor)
        if self.posneg_transform is not None:
            posneg = self.posneg_transform(posneg)
        return anchor, posneg, target, label
### Simple Siamese code
# ImageNet channel statistics as [mean, std], shared by the transforms below.
imagenet_mean_std = [[0.485, 0.456, 0.406],[0.229, 0.224, 0.225]]
class SimSiamTransform():
    """Two-view augmentation for SimSiam: one image in, (x1, x2) out.

    Train mode applies the SimCLR-style stochastic pipeline twice;
    eval mode applies a deterministic resize + center crop twice.
    """
    def __init__(self, image_size, train, mean_std=imagenet_mean_std):
        self.train = train
        if self.train:
            image_size = 224 if image_size is None else image_size # by default simsiam use image size 224
            p_blur = 0.5 if image_size > 32 else 0 # exclude cifar
            # the paper didn't specify this, feel free to change this value
            # I use the setting from simclr which is 50% chance applying the gaussian blur
            # the 32 is prepared for cifar training where they disabled gaussian blur
            self.transform = transforms.Compose([
                transforms.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomApply([transforms.ColorJitter(0.4,0.4,0.4,0.1)], p=0.8),
                transforms.RandomGrayscale(p=0.2),
                transforms.RandomApply([transforms.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=p_blur),
                transforms.ToTensor(),
                transforms.Normalize(*mean_std)
            ])
        else:
            self.transform = transforms.Compose([
                transforms.Resize(int(image_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                transforms.Normalize(*mean_std)
            ])
    def __call__(self, x):
        """Apply the same stochastic pipeline twice -> two independent views."""
        x1 = self.transform(x)
        x2 = self.transform(x)
        return x1, x2
def get_simsiam_dataset(args, phase, download=True, debug_subset_size=None):
    """Build a CIFAR-10 dataset for SimSiam pre-training or linear evaluation.

    Args:
        args: namespace providing ``crop_size``.
        phase: 'train' / 'val' (two-view SimSiam transforms) or
               'linear_train' / 'linear_val' (single-view classifier transforms).
        download: download CIFAR-10 if it is missing.
        debug_subset_size: optionally truncate to the first N samples.

    Raises:
        ValueError: for an unknown ``phase`` (previously surfaced as a
        confusing UnboundLocalError on ``transform``).
    """
    if phase == 'train':
        train = True
        transform = SimSiamTransform(args.crop_size, train)
    elif phase == 'val':
        train = False
        transform = SimSiamTransform(args.crop_size, train)
    elif phase == 'linear_train':
        train = True
        transform = transforms.Compose([
            transforms.RandomResizedCrop(args.crop_size, scale=(0.08, 1.0), ratio=(3.0/4.0,4.0/3.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(*imagenet_mean_std)
        ])
    elif phase == 'linear_val':
        train = False
        transform = transforms.Compose([
            transforms.Resize(int(args.crop_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256
            transforms.CenterCrop(args.crop_size),
            transforms.ToTensor(),
            transforms.Normalize(*imagenet_mean_std)
        ])
    else:
        raise ValueError('unknown phase: {}'.format(phase))
    dataset = torchvision.datasets.CIFAR10(root="CIFAR10_Dataset", train=train, transform=transform, download=download)
    if debug_subset_size is not None:
        # take only one batch; re-expose the metadata the Subset wrapper hides
        dataset = torch.utils.data.Subset(dataset, range(0, debug_subset_size))
        dataset.classes = dataset.dataset.classes
        dataset.targets = dataset.dataset.targets
    return dataset
|
{
"imported_by": [
"/train.py"
],
"imports": [
"/utils/helper.py",
"/models/l2_contrastive_loss.py",
"/utils/visualizer.py",
"/datasets/spatial.py",
"/models/siamese_net.py",
"/datasets/cifar10.py"
]
}
|
junprog/contrastive-baseline
|
/utils/simsiam_trainer.py
|
import os
import sys
import time
import logging
import numpy as np
import torch
from torch import optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
import torchvision.models as models
import torchvision.datasets as datasets
from models.simple_siamese_net import SiameseNetwork
from models.cosine_contrastive_loss import CosineContrastiveLoss
from utils.trainer import Trainer
from utils.helper import Save_Handle, AverageMeter, worker_init_fn
from utils.visualizer import ImageDisplayer, LossGraphPloter
from datasets.spatial import SpatialDataset
from datasets.cifar10 import PosNegCifar10, get_simsiam_dataset
class SimSiamTrainer(Trainer):
    """Training loop for SimSiam self-supervised pre-training (CUDA-only)."""
    def setup(self):
        """initialize the datasets, model, loss and optimizer"""
        args = self.args
        self.vis = ImageDisplayer(args, self.save_dir)
        self.tr_graph = LossGraphPloter(self.save_dir)
        self.vl_graph = LossGraphPloter(self.save_dir)
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            logging.info('using {} gpus'.format(self.device_count))
        else:
            # training is deliberately CUDA-only
            raise Exception("gpu is not available")
        # choose between CIFAR-10 (two-view SimSiam transforms) and the
        # patch-pair SpatialDataset driven by {phase}.txt file lists
        if args.cifar10:
            self.datasets = {x: get_simsiam_dataset(args, x) for x in ['train', 'val']}
        else:
            self.datasets = {x: SpatialDataset(x,
                                               args.data_dir,
                                               args.crop_size,
                                               (args.div_row, args.div_col),
                                               args.aug) for x in ['train', 'val']}
        self.dataloaders = {x: DataLoader(self.datasets[x],
                                          batch_size=args.batch_size,
                                          shuffle=(True if x == 'train' else False),
                                          num_workers=args.num_workers*self.device_count,
                                          pin_memory=(True if x == 'train' else False),
                                          worker_init_fn=worker_init_fn) for x in ['train', 'val']}
        # Define model, loss, optim
        self.model = SiameseNetwork(args)
        self.model.to(self.device)
        self.criterion = CosineContrastiveLoss()
        self.criterion.to(self.device)
        self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        #self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
        self.scheduler = lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=args.max_epoch)
        self.start_epoch = 0
        self.best_loss = np.inf
        if args.resume:
            suf = args.resume.rsplit('.', 1)[-1]
            if suf == 'tar':
                # full checkpoint: weights + optimizer state + epoch counter
                checkpoint = torch.load(args.resume, self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                self.start_epoch = checkpoint['epoch'] + 1
            elif suf == 'pth':
                # weights-only checkpoint
                self.model.load_state_dict(torch.load(args.resume, self.device))
        self.save_list = Save_Handle(max_num=args.max_model_num)
    def train(self):
        """training process"""
        args = self.args
        for epoch in range(self.start_epoch, args.max_epoch):
            logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
            self.epoch = epoch
            self.train_epoch(epoch)
            # validate every val_epoch epochs once val_start is reached
            if epoch % args.val_epoch == 0 and epoch >= args.val_start:
                self.val_epoch(epoch)
    def train_epoch(self, epoch):
        """Run one training epoch; log/plot the mean loss and checkpoint."""
        epoch_loss = AverageMeter()
        epoch_start = time.time()
        self.model.train() # Set model to training mode
        for step, ((input1, input2), label) in enumerate(self.dataloaders['train']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            with torch.set_grad_enabled(True):
                # model returns embeddings (z1, z2) and predictor outputs (p1, p2)
                (z1, z2), (p1, p2) = self.model(input1, input2)
                loss = self.criterion(z1, z2, p1, p2)
                epoch_loss.update(loss.item(), input1.size(0))
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                # NOTE(review): scheduler is stepped per batch while T_max is
                # given in epochs — confirm the intended annealing rate.
                self.scheduler.step()
            # visualize
            if step == 0:
                self.vis(epoch, 'train', input1, input2, label)
                pass
        logging.info('Epoch {} Train, Loss: {:.5f}, lr: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), self.optimizer.param_groups[0]['lr'], time.time()-epoch_start))
        self.tr_graph(self.epoch, epoch_loss.get_avg(), 'tr')
        if epoch % self.args.check_point == 0:
            model_state_dic = self.model.state_dict()
            save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
            torch.save({
                'epoch': self.epoch,
                'optimizer_state_dict': self.optimizer.state_dict(),
                'model_state_dict': model_state_dic
            }, save_path)
            self.save_list.append(save_path) # control the number of saved models
    def val_epoch(self, epoch):
        """Run one validation epoch; keep the best (lowest-loss) weights."""
        epoch_start = time.time()
        self.model.eval() # Set model to evaluate mode
        epoch_loss = AverageMeter()
        for step, ((input1, input2), label) in enumerate(self.dataloaders['val']):
            input1 = input1.to(self.device)
            input2 = input2.to(self.device)
            with torch.set_grad_enabled(False):
                (z1, z2), (p1, p2) = self.model(input1, input2)
                loss = self.criterion(z1, z2, p1, p2)
                epoch_loss.update(loss.item(), input1.size(0))
            # visualize
            if step == 0:
                self.vis(epoch, 'val', input1, input2, label)
                pass
        logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
                     .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
        self.vl_graph(self.epoch, epoch_loss.get_avg(), 'vl')
        model_state_dic = self.model.state_dict()
        if self.best_loss > epoch_loss.get_avg():
            self.best_loss = epoch_loss.get_avg()
            logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
            torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth'))
|
import os
import numpy as np
import torch
def worker_init_fn(worker_id):
    """Seed numpy independently in each DataLoader worker process.

    Derives the seed from the parent process's current numpy seed state
    plus the worker id, so workers do not all produce the same random
    augmentations.
    """
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
class Save_Handle(object):
    """Keep at most ``max_num`` saved checkpoint paths on disk (FIFO).

    Appending beyond the limit evicts the oldest recorded path and, if the
    file still exists, deletes it from disk.
    """
    def __init__(self, max_num):
        self.save_list = []      # paths currently kept, oldest first
        self.max_num = max_num   # capacity of the rotation window
    def append(self, save_path):
        """Record ``save_path``; evict and delete the oldest when over capacity."""
        if len(self.save_list) >= self.max_num:
            oldest = self.save_list.pop(0)
            if os.path.exists(oldest):
                os.remove(oldest)
        self.save_list.append(save_path)
class AverageMeter(object):
    """Track the most recent value and a running weighted average."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running weighted mean
        self.sum = 0    # weighted sum of all values
        self.count = 0  # total weight seen so far
    def update(self, val, n=1):
        """Fold in ``val`` with weight ``n`` and refresh the mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / float(self.count)
    def get_avg(self):
        """Return the current weighted average."""
        return self.avg
    def get_count(self):
        """Return the total weight accumulated so far."""
        return self.count
## cannot use in training
@torch.no_grad()
def accuracy(meter, output1, output2, target):
    """Record the top-1 accuracy of both logit batches into ``meter``.

    ``meter.update`` is called once per logit tensor (twice in total);
    the updated meter is returned.
    """
    for logits in (output1, output2):
        preds = torch.max(logits, 1)[1]
        hits = (preds.data == target.squeeze().long().data).sum()
        meter.update(float(hits) / float(target.size()[0]))
    return meter
--- FILE SEPARATOR ---
import os
import numpy as np
from PIL import Image
import torch
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
### Receives a torch tensor (batch) and draws it according to args.div_num
# ImageNet normalization statistics matching the dataset transforms.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def invnorm(img, N):
    """Undo ImageNet normalization for sample ``N`` of a batch tensor.

    Returns an HxWxC float numpy image suitable for matplotlib/PIL.
    """
    img = img[N,:,:,:].to('cpu').detach().numpy().copy()
    img = img.transpose(1,2,0)  # CHW -> HWC
    img = img*std+mean          # inverse of (x - mean) / std
    return img
class ImageDisplayer:
    """Render the first N (denormalized) samples of two batches side by side."""
    def __init__(self, args, save_fir):
        # N is number of batch to display
        self.args = args
        self.save_dir = save_fir
        self.N = args.visual_num  # number of batch samples to draw
    @torch.no_grad()
    def __call__(self, epoch, prefix, img1, img2, target):
        """Collect the first N samples of each batch and plot them.

        ``target`` may be None (e.g. SimSiam training); then no titles are
        drawn.
        """
        imgs1 = []
        imgs2 = []
        targets = []
        for n in range(self.N):
            imgs1.append(invnorm(img1,n))
            imgs2.append(invnorm(img2,n))
            if target is not None:
                targets.append(target[n].item())
            else:
                targets = None
        self.display_images(epoch, prefix, imgs1, imgs2, targets)
    def display_images(self, epoch, prefix, images1: [Image], images2: [Image], targets,
                    columns=2, width=8, height=8, label_wrap_length=50, label_font_size=8):
        """Draw each (im1, im2) pair on one subplot row and save the figure
        as images/imgs_<prefix>_<epoch>.png under the save directory."""
        if not (images1 and images2):
            print("No images to display.")
            return
        height = max(height, int(len(images1)/columns) * height)
        plt.figure(figsize=(width, height))
        i = 1
        if targets is not None:
            for (im1, im2, tar) in zip(images1, images2, targets):
                # images arrive as float arrays in [0, 1]; convert to uint8 PIL
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.title(tar, fontsize=20)
                plt.imshow(im2)
                i += 1
        else:
            for (im1, im2) in zip(images1, images2):
                im1 = Image.fromarray(np.uint8(im1*255))
                im2 = Image.fromarray(np.uint8(im2*255))
                plt.subplot(self.N, 2, i)
                plt.imshow(im1)
                i += 1
                plt.subplot(self.N, 2, i)
                plt.imshow(im2)
                i += 1
        plt.tight_layout()
        output_img_name = 'imgs_{}_{}.png'.format(prefix, epoch)
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
class EmbeddingDisplayer:
    """Scatter-plot 2-d embeddings colored by their CIFAR-10 class."""
    def __init__(self, args, save_fir):
        self.args = args
        self.save_dir = save_fir
        self.cifar10_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        # one fixed color per class (matplotlib default color cycle)
        self.colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                    '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                    '#bcbd22', '#17becf']
    @torch.no_grad()
    def __call__(self, embeddings, targets, epoch, prefix, xlim=None, ylim=None):
        """Save a scatter plot to images/emb_<prefix>_<epoch>.png.

        Only the first two embedding columns are plotted — assumes a 2-d
        embedding space (TODO confirm with the model's output size).
        """
        embeddings = embeddings.to('cpu').detach().numpy().copy()
        targets = targets.to('cpu').detach().numpy().copy()
        plt.figure(figsize=(10,10))
        for i in range(10):
            inds = np.where(targets==i)[0]
            plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=self.colors[i])
        if xlim:
            plt.xlim(xlim[0], xlim[1])
        if ylim:
            plt.ylim(ylim[0], ylim[1])
        plt.legend(self.cifar10_classes)
        output_img_name = 'emb_{}_{}.png'.format(prefix, epoch)
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
class LossGraphPloter:
    """Accumulate per-epoch losses and redraw <prefix>_loss.svg on each call."""
    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.epochs = []  # epochs seen so far (x axis)
        self.losses = []  # matching loss values (y axis)
    def __call__(self, epoch, loss, prefix):
        """Append one (epoch, loss) point and save the updated curve."""
        self.epochs.append(epoch)
        self.losses.append(loss)
        output_img_name = '{}_loss.svg'.format(prefix)
        plt.plot(self.epochs, self.losses)
        plt.title('Loss')
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
class AccLossGraphPloter:
    """Track train/val accuracy and loss per epoch; save both curves side by side."""
    def __init__(self, save_fir):
        self.save_dir = save_fir
        self.tr_accs = []
        self.vl_accs = []
        self.tr_losses = []
        self.vl_losses = []
        self.epochs = []
    def __call__(self, epoch, tr_acc, vl_acc, tr_loss, vl_loss, prefix):
        """Append this epoch's metrics and redraw <prefix>_eval.svg
        (left panel: top-1 accuracy, right panel: loss)."""
        self.tr_accs.append(tr_acc)
        self.vl_accs.append(vl_acc)
        self.tr_losses.append(tr_loss)
        self.vl_losses.append(vl_loss)
        self.epochs.append(epoch)
        output_img_name = '{}_eval.svg'.format(prefix)
        fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10,4))
        axL.plot(self.epochs, self.tr_accs, label='train')
        axL.plot(self.epochs, self.vl_accs, label='val')
        axL.set_title('Top-1 Accuracy')
        axL.set_xlabel('epoch')
        axL.set_ylabel('acc [%]')
        axL.legend(loc="lower right")
        axR.plot(self.epochs, self.tr_losses, label='train')
        axR.plot(self.epochs, self.vl_losses, label='val')
        axR.set_title('Loss')
        axR.set_xlabel('epoch')
        axR.set_ylabel('loss')
        axR.legend(loc="upper right")
        plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
        plt.close()
--- FILE SEPARATOR ---
# in : original image
# out : cropped img1 (anchor)
# cropped img2 (compete)
# target (positive img1 - img2 : 1, negative img1 - img2 : 0)
import os
from glob import glob
import random
import numpy as np
from PIL import Image
from PIL import ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms.functional as F
from torchvision import transforms
random.seed(765)
def divide_patches(img, row, col):
    """Split a PIL image into a row x col grid of equally sized patches.

    Remainder pixels on the right/bottom edge are discarded. Patches are
    returned in row-major order.
    """
    pw = img.size[0] // col  # patch width
    ph = img.size[1] // row  # patch height
    patches = []
    for r, top in enumerate(range(0, img.size[1], ph)):
        if r == row:
            break
        for c, left in enumerate(range(0, img.size[0], pw)):
            if c == col:
                break
            patches.append(img.crop((left, top, left + pw, top + ph)))
    return patches
def create_pos_pair(patches):
    """Pick one random patch and pair it with itself (positive pair, label 1)."""
    choice = random.randint(0, len(patches) - 1)
    return patches[choice], patches[choice], np.array([1])
def create_neg_pair(patches):
    """Pick two distinct random patches (negative pair, label 0).

    Bug fix: the sampling range previously stopped at ``len(patches) - 1``
    (exclusive), so the last patch could never be drawn and a 2-patch list
    raised ValueError; sample over the full index range instead.
    """
    idx = random.sample(range(len(patches)), k=2)
    return patches[idx[0]], patches[idx[1]], np.array([0])
def random_crop(im_h, im_w, crop_h, crop_w):
    """Return (top, left, crop_h, crop_w) for a random crop inside the image."""
    top = random.randint(0, im_h - crop_h)
    left = random.randint(0, im_w - crop_w)
    return top, left, crop_h, crop_w
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
    def __init__(self, sigma=[.1, 2.]):
        self.sigma = sigma  # [min, max] blur radius range
    def __call__(self, x):
        """Blur PIL image ``x`` with a radius drawn uniformly from the range."""
        radius = random.uniform(self.sigma[0], self.sigma[1])
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
class PosNegSpatialDataset(data.Dataset):
    """Patch-pair dataset: positive = the same patch twice, negative = two patches.

    Each image is divided into a ``divide_num`` grid; with probability 0.5 a
    positive pair (identical patch, target 1) or a negative pair (two
    different patches, target 0) is drawn; both views are augmented
    independently.
    """
    # divide_num : 3 -> 3x3= 9 paches
    def __init__(self, data_path, crop_size, divide_num=(3,3), aug=True):
        self.data_path = data_path
        self.im_list = sorted(glob(os.path.join(self.data_path, '*.jpg')))
        self.c_size = crop_size
        self.d_row = divide_num[0]
        self.d_col = divide_num[1]
        if aug:
            # SimCLR-style augmentation on top of a center crop
            self.aug = transforms.Compose([
                transforms.CenterCrop(self.c_size),
                transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
                transforms.RandomGrayscale(p=0.2),
                transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
                transforms.RandomHorizontalFlip()
            ])
        else:
            self.aug = transforms.CenterCrop(self.c_size)
        # PIL -> normalized tensor (ImageNet statistics)
        self.trans = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    def __len__(self):
        return len(self.im_list)
    def __getitem__(self, index):
        """Return (img1, img2, target, None); target is 1 (pos) or 0 (neg).

        NOTE(review): the trailing ``None`` cannot be batched by the default
        DataLoader collate function — confirm callers use a custom collate.
        """
        img_path = self.im_list[index]
        img = Image.open(img_path).convert('RGB')
        patches = divide_patches(img, self.d_row, self.d_col)
        if random.random() > 0.5:
            img1, img2, target = create_pos_pair(patches)
        else:
            img1, img2, target = create_neg_pair(patches)
        img1 = self.aug(img1)
        img2 = self.aug(img2)
        target = torch.from_numpy(target).long()
        img1 = self.trans(img1)
        img2 = self.trans(img2)
        return img1, img2, target, None
class SpatialDataset(data.Dataset):
    """Positive-pair patch dataset driven by a ``{phase}.txt`` image list.

    Always samples a positive pair (the same grid patch twice), applies one
    shared random crop window and then two independent augmentations.
    """
    # divide_num : 3 -> 3x3= 9 paches
    def __init__(self, phase, data_path, crop_size, divide_num=(3,3), aug=True):
        # NOTE(review): ``aug`` is accepted but never used — the augmentation
        # pipeline below is always applied; confirm whether a no-aug path was
        # intended.
        with open(os.path.join(data_path, '{}.txt'.format(phase)), 'r') as f:
            im_list = f.readlines()
        self.im_list = [im_name.replace('\n', '') for im_name in im_list]
        self.c_size = crop_size
        self.d_row = divide_num[0]
        self.d_col = divide_num[1]
        self.trans = transforms.Compose([
            transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    def __len__(self):
        return len(self.im_list)
    def __getitem__(self, index):
        """Return ((img1, img2), label); label is always np.array([1])."""
        img_path = self.im_list[index]
        img = Image.open(img_path).convert('RGB')
        patches = divide_patches(img, self.d_row, self.d_col)
        img1, img2, label = create_pos_pair(patches)
        assert img1.size == img2.size
        wd, ht = img1.size
        # one crop window shared by both views
        i, j, h, w = random_crop(ht, wd, self.c_size, self.c_size)
        img1 = F.crop(img1, i, j, h, w)
        img2 = F.crop(img2, i, j, h, w)
        img1 = self.trans(img1)
        img2 = self.trans(img2)
        imgs = (img1, img2)
        return imgs, label
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
import torch.nn.functional as F
def D(p, z, version='simplified'): # negative cosine similarity
    """Negative cosine similarity between predictions ``p`` and targets ``z``.

    ``z`` is detached (stop-gradient) — the key ingredient of SimSiam.
    'original' normalizes manually; 'simplified' is the equivalent one-liner.
    """
    if version == 'simplified':
        return - F.cosine_similarity(p, z.detach(), dim=-1).mean()
    if version == 'original':
        p_n = F.normalize(p, dim=1)            # l2-normalize
        z_n = F.normalize(z.detach(), dim=1)   # stop gradient, l2-normalize
        return -(p_n * z_n).sum(dim=1).mean()
    raise Exception
class CosineContrastiveLoss(nn.Module):
    """SimSiam objective: symmetric negative cosine similarity with stop-grad.

    When predictor outputs p1/p2 are given, the symmetric SimSiam loss
    D(p1, z2)/2 + D(p2, z1)/2 is used; otherwise the plain D(z1, z2).
    """
    def __init__(self):
        super().__init__()
    def forward(self, z1, z2, p1, p2):
        # drop trailing singleton dims from pooled encoder outputs
        if z1.dim() != 2:
            z1 = z1.squeeze()
        if z2.dim() != 2:
            z2 = z2.squeeze()
        if p1 is None and p2 is None:
            return D(z1, z2)
        return D(p1, z2) / 2 + D(p2, z1) / 2
--- FILE SEPARATOR ---
from typing import Callable, Optional
import random
from PIL import Image
import numpy as np
import torch
import torchvision
from torchvision import transforms
from torchvision.datasets import CIFAR10
# Fix both RNGs so anchor/pos-neg pairing and splits are reproducible.
np.random.seed(765)
random.seed(765)
class SupervisedPosNegCifar10(torch.utils.data.Dataset):
    """CIFAR10 wrapper producing supervised (anchor, posneg, target, label) pairs.

    The underlying dataset is split 80/20 into anchors and a pos/neg pool.
    With probability 0.5 the second image is drawn from the SAME class as the
    anchor (target=1), otherwise from a DIFFERENT class (target=0).
    """
    def __init__(self, dataset, phase):
        # split by some thresholds here 80% anchors, 20% for posnegs
        lengths = [int(len(dataset)*0.8), int(len(dataset)*0.2)]
        self.anchors, self.posnegs = torch.utils.data.random_split(dataset, lengths)

        if phase == 'train':
            # Train: upscale to 64px with random crop / flip augmentation.
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                                                        transforms.RandomHorizontalFlip(0.5),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                                                        transforms.RandomHorizontalFlip(0.5),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        else:
            # Eval: deterministic resize only.
            self.anchor_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
            self.posneg_transform = transforms.Compose([transforms.Resize(64),
                                                        transforms.ToTensor(),
                                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    def __len__(self):
        # Length is the number of anchors; the 20% pool is only sampled from.
        return len(self.anchors)

    def __getitem__(self, index):
        anchor, label = self.anchors[index]

        if self.anchor_transform is not None:
            anchor = self.anchor_transform(anchor)

        # now pair this up with an image from the same class in the second stream
        # Index gymnastics: `A` holds dataset-level indices of the wanted
        # class; np.in1d restricts them to indices inside the posneg Subset,
        # then the chosen dataset index is mapped back to a Subset position.
        if random.random() > 0.5:
            A = np.where(np.array(self.posnegs.dataset.targets) == label)[0]
            posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
            posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
            target = torch.tensor([1]).long()
        else:
            A = np.where(np.array(self.posnegs.dataset.targets) != label)[0]
            posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
            posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
            target = torch.tensor([0]).long()
        # NOTE(review): `label` is overwritten above, so the returned label is
        # the pos/neg image's class (differs from the anchor's in the negative
        # branch) — confirm downstream consumers expect that.

        if self.posneg_transform is not None:
            posneg = self.posneg_transform(posneg)
        return anchor, posneg, target, label
class PosNegCifar10(torch.utils.data.Dataset):
    """Wraps a CIFAR10-style dataset to emit (anchor, posneg, target, label).

    With probability 0.5 the second image is the anchor itself (target=1);
    otherwise a different random index is drawn as a negative (target=0).
    """

    def __init__(self, dataset, phase):
        self.dataset = dataset
        norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        if phase == 'train':
            # Train: upscale to 64px with random crop / flip augmentation.
            pipeline = [
                transforms.Resize(64),
                transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                norm,
            ]
        else:
            # Eval: deterministic resize only.
            pipeline = [
                transforms.Resize(64),
                transforms.ToTensor(),
                norm,
            ]
        self.anchor_transform = transforms.Compose(pipeline)
        self.posneg_transform = transforms.Compose(list(pipeline))

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        anchor, label = self.dataset[index]
        # now pair this up with an image from the same class in the second stream
        if random.random() > 0.5:
            posneg = anchor
            target = torch.tensor([1]).long()
        else:
            # Draw any index other than the anchor's own.
            while True:
                neg_idx = random.randint(0, len(self.dataset) - 1)
                if neg_idx != index:
                    break
            posneg, label = self.dataset[neg_idx]
            target = torch.tensor([0]).long()
        if self.anchor_transform is not None:
            anchor = self.anchor_transform(anchor)
        if self.posneg_transform is not None:
            posneg = self.posneg_transform(posneg)
        return anchor, posneg, target, label
### Simple Siamese code
imagenet_mean_std = [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]


class SimSiamTransform():
    """Produce two independently-augmented views of one image (SimSiam)."""

    def __init__(self, image_size, train, mean_std=imagenet_mean_std):
        self.train = train
        if self.train:
            # SimSiam defaults to 224px inputs when none is given.
            image_size = 224 if image_size is None else image_size
            # Gaussian blur follows the SimCLR recipe (applied with p=0.5);
            # it is disabled for 32px inputs, i.e. cifar-scale training.
            p_blur = 0.5 if image_size > 32 else 0
            ops = [
                transforms.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
                transforms.RandomGrayscale(p=0.2),
                transforms.RandomApply(
                    [transforms.GaussianBlur(kernel_size=image_size // 20 * 2 + 1, sigma=(0.1, 2.0))],
                    p=p_blur),
                transforms.ToTensor(),
                transforms.Normalize(*mean_std),
            ]
        else:
            # Standard eval pipeline: resize (224 -> 256) then center-crop.
            ops = [
                transforms.Resize(int(image_size * (8 / 7)), interpolation=Image.BICUBIC),
                transforms.CenterCrop(image_size),
                transforms.ToTensor(),
                transforms.Normalize(*mean_std),
            ]
        self.transform = transforms.Compose(ops)

    def __call__(self, x):
        return self.transform(x), self.transform(x)
def get_simsiam_dataset(args, phase, download=True, debug_subset_size=None):
    """Build the CIFAR10 dataset for SimSiam pre-training or linear evaluation.

    Args:
        args: namespace providing ``crop_size``.
        phase: 'train' / 'val' (two-view SimSiamTransform) or
            'linear_train' / 'linear_val' (standard supervised transforms).
        download: passed through to torchvision's CIFAR10.
        debug_subset_size: when set, truncate to the first N examples.

    Raises:
        ValueError: for an unrecognized phase. (The original left ``transform``
        unbound and crashed later with an UnboundLocalError instead.)
    """
    if phase == 'train':
        train = True
        transform = SimSiamTransform(args.crop_size, train)
    elif phase == 'val':
        train = False
        transform = SimSiamTransform(args.crop_size, train)
    elif phase == 'linear_train':
        train = True
        transform = transforms.Compose([
            transforms.RandomResizedCrop(args.crop_size, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0), interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(*imagenet_mean_std)
        ])
    elif phase == 'linear_val':
        train = False
        transform = transforms.Compose([
            transforms.Resize(int(args.crop_size*(8/7)), interpolation=Image.BICUBIC),  # 224 -> 256
            transforms.CenterCrop(args.crop_size),
            transforms.ToTensor(),
            transforms.Normalize(*imagenet_mean_std)
        ])
    else:
        raise ValueError('unknown phase: {!r}'.format(phase))
    dataset = torchvision.datasets.CIFAR10(root="CIFAR10_Dataset", train=train, transform=transform, download=download)
    if debug_subset_size is not None:
        # Keep only one batch worth of data for quick debugging; re-expose the
        # attributes that Subset hides from downstream code.
        dataset = torch.utils.data.Subset(dataset, range(0, debug_subset_size))
        dataset.classes = dataset.dataset.classes
        dataset.targets = dataset.dataset.targets
    return dataset
|
{
"imported_by": [
"/train.py"
],
"imports": [
"/utils/helper.py",
"/utils/visualizer.py",
"/datasets/spatial.py",
"/models/cosine_contrastive_loss.py",
"/datasets/cifar10.py"
]
}
|
Peroxidess/Ablation-Time-Prediction-Model
|
/Regression/src/eval.py
|
from model.history_ import plot_metric_df
import pandas as pd
import matplotlib.pyplot as plt
import os
# Load the per-model metric CSVs and render them with plot_metric_df.
# (Removed: an unused `xx = os.getcwd()`, a builtin-shadowing `dir` variable
# and a dead trailing `pass`.)
path_root = '../report/result/'
task_name = 'ablation_time_all'
# One CSV per model; the order fixes the legend labels inside plot_metric_df.
metric_list_dir = [
    'metric_ablation_time_enh_10nrun_1Fold.csv',
    'metric_ablation_time_vanilla_10nrun_1Fold.csv',
    'metric_gbm_10nrun_1Fold.csv',
    'metric_lr_10nrun_1Fold.csv',
]
metric_list = [pd.read_csv(os.path.join(path_root, name)) for name in metric_list_dir]
plot_metric_df(metric_list, task_name, val_flag='val_')
plt.show()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
# Global matplotlib defaults shared by every plotting helper in this module.
plt.rc('font', family='Times New Roman')
font_size = 16  # tick/label font size used throughout
def plot_metric_df(history_list, task_name, val_flag='test_'):
    """Plot one subplot per metric for a list of per-model metric DataFrames.

    Args:
        history_list: list of DataFrames, one per model result file; the order
            must match ``legend_labels`` below.
        task_name: selects the metric set (classification when it contains
            'relapse_risk', regression otherwise).
        val_flag: column-name prefix ('val_' / 'test_') used to filter columns.
    """
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['r2', 'mae', 'mse']
    fig = plt.figure(figsize=(20, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    # BUGFIX: plt.subplot needs integer grid dimensions; `L / row` is a float
    # (rejected by modern matplotlib). Ceil keeps row*col >= L.
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_metric(history_list, metric, val_flag)
    fig.subplots_adjust(top=0.8)
    # Order must match history_list (see eval script).
    legend_labels = ['ours',
                     'ATT+MLP',
                     'LGB',
                     'Lasso',
                     ]
    plt.legend(labels=legend_labels,
               ncol=len(legend_labels),
               loc='upper center',
               fontsize=14,
               bbox_to_anchor=(-1.2, 1, 1, 0.2),
               borderaxespad=0.,
               )
def show_metric(history_list, metrics_name, val_flag=''):
    """Draw one dotted, markered line per DataFrame for a single metric."""
    marker_list = ['*', 'd', 's', 'x', 'o']
    metrics_name_dict = {'r2': 'R-square', 'mae': 'mean absolute error', 'mse': 'mean squared error'}
    for idx, frame in enumerate(history_list):
        # Select exactly the "<val_flag><metric>" column; cap at 3000 rows.
        column = frame.filter(regex=r'\b{}{}\b'.format(val_flag, metrics_name))[:3000]
        plt.plot(column, linestyle=':', marker=marker_list[idx], linewidth=2)
    plt.xticks(range(0, 11), fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(metrics_name_dict[metrics_name], fontsize=font_size)
    plt.xlabel('Round', fontsize=font_size)
def plot_history_df(history_list, task_name, val_flag=''):
    """Plot per-epoch training curves (mean + min/max band) per metric.

    Args:
        history_list: list of history DataFrames, one per model variant; the
            order must match the legend labels below.
        task_name: selects the metric set (classification when it contains
            'relapse_risk', regression otherwise).
        val_flag: column-name prefix used to filter columns ('' or 'val_').
    """
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    # BUGFIX: plt.subplot needs integer grid dimensions; `L / row` is a float
    # (rejected by modern matplotlib). Ceil keeps row*col >= L.
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_history(history_list, metric, val_flag)
        plt.legend(labels=['attention', 'attention+mlp', 'attention+label corrected',
                           'attention+mlp+label corrected(ours)', 'mlp', 'mlp+label corrected'],
                   fontsize=14)
def show_history(history_list, metrics_name, val_flag=''):
    """Plot the mean curve of one metric, with a min/max band across columns."""
    marker_list = ['^', 'd', 's', '*', 'x', 'o']
    for idx, frame in enumerate(history_list):
        # Columns whose names start with "<val_flag><metric>"; cap at 3000 rows.
        selected = frame.filter(regex=r'\b{}{}'.format(val_flag, metrics_name))[:3000]
        mean_curve = np.mean(selected, axis=1)
        n_epochs = mean_curve.shape[0]
        plt.plot(mean_curve, linewidth=2, marker=marker_list[idx], markevery=200)
        # Shade the spread between the best and worst column per epoch.
        plt.fill_between(range(n_epochs),
                         np.min(selected, axis=1),
                         np.max(selected, axis=1),
                         alpha=0.3)
    plt.xticks(fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(val_flag + metrics_name, fontsize=font_size)
    plt.xlabel('Epoch', fontsize=font_size)
def plot_history(history_list, task_name, val_flag=False):
    """Plot averaged Keras training curves (loss + task metric).

    Args:
        history_list: list of Keras History objects.
        task_name: 'relapse_risk' selects loss/f1, anything else loss/r2.
        val_flag: also overlay the 'val_' curves when True.
    """
    if task_name == 'relapse_risk':
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    for i, metric in enumerate(metric_list):
        # BUGFIX: the original called an undefined `squrt()` (NameError).
        # Lay the subplots out on a single row, like the sibling helpers.
        plt.subplot(1, L, i + 1)
        show_train_history(history_list, metric)
        if val_flag:
            show_train_history(history_list, 'val_{}'.format(metric))
        plt.legend(labels=[metric, 'val_{}'.format(metric)], loc='upper left')
        plt.title('{} {}'.format(task_name, metric))
def history_save(history_list, history_name):
    """Concatenate the ``.history`` dicts of several Keras History objects
    and dump them to ``./history_<history_name>.csv``.

    Each History contributes one row per recorded metric (orient='index').
    BUGFIX: the output filename was misspelled './hitory_{}.csv'.
    """
    history_all = pd.DataFrame([])
    for history in history_list:
        history_ = pd.DataFrame.from_dict(history.history, orient='index')
        history_all = pd.concat([history_all, history_], axis=0)
    history_all.to_csv('./history_{}.csv'.format(history_name))
def show_train_history(history_list, metrics_name):
    """Plot one metric averaged across several Keras History objects."""
    frames = []
    for record in history_list:
        # One row per History: the per-epoch values of this metric.
        frames.append(pd.DataFrame(np.array(record.history[metrics_name]).reshape(1, -1)))
    metrics_list = pd.concat(frames, axis=0) if frames else None
    # metrics = np.median(metrics_list, axis=0)
    metrics = np.mean(metrics_list, axis=0)
    plt.plot(metrics)
    plt.ylabel(metrics_name)
    plt.xlabel('Epoch')
|
{
"imported_by": [],
"imports": [
"/Regression/src/model/history_.py"
]
}
|
Peroxidess/Ablation-Time-Prediction-Model
|
/Regression/src/learn_weight_main.py
|
# Copyright (c) 2017 - 2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Runs MNIST experitment. Default 10 runs for 10 random seeds.
#
# Usage:
# python -m mnist.imblanace_mnist_train_ad.py
#
# Flags:
# --exp [string] Experiment name, `ours`, `hm`, `ratio`, `random` or `baseline`.
# --pos_ratio [float] The ratio for the positive class, choose between 0.9 - 0.995.
# --nrun [int] Total number of runs with different random seeds.
# --ntrain [int] Number of training examples.
# --nval [int] Number of validation examples.
# --ntest [int] Number of test examples.
# --tensorboard Writes TensorBoard logs while training, default True.
# --notensorboard Disable TensorBoard.
# --verbose Print training progress, default False.
# --noverbose Disable printing.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import pandas as pd
import os
import six
import tensorflow as tf
from collections import namedtuple
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet
from tensorflow.examples.tutorials.mnist import input_data
from tqdm import tqdm
from learn_rewieght.reweight import get_model, reweight_random, reweight_autodiff, reweight_hard_mining
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection
from model.training_ import training_model, model_training, precision, recall, f1, r2
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
# Quiet TensorFlow: suppress C++ info logs and python logging below ERROR.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.logging.set_verbosity(tf.logging.ERROR)
# Command-line flags (TF1 flags API); see the usage header at the top.
flags = tf.flags
flags.DEFINE_float('pos_ratio', 0.995, 'Ratio of positive examples in training')
flags.DEFINE_integer('nrun', 10, 'Number of runs')
flags.DEFINE_integer('ntest', 500, 'Number of testing examples')
flags.DEFINE_integer('ntrain', 5000, 'Number of training examples')
flags.DEFINE_integer('nval', 10, 'Number of validation examples')
flags.DEFINE_bool('verbose', False, 'Whether to print training progress')
flags.DEFINE_bool('tensorboard', False, 'Whether to save training progress')
flags.DEFINE_string('exp', 'baseline', 'Which experiment to run')
FLAGS = tf.flags.FLAGS
# Per-experiment hyper-parameter bundle; instances are produced by the
# @RegisterExp-decorated factory functions below.
Config = namedtuple('Config', [
    'reweight', 'lr', 'num_steps', 'random', 'ratio_weighted', 'nval', 'hard_mining', 'bsize'
])
# Registry mapping experiment name -> config-factory function.
exp_repo = {}


def RegisterExp(name):
    """Decorator factory: register the decorated config function under `name`."""
    def _register(func):
        exp_repo[name] = func
        return func
    return _register


# Shared optimizer settings for the experiment configs below.
LR = 0.001
NUM_STEPS = 4000
@RegisterExp('baseline')
def baseline_config():
    """Plain training: uniform example weights, double-length schedule."""
    return Config(
        reweight=False,
        lr=LR,
        num_steps=NUM_STEPS * 2,
        random=False,
        ratio_weighted=False,
        nval=0,
        hard_mining=False,
        bsize=100)
@RegisterExp('hm')
def hm_config():
    """Hard-mining baseline: no learned reweighting, large batches.

    BUGFIX: this function was also named ``baseline_config``, silently
    shadowing the real baseline config at module level. The registry keys it
    under 'hm' via the decorator, so renaming it is safe for lookups.
    """
    return Config(
        reweight=False,
        num_steps=NUM_STEPS * 2,
        lr=LR,
        random=False,
        ratio_weighted=False,
        hard_mining=True,
        bsize=500,
        nval=0)
@RegisterExp('ratio')
def ratio_config():
    """Class-ratio weighting: fixed weights from the positive ratio."""
    return Config(
        reweight=False,
        lr=LR,
        num_steps=NUM_STEPS * 2,
        random=False,
        ratio_weighted=True,
        nval=0,
        hard_mining=False,
        bsize=100)
@RegisterExp('random')
def dpfish_config():
    # Ablation: reweighting enabled but the weights are drawn at random.
    # NOTE(review): the function name `dpfish_config` looks like a leftover
    # from another experiment; it is only reachable via the 'random' key.
    return Config(
        reweight=True,
        num_steps=NUM_STEPS * 2,
        lr=LR,
        random=True,
        ratio_weighted=False,
        hard_mining=False,
        bsize=100,
        nval=0)
@RegisterExp('ours')
def ours_config():
    """Learned reweighting (autodiff); uses FLAGS.nval clean examples."""
    return Config(
        reweight=True,
        lr=LR,
        num_steps=NUM_STEPS,
        random=False,
        ratio_weighted=False,
        nval=FLAGS.nval,
        hard_mining=False,
        bsize=100)
def get_imbalance_dataset(mnist,
                          pos_ratio=0.9,
                          ntrain=5000,
                          nval=10,
                          ntest=500,
                          seed=0,
                          class_0=4,
                          class_1=9):
    """Build an artificially imbalanced binary MNIST task.

    Keeps only digits ``class_0`` (negative) and ``class_1`` (positive).
    Training data is skewed to ``pos_ratio`` positives; validation uses a
    50/50 split of ``nval`` examples; the test set stays balanced.

    Returns:
        (train, val, test, train_pos, train_neg) as TF ``DataSet`` objects,
        with pixel values rescaled back to 0-255.
    """
    rnd = np.random.RandomState(seed)

    # In training, we have 10% 4 and 90% 9.
    # In testing, we have 50% 4 and 50% 9.
    ratio = 1 - pos_ratio
    ratio_test = 0.5

    x_train = mnist.train.images
    y_train = mnist.train.labels
    x_test = mnist.test.images
    y_test = mnist.test.labels
    x_train_0 = x_train[y_train == class_0]
    x_test_0 = x_test[y_test == class_0]

    # First shuffle, negative.
    idx = np.arange(x_train_0.shape[0])
    rnd.shuffle(idx)
    x_train_0 = x_train_0[idx]

    # Negative class: carve out the validation slice first, then training.
    nval_small_neg = int(np.floor(nval * ratio_test))
    ntrain_small_neg = int(np.floor(ntrain * ratio)) - nval_small_neg

    x_val_0 = x_train_0[:nval_small_neg]    # 450 4 in validation.
    x_train_0 = x_train_0[nval_small_neg:nval_small_neg + ntrain_small_neg]    # 500 4 in training.

    if FLAGS.verbose:
        print('Number of train negative classes', ntrain_small_neg)
        print('Number of val negative classes', nval_small_neg)

    idx = np.arange(x_test_0.shape[0])
    rnd.shuffle(idx)
    x_test_0 = x_test_0[:int(np.floor(ntest * ratio_test))]    # 450 4 in testing.

    x_train_1 = x_train[y_train == class_1]
    x_test_1 = x_test[y_test == class_1]

    # First shuffle, positive.
    idx = np.arange(x_train_1.shape[0])
    rnd.shuffle(idx)
    x_train_1 = x_train_1[idx]

    # Positive class: same val/train carving with the complementary ratios.
    nvalsmall_pos = int(np.floor(nval * (1 - ratio_test)))
    ntrainsmall_pos = int(np.floor(ntrain * (1 - ratio))) - nvalsmall_pos

    x_val_1 = x_train_1[:nvalsmall_pos]    # 50 9 in validation.
    x_train_1 = x_train_1[nvalsmall_pos:nvalsmall_pos + ntrainsmall_pos]    # 4500 9 in training.

    idx = np.arange(x_test_1.shape[0])
    rnd.shuffle(idx)
    x_test_1 = x_test_1[idx]
    x_test_1 = x_test_1[:int(np.floor(ntest * (1 - ratio_test)))]    # 500 9 in testing.

    if FLAGS.verbose:
        print('Number of train positive classes', ntrainsmall_pos)
        print('Number of val positive classes', nvalsmall_pos)

    # Binary labels: 0 for class_0, 1 for class_1.
    y_train_subset = np.concatenate([np.zeros([x_train_0.shape[0]]), np.ones([x_train_1.shape[0]])])
    y_val_subset = np.concatenate([np.zeros([x_val_0.shape[0]]), np.ones([x_val_1.shape[0]])])
    y_test_subset = np.concatenate([np.zeros([x_test_0.shape[0]]), np.ones([x_test_1.shape[0]])])

    y_train_pos_subset = np.ones([x_train_1.shape[0]])
    y_train_neg_subset = np.zeros([x_train_0.shape[0]])

    x_train_subset = np.concatenate([x_train_0, x_train_1], axis=0).reshape([-1, 28, 28, 1])
    x_val_subset = np.concatenate([x_val_0, x_val_1], axis=0).reshape([-1, 28, 28, 1])
    x_test_subset = np.concatenate([x_test_0, x_test_1], axis=0).reshape([-1, 28, 28, 1])

    x_train_pos_subset = x_train_1.reshape([-1, 28, 28, 1])
    x_train_neg_subset = x_train_0.reshape([-1, 28, 28, 1])

    # Final shuffle.
    idx = np.arange(x_train_subset.shape[0])
    rnd.shuffle(idx)
    x_train_subset = x_train_subset[idx]
    y_train_subset = y_train_subset[idx]

    idx = np.arange(x_val_subset.shape[0])
    rnd.shuffle(idx)
    x_val_subset = x_val_subset[idx]
    y_val_subset = y_val_subset[idx]

    idx = np.arange(x_test_subset.shape[0])
    rnd.shuffle(idx)
    x_test_subset = x_test_subset[idx]
    y_test_subset = y_test_subset[idx]

    # MNIST images arrive normalized to [0, 1]; DataSet expects 0-255.
    train_set = DataSet(x_train_subset * 255.0, y_train_subset)
    train_pos_set = DataSet(x_train_pos_subset * 255.0, y_train_pos_subset)
    train_neg_set = DataSet(x_train_neg_subset * 255.0, y_train_neg_subset)
    val_set = DataSet(x_val_subset * 255.0, y_val_subset)
    test_set = DataSet(x_test_subset * 255.0, y_test_subset)

    return train_set, val_set, test_set, train_pos_set, train_neg_set
def get_exp_logger(sess, log_folder):
    """Gets a TensorBoard logger."""
    with tf.name_scope('Summary'):
        writer = tf.summary.FileWriter(os.path.join(log_folder), sess.graph)

    class ExperimentLogger():
        def log(self, niter, name, value):
            """Write one scalar summary tagged `name` at step `niter`."""
            event = tf.Summary()
            event.value.add(tag=name, simple_value=value)
            writer.add_summary(event, niter)

        def flush(self):
            """Flushes results to disk."""
            writer.flush()

    return ExperimentLogger()
def evaluate(sess, x_, y_, acc_, x, y, x_test, y_test):
    """Run the accuracy tensor on train then test data; return both values."""
    def _accuracy(features, labels):
        return sess.run(acc_, feed_dict={x_: features, y_: labels})

    return _accuracy(x, y), _accuracy(x_test, y_test)
def get_metric(pred, y):
    """Return the mean-squared-error tensor between predictions and targets.

    The original also built an R-squared subgraph whose result was never
    used or returned; those dead graph nodes are dropped here — the function
    always returned only the MSE tensor.
    """
    return tf.reduce_mean(tf.square(pred - y))
def run(train_data, test_data, seed, task_name, target='label'):
    """One trial of the learned-reweighting regression experiment.

    Preprocesses the data, picks `clean_data_num` trusted examples (rows with
    sup_label == 0) as the reweighting validation set, builds the TF1 graph
    (train / eval / reweighting models), trains for a fixed number of epochs
    and returns (metric_df, empty_df, empty_df) to match the interface of the
    sibling training scripts.

    NOTE(review): the optimizer is built with the module-level `lr` constant,
    not the `lr_` placeholder that is fed below — confirm this is intended.
    """
    train_data, test_data, co_col, ca_col = data_preprocessing(train_data, test_data,
                                                               ca_co_sel_flag=False, onehot_flag=True)
    _, test_data = anomaly_dectection(train_data, test_data)
    # train_data, test_data = anomaly_dectection(train_data, test_data)# Outlier detection
    x, y, x_val, y_val, test_set, test_set_label = \
        get_dataset_(train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, seed=seed, val_ratio=val_ratio) # label confusion according to requirements
    x.reset_index(inplace=True)
    x.drop(columns=['基线-患者基本信息-ID_sparse'], inplace=True)
    y.reset_index(inplace=True)
    # Override x_val/y_val: sample `clean_data_num` clean rows (sup_label == 0)
    # as the trusted validation set and remove them from the training data.
    y_val = y.loc[y['sup_label'] == 0].sample(n=clean_data_num, random_state=seed)
    x_val = x.loc[y_val.index]
    x.drop(index=x_val.index, inplace=True)
    y.drop(index=x_val.index, inplace=True)
    ntrain = FLAGS.ntrain
    nval = FLAGS.nval
    ntest = FLAGS.ntest
    # Checkpoint/log folder keyed by positive ratio, task and seed.
    folder = os.path.join('ckpt_mnist_imbalance_cnn_p{:d}'.format(int(FLAGS.pos_ratio * 100.0)),
                          task_name + '_{:d}'.format(seed))
    if not os.path.exists(folder):
        os.makedirs(folder)
    with tf.Graph().as_default(), tf.Session() as sess:
        bsize = batchsize
        # Placeholders for the training batch, the clean validation batch
        # and the per-example weights.
        x_ = tf.placeholder(tf.float32, [None, x.shape[1]], name='x')
        y_ = tf.placeholder(tf.float32, [None], name='y')
        x_val_ = tf.placeholder(tf.float32, [None, x.shape[1]], name='x_val')
        y_val_ = tf.placeholder(tf.float32, [None], name='y_val')
        ex_wts_ = tf.placeholder(tf.float32, [None, 1], name='ex_wts')
        ex_wts_b = tf.placeholder(tf.float32, [None, 1], name='ex_wts_b')
        lr_ = tf.placeholder(tf.float32, [], name='lr')

        # Build training model.
        with tf.name_scope('Train'):
            _, loss_c, logits_c = get_model(
                x_, y_, is_training=True, dtype=tf.float32, w_dict=None, ex_wts=ex_wts_, reuse=None)
            train_op = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(loss_c)
            # metric_ = get_metric(logits_c, y_)

        # Build evaluation model.
        with tf.name_scope('Val'):
            _, loss_eval, logits_eval = get_model(
                x_,
                y_,
                is_training=False,
                dtype=tf.float32,
                w_dict=None,
                ex_wts=ex_wts_,
                reuse=True)
            metric_ = get_metric(logits_eval, y_)

        # Build reweighting model.
        if reweight:
            if random:
                ex_weights_ = reweight_random(bsize)
            else:
                # Learned weights via the inner/outer autodiff procedure.
                ex_weights_, var_list_, grads_, grads_w_ = reweight_autodiff(
                    x_,
                    y_,
                    x_val_,
                    y_val_,
                    ex_wts_,
                    ex_wts_b,
                    bsize,
                    clean_data_num,
                    eps=0.1,
                    gate_gradients=1)
        else:
            if hard_mining:
                ex_weights_ = reweight_hard_mining(x_, y_, positive=True)
            else:
                if ratio_weighted:
                    # Weighted by the ratio of each class.
                    ex_weights_ = pos_ratio * (1 - y_) + (1 - pos_ratio) * (y_)
                else:
                    # Weighted by uniform.
                    ex_weights_ = tf.ones([bsize], dtype=tf.float32) / float(bsize)

        if FLAGS.tensorboard:
            exp_logger = get_exp_logger(sess, folder)
        else:
            exp_logger = None

        num_steps = 10
        acc_sum = 0.0
        acc_test_sum = 0.0
        loss_sum = 0.0
        count = 0
        sess.run(tf.global_variables_initializer())
        # Per-epoch training/test curves, assembled into `history` at the end.
        history = pd.DataFrame([])
        history_loss = []
        history_loss_acc = []
        history_metric_r2 = []
        history_metric_mse = []
        history_metric_mae = []
        for i in range(2000):
            kf = KFold(n_splits=2, shuffle=False, random_state=2020)
            # for k, (train_index, val_index) in enumerate(kf.split(x)):
            #     x_batch, y_batch = x.iloc[train_index], y[target].iloc[train_index]
            # Full-batch training: the whole training set every epoch.
            x_batch, y_batch = x, y[target]
            # Step 1: compute per-example weights from the clean val set.
            ex_weights, var_list, grads, grads_w = sess.run(
                [ex_weights_, var_list_, grads_, grads_w_], feed_dict={x_: x_batch,
                                                                      y_: y_batch,
                                                                      x_val_: x_val,
                                                                      y_val_: y_val[target],
                                                                      ex_wts_: np.ones((batchsize, 1)),
                                                                      ex_wts_b: np.ones([clean_data_num, 1])})
            # ww = var_list[0]
            # bb = var_list[1]
            # print(x_batch.shape)
            # print(ww.shape)
            # xx = np.matmul(np.array(x_batch), ww)
            # xxx = xx + bb
            # xxxx = xxx - np.array(y_batch).reshape(-1, 1)
            # ss = (xxxx ** 2) / 2
            # sss = np.mean(ss)
            # ww_xx = xxxx.reshape(1, -1).dot(np.array(x_batch))
            # re_xx = np.mean(np.abs(xxxx))
            # Step 2: one weighted training step with those weights.
            pred_tra, loss, acc, _ = sess.run(
                [logits_c, loss_c, metric_, train_op],
                feed_dict={
                    x_: x_batch,
                    y_: y_batch,
                    x_val_: x_val,
                    y_val_: y_val[target],
                    ex_wts_: ex_weights,
                    lr_: lr
                })
            print(np.unique(ex_weights))
            # Step 3: evaluate on the held-out test set every epoch.
            pred = sess.run(logits_eval, feed_dict={x_: test_set, y_: test_set_label[target], ex_wts_: ex_weights})
            r2 = r2_score(pred, test_set_label[target])
            mse = mean_squared_error(pred, test_set_label[target])
            mae = mean_absolute_error(pred, test_set_label[target])
            history_loss.append(loss)
            history_loss_acc.append(acc)
            history_metric_r2.append(r2)
            history_metric_mse.append(mse)
            history_metric_mae.append(mae)
        # Final evaluation.
        history['loss'] = history_loss
        history['acc'] = history_loss_acc
        history['r2'] = history_metric_r2
        history['mse'] = history_metric_mse
        history['mae'] = history_metric_mae
        pred_tra = sess.run(logits_eval, feed_dict={x_: x, y_: y[target], ex_wts_: ex_weights})
        train_r2 = r2_score(pred_tra, y[target])
        train_r2_ad = None
        train_mse = mean_squared_error(pred_tra, y[target])
        train_mae = mean_absolute_error(pred_tra, y[target])
        train_mape = None
        # Validation metrics are not computed in this script; test metrics are
        # taken from the last training epoch above.
        val_r2, val_r2_ad, val_mse, val_mae, val_mape, = None, None, None, None, None
        test_r2, test_r2_ad, test_mse, test_mae, test_mape = r2, None, mse, mae, None
        dict_ = dict(zip(['train_r2', 'train_r2_ad', 'train_mse', 'train_mae', 'train_mape',
                          'val_r2', 'val_r2_ad', 'val_mse', 'val_mae', 'val_mape',
                          'test_r2', 'test_r2_ad', 'test_mse', 'test_mae', 'test_mape'],
                         [train_r2, train_r2_ad, train_mse, train_mae, train_mape,
                          val_r2, val_r2_ad, val_mse, val_mae, val_mape,
                          test_r2, test_r2_ad, test_mse, test_mae, test_mape,
                          ]))
        metric_df = pd.DataFrame.from_dict([dict_])
        return metric_df, pd.DataFrame([]), pd.DataFrame([])
def main():
    """Run FLAGS.nrun independent trials and aggregate/persist the metrics.

    Each trial reloads the data and calls `run` with a different seed; the
    per-trial metric rows are concatenated, summarised to stdout and written
    to CSVs named after the module-level task settings.
    """
    metric_df_all = pd.DataFrame([])
    test_prediction_all = pd.DataFrame([]) # for prediction of test data
    history_df_all = pd.DataFrame([]) # for keras model
    for i, trial in enumerate(tqdm(six.moves.xrange(FLAGS.nrun))):
        print('rnum : {}'.format(i))
        seed = (trial * 2718) % 2020 # a different random seed for each run
        train_data, test_data = load_data_(datasets_name, task_name)
        metric_df, test_prediction, history_df = run(train_data, test_data, seed, task_name)
        metric_df_all = pd.concat([metric_df_all, metric_df], axis=0)
        test_prediction_all = pd.concat([test_prediction_all, test_prediction], axis=1)
        history_df_all = pd.concat([history_df_all, history_df], axis=1)
    # Summary line per metric: mean (std) max median min over all trials.
    for col in metric_df_all.columns:
        print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df_all[col].mean(),
                                                                                metric_df_all[col].std(),
                                                                                metric_df_all[col].max(),
                                                                                metric_df_all[col].median(),
                                                                                metric_df_all[col].min()))
    metric_df_all.to_csv('./metric_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    history_df_all.to_csv('./history_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    # test_prediction_all.columns = ['ab_time', 'ab_time_enh']
    test_prediction_all.to_csv('./prediction{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits))
    plt.show()
    pass
# --- Experiment configuration (module-level globals read by run()/main()) ---
np.random.seed(2020)
datasets_name = 'LiverAblation'
task_name = 'ablation_time_learn_weight' # ablation_time_enh / ablation_time_vanilla / relapse_risk
nrun = 10 # num of repeated experiments
clean_ratio = 1 # 1 for No label confusion
test_ratio = 0 # test data ratio for label confusion
val_ratio = 0 # val data ratio for label confusion
n_splits = 1 # n_splits > 1 for Kfold cross validation / n_splits==1 for training all data
epoch = 5000 # Kfold cross validation: a large number / training all data: mean epoch
batchsize = 348
lr = 1e-4
clean_data_num = 10 # number of trusted (clean) examples used as the reweighting val set
reweight = True # enable the learned-reweighting branch in run()
num_steps = NUM_STEPS
random = False # NOTE(review): shadows any future `random` module import at module scope
ratio_weighted = False
hard_mining = False
if __name__ == '__main__':
    main()
|
import copy
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression
from keras.models import load_model
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
from keras.callbacks import EarlyStopping
from model.bulid_model import classifer_, regression_, label_correction
from model.evaluate import evaluate_classification, evaluate_regression
def precision(y_true, y_pred):
    """Batch precision = TP / predicted positives (Keras backend tensors)."""
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # epsilon guards against division by zero when nothing is predicted positive
    return true_pos / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch recall = TP / actual positives (Keras backend tensors)."""
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon guards against division by zero when there are no positives
    return true_pos / (actual_pos + K.epsilon())
def r2(y_true, y_pred):
    """Coefficient of determination (R-squared) on Keras backend tensors."""
    ss_res = K.sum(K.square(y_pred - y_true))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / ss_tot
def f1(y_true, y_pred):
    """F1 score from the precision/recall helpers above.

    Hoists precision/recall into locals so each tensor is built once
    (the original re-invoked both helpers three times per call).
    """
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + 1e-7)
# model compile and fit
def model_training(model, train_sets, train_label, val_data, val_label, lr, task, epoch, batch_size, patience=100):
    """Compile `model` for the given task and fit it with early stopping.

    Args:
        model: an uncompiled Keras model.
        train_sets / train_label: training inputs and targets.
        val_data / val_label: validation inputs/targets; when val_label is
            None the model is fit on the training data only.
        lr: RMSprop learning rate.
        task: 'classification' or 'regression'.
        epoch, batch_size, patience: standard fit/early-stopping settings.

    Returns:
        (history, model) — the Keras History and the fitted model.

    Raises:
        ValueError: for an unknown `task` (the original fell through and hit
        a NameError on `metrics`).
    """
    if task == 'classification':
        metrics = ['acc', f1, precision, recall]
        loss = 'binary_crossentropy'
        val_metric = 'val_f1'
    elif task == 'regression':
        # Only r2 is monitored: the original assigned ['mse', 'mae', r2] and
        # immediately overwrote it with [r2], so the dead assignment is gone.
        metrics = [r2]
        loss = 'mean_squared_error'
        val_metric = 'val_r2'
    else:
        raise ValueError('unknown task: {!r}'.format(task))
    model.compile(optimizer=RMSprop(lr=lr), loss=loss, metrics=metrics)
    model.summary()
    # NOTE(review): with val_label None the EarlyStopping monitor is still the
    # 'val_*' metric, which will not exist in that run — confirm intended.
    if val_label is None:
        history = model.fit(train_sets, train_label,
                            epochs=epoch,
                            batch_size=batch_size,
                            shuffle=True,
                            callbacks=[EarlyStopping(monitor=val_metric, patience=patience, mode='max')],
                            # callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)],
                            verbose=2,
                            )
    else:
        history = model.fit(train_sets, train_label,
                            # validation_split=0.3,
                            validation_data=(val_data, val_label),
                            epochs=epoch,
                            batch_size=batch_size,
                            shuffle=True,
                            callbacks=[EarlyStopping(monitor=val_metric, patience=patience, mode='max')],
                            # callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)],
                            verbose=2,
                            )
    return history, model
# select model
def training_model(train_set, train_set_label, task_name, train_index, val_index, test_set, test_set_label,
                   epoch, batchsize, iter_=None, step_=None, target='label', seed=2020, label_corr_epoch=2):
    """Dispatch on *task_name* to build, train and evaluate one model.

    task_name substrings (checked in order):
        'risk'    -> attention classifier predicting relapse ('sup_label').
        'vanilla' -> plain MLP regression on ablation time.
        'load'    -> load a pre-trained .h5 model, evaluate only.
        'enh'     -> classifier-guided label correction + regression (ours).
        'lr'      -> LassoCV baseline.
        'gbm'     -> LightGBM baseline.

    When *train_index*/*val_index* are given they select a KFold split of the
    training data; otherwise the test set doubles as the validation data.

    Returns:
        (model, history_df, metric, test_pred, len_) where len_ is the number
        of epochs actually run (0 for non-Keras baselines).
    """
    if train_index is not None:
        train_x, val_x = train_set.iloc[train_index], train_set.iloc[val_index]
        train_y, val_y = train_set_label.iloc[train_index], train_set_label.iloc[val_index]
        val_label = val_y[target]
        val_suplabel = val_y['sup_label']
        # '*_time' frames drop post-operative / discharge columns (术后 / 出院 /
        # Post) — those features are unavailable when predicting ablation time.
        val_x_time = val_x.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
    else:
        train_x = train_set
        train_y = train_set_label
        val_x = test_set
        val_x_time = test_set.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
        val_label = test_set_label[target]
        val_suplabel = test_set_label['sup_label']
    train_x_time = train_x.drop(columns=train_x.filter(regex=r'术后|出院|Post').columns)
    test_set_time = test_set.drop(columns=test_set.filter(regex=r'术后|出院|Post').columns)
    # train_x_time.to_csv('train_data.csv', encoding='gb18030')
    # NOTE(review): debug leftover — requires a local 'train_data.csv'; the
    # xx/rr column-diff sets are never used. Crashes if the file is absent.
    train_data_raw = pd.read_csv('train_data.csv', encoding='gb18030')
    xx = set(train_data_raw.columns) - set(train_x_time.columns)
    rr = set(train_x_time.columns) - set(train_data_raw.columns)
    if 'risk' in task_name:
        classifer, att_weight = classifer_(train_x)
        # epoch=130 for training whole data 107
        # lr=8e-5 batchsize=8 patience= 90
        history, model = model_training(classifer,
                                        [train_x, train_y[target]], train_y['sup_label'],
                                        [val_x, val_label], val_suplabel,
                                        8e-5, 'classification', 120, 16, 190)
        metric = evaluate_classification(model,
                                         [train_x, train_y[target]], train_y['sup_label'],
                                         [val_x, val_label], val_suplabel,
                                         [test_set, test_set_label[target]], test_set_label['sup_label'])
        test_pred = model.predict([test_set, test_set_label[target]])
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = history_df.shape[0]  # count the number of epoch
    elif 'vanilla' in task_name:
        regression = regression_(train_x_time)
        # epoch=2926 for training whole data 2709 for non-relapse data
        # lr=9e-6 batchsize=256 patience= 350
        history, model = model_training(regression, train_x_time, train_y[target], val_x_time, val_label,
                                        9e-6, 'regression', 15000, batchsize, 2500)  # 240 2335
        metric = evaluate_regression(model, train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = len(history.history['loss'])  # count the number of epoch
    elif 'load' in task_name:
        # r2 must be registered as a custom object to deserialize the model.
        model = load_model('ablation_time_enh_10nrun_1Fold.h5', custom_objects={'r2': r2})
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame([])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        len_ = 0
    elif 'enh' in task_name:
        history_df = pd.DataFrame([])
        classifer, att_weight = classifer_(train_x)
        # lr=8e-5 batchsize=16 epoch= 120
        history, classifer = model_training(classifer,
                                            [train_set, train_set_label[target]], train_set_label['sup_label'],
                                            [pd.DataFrame([]), None], None,
                                            8e-5, 'classification', 120, 16, 130)
        label_target = copy.deepcopy(train_set_label[target])
        regression_enh = regression_(train_x_time)
        len_ = 0
        # Alternate between correcting noisy labels with the classifier and
        # refitting the regressor on the corrected labels.
        for i in range(label_corr_epoch):
            print('iter {}'.format(i))
            label_target = label_correction(classifer, train_set, label_target, iter_=iter_, step_=step_)
            # label_target = train_y[target]
            if train_index is not None:
                label_target_train = label_target.iloc[train_index]
                val_label = label_target.iloc[val_index]
            else:
                label_target_train = label_target
            # lr=9e-6 batchsize=256 epoch= 600
            history, model = model_training(regression_enh,
                                            train_x_time, label_target_train, val_x_time, val_label,
                                            7e-5, 'regression', 225, batchsize, 220,)
            # 1e-5, 'regression', 1750, batchsize, 2120, )
            metric = evaluate_regression(model,
                                         train_x_time, train_y[target],
                                         val_x_time, val_label,
                                         test_set_time, test_set_label[target],
                                         )
            test_pred = model.predict(test_set_time)
            if history_df.empty:
                history_df = pd.DataFrame.from_dict(history.history, orient='columns')
            else:
                history_df = pd.concat([history_df, pd.DataFrame.from_dict(history.history, orient='columns')], axis=0)
            len_ += history_df.shape[0]  # count the number of epoch
        history_df.reset_index(drop=True, inplace=True)
        if train_index is not None:
            # Diagnostic only: how much the predicted relapse risk drops when
            # the corrected time predictions replace the raw labels.
            val_pred = model.predict(val_x_time)
            risk = classifer.predict([val_x, train_set_label[target].iloc[val_index]])
            risk_corr = classifer.predict([val_x, val_pred])
            risk_change = risk - risk_corr
            risk_change_max = risk_change.max()
            risk_change_mean = risk_change.mean()
            x = 1  # debugger breakpoint anchor
    elif 'lr' in task_name:
        model = LassoCV(random_state=seed)
        # model = RidgeCV()
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    elif 'gbm' in task_name:
        model = lgb.LGBMRegressor(
            max_depth=3,
            bagging_fraction=0.5,
            feature_fraction=0.5,
            reg_alpha=1,
            reg_lambda=1,
            min_child_samples=10,
            n_estimators=200,
            learning_rate=1e-1,
            random_state=seed,
        )
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    return model, history_df, metric, test_pred, len_
--- FILE SEPARATOR ---
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
import pandas as pd
import numpy as np
from preprocess import plot_tabel
def get_dataset_(nor, train_data, test_data, clean_ratio, test_retio, seed, target='label', val_ratio=0):
    """Split the data and optionally corrupt a share of the training labels.

    Args:
        nor: fitted scaler (only consumed by the commented-out plotting call).
        train_data / test_data: preprocessed DataFrames; when *test_data* is
            None and test_retio > 0 a test split is carved out of train_data.
        clean_ratio: fraction of the training set whose labels stay clean;
            clean_ratio >= 1 disables label corruption entirely.
        test_retio: test split fraction (typo kept for interface stability).
        seed: random_state for all splits.
        target: label column name.
        val_ratio: optional validation split fraction (0 disables it).

    Returns:
        (train_set_mix, train_set_mix_label, val_set, val_set_label,
         test_set, test_set_label); label frames carry [target, 'sup_label'],
        where sup_label == 1 marks rows whose labels were corrupted.
    """
    if test_retio == 0 or test_data is not None:
        train_set = train_data
        test_set = test_data
    else:
        train_set, test_set = train_test_split(train_data, test_size=test_retio, random_state=seed)
    if clean_ratio < 1:
        # Corrupt the (1 - clean_ratio) share: add unit Gaussian noise plus a
        # draw from a Gaussian fitted to the overall label distribution.
        train_set_, train_set_clean = train_test_split(train_set, test_size=clean_ratio, random_state=seed)
        label_distrib = np.random.normal(loc=train_set_[target].describe().loc['mean'],
                                         scale=train_set_[target].describe().loc['std'], size=train_set_[target].shape)
        alpha = 1  # weight of the white-noise term
        beta = 1   # weight of the distribution-matched noise term
        train_label_ = train_set_[target] + \
            alpha * np.random.normal(loc=0., scale=1., size=train_set_[target].shape) + beta * label_distrib
        train_set_[target] = train_label_
        train_set_['sup_label'] = 1
        train_set_clean['sup_label'] = 0
        test_set['sup_label'] = 0
    else:
        train_set_ = None
        train_set_clean = train_set
    # pd.concat silently skips the None entry when no corruption happened.
    train_set_mix = pd.concat([train_set_, train_set_clean], axis=0)
    # mix_ratio = train_set[train_set[target] != train_set_mix[target]].index
    # print('real mix ratio is {}'.format(mix_ratio))
    if val_ratio > 0:
        train_set_mix, val_set = train_test_split(train_set_mix, test_size=val_ratio, random_state=seed)
        val_set_label = val_set[[target, 'sup_label']]
        val_set.drop(columns=[target, 'sup_label'], inplace=True)
    else:
        val_set = None
        val_set_label = None
    train_set_mix_label = train_set_mix[[target, 'sup_label']]
    test_set_label = test_set[[target, 'sup_label']]
    # plot_tabel.metric_hist(test_set, nor)
    train_set_mix.drop(columns=[target, 'sup_label'], inplace=True)
    test_set.drop(columns=[target, 'sup_label'], inplace=True)
    return train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label
def data_preprocessing(train_data, test_data=None, ca_feat_th=8, ca_co_sel_flag=True, onehot_flag=False, target='label'):
    """Clean and encode the raw tables.

    Columns are tagged sparse (categorical) or dense (continuous); sparse ones
    are label-encoded (and optionally one-hot encoded), dense ones are
    standardized. Train and test are transformed together via a 'tab' flag
    column and split back apart before returning.

    Args:
        train_data: training DataFrame (required).
        test_data: optional test DataFrame, processed jointly.
        ca_feat_th: max distinct values for a column to count as categorical.
        ca_co_sel_flag: when True, detect sparse/dense by cardinality and add
            '_sparse'/'_dense' suffixes; when False, rely on suffixes already
            present in the column names.
        onehot_flag: one-hot encode the sparse columns.
        target: regression label column; standardized with the dense features
            (the sup_label classifier consumes it, so it must be scaled too).

    Returns:
        (train_data, test_data, co_col, ca_col, std) with std the fitted
        StandardScaler.
    """
    if test_data is not None:
        train_data['tab'] = 1
        test_data['tab'] = 0
        data_raw = pd.concat([train_data, test_data], axis=0)
    else:
        # Fix: data_raw was previously unbound when no test set was supplied.
        data_raw = train_data
    print('\ndata_raw', data_raw.shape)
    data = data_raw.dropna(axis=1, how='all')
    data = data.fillna(0)
    if ca_co_sel_flag:
        ca_col = []
        co_col = []
        data_columns_label = data.filter(regex=r'label').columns
        data_columns = data.columns.drop(data_columns_label)
        for col in data_columns:
            data_col = data[col]
            col_feat_num = len(set(data_col))
            if col_feat_num > ca_feat_th:
                col_ = col + '_dense'
                co_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
            elif ca_feat_th >= col_feat_num > 1:
                col_ = col + '_sparse'
                ca_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
    else:
        ca_col = data.filter(regex=r'sparse').columns
        co_col = data.filter(regex=r'dense').columns
    data[ca_col] = pd.concat([data[ca_col].apply(lambda ser: pd.factorize(ser)[0])])
    data[ca_col] = data[ca_col].apply(LabelEncoder().fit_transform)
    if onehot_flag:
        data = pd.get_dummies(data, columns=ca_col)
    # The regression target is standardized as well so the sup_label
    # classifier taking it as input does not collapse.
    # Fix: co_col may be a plain list (ca_co_sel_flag=True path) whose
    # .append() returns None; extend explicitly depending on the type.
    target_cols = data.columns[data.columns == target]
    if isinstance(co_col, list):
        co_col = co_col + list(target_cols)
    else:
        co_col = co_col.append(target_cols)
    mms = MinMaxScaler(feature_range=(0.1, 1.1))  # kept for the alternative scaling below
    std = StandardScaler()
    data[co_col] = pd.DataFrame(std.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data[co_col] = pd.DataFrame(mms.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data = pd.DataFrame(mms.fit_transform(data), columns=data.columns, index=data.index)
    if test_data is not None:
        train_data = data[data['tab'] == 1].drop(columns=['tab'])
        test_data = data[data['tab'] == 0].drop(columns=['tab'])
    else:
        train_data = data
    # Re-derive the column groups from the (possibly renamed/dummied) data.
    ca_col = data.filter(regex=r'sparse').columns
    co_col = data.filter(regex=r'dense').columns
    return train_data, test_data, co_col, ca_col, std
def anomaly_dectection(train_data=None, test_data=None, target='label'):
    """Drop rows whose *target* value lies outside mean ± 3*std, per dataset.

    Args:
        train_data, test_data: pandas DataFrames; either may be None or empty,
            in which case it is passed through unchanged (the original code
            raised AttributeError on None and IndexError on an empty frame).
        target: name of the label column used for the 3-sigma filter.

    Returns:
        (train_data, test_data) with outlier rows removed.
    """
    clean_data = []
    for data in (train_data, test_data):
        if data is None or data.empty:
            # Nothing to filter; keep a placeholder so the return arity holds.
            clean_data.append(data)
            continue
        std_ = data[target].std()
        mean_ = data[target].mean()
        data = data[data[target] < mean_ + 3 * std_]
        data = data[data[target] > mean_ - 3 * std_]
        clean_data.append(data)
    return clean_data[0], clean_data[1]
--- FILE SEPARATOR ---
# Copyright (c) 2017 - 2019 Uber Technologies, Inc.
#
# Licensed under the Uber Non-Commercial License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at the root directory of this project.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Models for MNIST experiments.
#
from __future__ import division, print_function
import numpy as np
import tensorflow as tf
def get_model(inputs,
              labels,
              is_training=True,
              dtype=tf.float32,
              w_dict=None,
              ex_wts=None,
              reuse=None,
              ):
    """Build a 5-layer MLP regression graph with per-example loss weights.

    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None. Passing a dict
        lets the caller substitute (e.g. one-step-updated) weights so the
        graph is rebuilt with them instead of fresh variables.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
        NOTE(review): ex_wts=None still dereferences ex_wts.shape below, so
        callers passing None (reweight_hard_mining) would crash — confirm.
    :param reuse: [bool] Whether to reuse variables, default None.
    :return: (w_dict, loss, pred).
    """
    if w_dict is None:
        w_dict = {}

    def _get_var(name, shape, dtype, initializer):
        # Return the weight memoized in w_dict under its fully-scoped name,
        # creating a tf variable (and caching it) on first use.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        else:
            var = tf.get_variable(name, shape, dtype, initializer=initializer)
            w_dict[key] = var
            return var

    with tf.variable_scope('Model', reuse=reuse):
        # Flatten batch dims; layer widths are input -> 1024 -> 256 -> 64 -> 32 -> 1.
        shape_list = np.append(np.array([-1]), np.squeeze(inputs.shape[1:].as_list()))
        shape_list_wts = np.append(np.array([-1]), np.squeeze(ex_wts.shape[1:].as_list()))
        shape_list_fir = np.append(np.squeeze(inputs.shape[1:].as_list()), np.array([1024]))
        shape_list_sec = np.array([1024, 256])
        shape_list_thr = np.array([256, 64])
        inputs_ = tf.cast(tf.reshape(inputs, shape_list), dtype)
        inputs_w = tf.cast(tf.reshape(ex_wts, shape_list_wts), dtype)
        # inputs_w = tf.matrix_diag(ex_wts)
        labels = tf.cast(tf.reshape(labels, [-1, 1]), dtype)
        w_init = tf.truncated_normal_initializer(stddev=0.1)
        w1 = _get_var('w1', shape_list_fir, dtype, initializer=w_init)
        w2 = _get_var('w2', shape_list_sec, dtype, initializer=w_init)
        w3 = _get_var('w3', shape_list_thr, dtype, initializer=w_init)
        w4 = _get_var('w4', [64, 32], dtype, initializer=w_init)
        w5 = _get_var('w5', [32, 1], dtype, initializer=w_init)
        b_init = tf.constant_initializer(0.0)
        b1 = _get_var('b1', 1, dtype, initializer=b_init)
        b2 = _get_var('b2', 1, dtype, initializer=b_init)
        b3 = _get_var('b3', 64, dtype, initializer=b_init)
        b4 = _get_var('b4', 32, dtype, initializer=b_init)
        b5 = _get_var('b5', 1, dtype, initializer=b_init)
        act = tf.nn.relu
        l0 = tf.identity(inputs_, name='l0')
        z1 = tf.add(tf.matmul(l0, w1), b1, name='z1')
        l1 = act(z1, name='l1')
        # h1 = tf.contrib.layers.batch_norm(l1, center=True, scale=True, is_training=True, scope='bn1')
        z2 = tf.add(tf.matmul(l1, w2), b2, name='z2')
        l2 = act(z2, name='l2')
        # h2 = tf.contrib.layers.batch_norm(l2, center=True, scale=True, is_training=True, scope='bn2')
        z3 = tf.add(tf.matmul(l2, w3), b3, name='z3')
        l3 = act(z3, name='l3')
        # h3 = tf.contrib.layers.batch_norm(l3, center=True, scale=True, is_training=True, scope='bn3')
        z4 = tf.add(tf.matmul(l3, w4), b4, name='z4')
        l4 = act(z4, name='l4')
        # h4 = tf.contrib.layers.batch_norm(l4, center=True, scale=True, is_training=True, scope='bn4')
        z5 = tf.add(tf.matmul(l4, w5), b5, name='z5')
        pred = z5
    if ex_wts is None:
        # Average loss.
        loss = tf.reduce_mean(tf.square(tf.subtract(pred, labels)))
    else:
        # Weighted loss.
        squa = tf.square(tf.subtract(pred, labels)) * inputs_w
        mse = tf.nn.l2_loss(tf.subtract(pred, labels)) * inputs_w
        loss = tf.reduce_mean(squa)
    return w_dict, loss, pred
def reweight_random(bsize, eps=0.0):
    """Draw random example weights, clamp at *eps*, and normalize to sum one.

    :param bsize: [int] Batch size.
    :param eps: [float] Minimum example weights, default 0.0.
    """
    raw = tf.random_normal([bsize], mean=0.0, stddev=1.0)
    clipped = tf.maximum(raw, eps)
    total = tf.reduce_sum(clipped)
    # Guard against an all-zero batch: bump the denominator to 1 when it is 0.
    total += tf.to_float(tf.equal(total, 0.0))
    return clipped / total
def reweight_autodiff(inp_a,
                      label_a,
                      inp_b,
                      label_b,
                      ex_wts_a,
                      ex_wts_b,
                      bsize_a,
                      bsize_b,
                      eps=0,
                      gate_gradients=1):
    """Reweight examples using automatic differentiation.

    Takes one virtual SGD step on the noisy batch (a), evaluates the updated
    model's loss on the clean batch (b), and uses the negated gradient of
    that loss w.r.t. the per-example weights as each example's importance.

    :param inp_a: [Tensor] Inputs for the noisy pass.
    :param label_a: [Tensor] Labels for the noisy pass.
    :param inp_b: [Tensor] Inputs for the clean pass.
    :param label_b: [Tensor] Labels for the clean pass.
    :param ex_wts_a: [Tensor] Example-weight placeholder for the noisy pass.
    :param ex_wts_b: [Tensor] Example-weight placeholder for the clean pass.
    :param bsize_a: [int] Batch size for the noisy pass.
    :param bsize_b: [int] Batch size for the clean pass.
    :param eps: [float] Minimum example weights, default 0.0.
    :param gate_gradients: [int] Tensorflow gate gradients, reduce concurrency.
    :return: (normalized weights, variable list, gradients, clipped weights).
    """
    # ex_wts_a = tf.ones([bsize_a], dtype=tf.float32)
    # ex_wts_b = tf.ones([bsize_b], dtype=tf.float32) / float(bsize_b)
    # ex_wts_b = tf.placeholder(tf.float32, [None, 1], name='ex_wts_b')
    w_dict, loss_a, logits_a = get_model(
        inp_a, label_a, ex_wts=ex_wts_a, is_training=True, reuse=True)
    var_names = w_dict.keys()
    var_list = [w_dict[kk] for kk in var_names]
    grads = tf.gradients(loss_a, var_list, gate_gradients=gate_gradients)
    # grads_w = tf.gradients(loss_a, [ex_wts_a], gate_gradients=gate_gradients)
    # Virtual one-step update (unit learning rate): theta' = theta - grad.
    var_list_new = [vv - gg for gg, vv in zip(grads, var_list)]
    w_dict_new = dict(zip(var_names, var_list_new))
    _, loss_b, logits_b = get_model(
        inp_b, label_b, ex_wts=ex_wts_b, is_training=True, reuse=True, w_dict=w_dict_new)
    grads_ex_wts = tf.gradients(loss_b, [ex_wts_a], gate_gradients=gate_gradients)[0]
    # A weight whose increase lowers the clean loss gets a positive score.
    ex_weight = -grads_ex_wts
    ex_weight_plus = tf.maximum(ex_weight, eps)
    ex_weight_sum = tf.reduce_sum(ex_weight_plus)
    # Avoid division by zero when every weight was clipped away.
    ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0))
    ex_weight_norm = ex_weight_plus / ex_weight_sum
    return ex_weight_norm, var_list, grads, ex_weight_plus
def reweight_hard_mining(inp, label, positive=False):
    """Reweight examples using hard mining.

    Keeps all examples of one class and the k highest-loss examples of the
    other class (k = size of the kept class), then normalizes the 0/1 mask
    so it sums to one.

    :param inp: [Tensor] [N, ...] Inputs.
    :param label: [Tensor] [N] Labels
    :param positive: [bool] Whether perform hard positive mining or hard negative mining.
    :return [Tensor] Examples weights of the same shape as the first dim of inp.
    """
    _, loss, logits = get_model(inp, label, ex_wts=None, is_training=True, reuse=True)
    # Mine for positive
    if positive:
        loss_mask = loss * label
    else:
        loss_mask = loss * (1 - label)
    # k = number of examples in the class that is kept wholesale.
    if positive:
        k = tf.cast(tf.reduce_sum(1 - label), tf.int32)
    else:
        k = tf.cast(tf.reduce_sum(label), tf.int32)
    k = tf.maximum(k, 1)
    loss_sorted, loss_sort_idx = tf.nn.top_k(loss_mask, k)
    # Start from the mask of the fully-kept class, then add the mined indices.
    if positive:
        mask = 1 - label
    else:
        mask = label
    updates = tf.ones([tf.shape(loss_sort_idx)[0]], dtype=label.dtype)
    mask_add = tf.scatter_nd(tf.expand_dims(loss_sort_idx, axis=1), updates, [tf.shape(inp)[0]])
    mask = tf.maximum(mask, mask_add)
    mask_sum = tf.reduce_sum(mask)
    # Guard against an all-zero mask before normalizing.
    mask_sum += tf.cast(tf.equal(mask_sum, 0.0), tf.float32)
    mask = mask / mask_sum
    return mask
--- FILE SEPARATOR ---
#coding=gb18030
import numpy as np
import pandas as pd
def load_data_(datasets, task_name='', seed=2020):
    """Load one of the supported datasets and normalize its label columns.

    Args:
        datasets: 'winequality_white', 'PPH' or 'LiverAblation'.
        task_name: for 'LiverAblation', a name containing 'non' restricts the
            training data to non-relapse cases.
        seed: random_state for the LiverAblation under-sampling.

    Returns:
        (train_data, test_data); test_data is only built for 'LiverAblation',
        and both are None for an unknown dataset name.
    """
    if datasets == 'winequality_white':
        data_path = '../DataSet/wine/{}.csv'.format(datasets)
        data = pd.read_csv(data_path)
        data.rename(columns={'quality': 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        train_data = data.fillna(0)
        test_data = None
    elif datasets == 'PPH':
        data_path = '../DataSet/PPH/{}.csv'.format(datasets)
        data_head = pd.read_csv('../DataSet/PPH/PPH_head.csv', encoding='gb18030')
        data = pd.read_csv(data_path, encoding='gb18030', index_col='index')
        # Append the human-readable header text to each raw column name.
        col = []
        for col_ in data.columns:
            col.append(col_ + np.squeeze(data_head[col_].values))
        data.columns = np.array(col)
        # data.to_csv('../DataSet/PPH/data_feat_name_add.csv', index=False, encoding='gb18030')
        data['sup_label'] = 0
        data.rename(columns={data.filter(regex=r'n61').columns.values[0]: 'label'}, inplace=True) if False else None  # noqa: keep original behavior below
        label_col = data.filter(regex=r'n61').columns.values[0]
        data.rename(columns={label_col: 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        # Duration features: wrap negative hour/minute differences across the
        # day/hour boundary, then fold hours into total minutes.
        data['hours'] = data.filter(regex=r'field12').values - data.filter(regex=r'field9').values
        data['hours'] = data['hours'].apply(lambda x: 24 + x if x < 0 else x)
        data['minutes'] = data.filter(regex=r'field13').values - data.filter(regex=r'field10').values
        data['minutes'] = data['minutes'].apply(lambda x: 60 + x if x < 0 else x)
        data['minutes'] += data['hours'] * 60
        drop_columns = data.filter(
            regex=r'n421|field11|其他|field28|其他.1|n262|hours|n61|n51|n4417|n4318|field9|field10|field12|field13').columns
        train_data = data.drop(columns=drop_columns)
        # data.fillna(0, inplace=True)
        test_data = None
    elif datasets == 'LiverAblation':
        data_path = '../DataSet/LiverAblation/{}.csv'.format(datasets)
        data = pd.read_csv(data_path, encoding='gb18030', index_col='基线-患者基本信息-ID_sparse')
        # data_path = '../DataSet/LiverAblation/{}_trans.csv'.format(datasets)
        # data = pd.read_csv(data_path, encoding='gb18030', index_col='baseline_info_ID_sparse')
        data.rename(columns={'time_dense': 'label'}, inplace=True)
        data.rename(columns={'relapse_sparse': 'sup_label'}, inplace=True)
        drop_columns_ = data.filter(regex=r'随|ID|cluster|followupInfomation').columns
        data.drop(columns=drop_columns_, inplace=True)
        # Under-sample non-relapse cases to a 1:1 class balance; everything
        # not sampled becomes the held-out test set.
        data_1 = data.loc[data['sup_label'] == 1]
        data_0 = data.loc[data['sup_label'] == 0].sample(n=data_1.shape[0] * 1, random_state=seed)
        data_undersmapling = pd.concat([data_1, data_0]).sample(frac=1, random_state=seed)
        test_data = data.drop(index=data_undersmapling.index)
        if 'non' in task_name:
            train_data = data_0
        else:
            train_data = data_undersmapling
    else:
        train_data = None
        test_data = None
    return train_data, test_data
|
{
"imported_by": [],
"imports": [
"/Regression/src/model/training_.py",
"/Regression/src/preprocess/get_dataset.py",
"/Regression/src/learn_rewieght/reweight.py",
"/Regression/src/preprocess/load_data.py"
]
}
|
Peroxidess/Ablation-Time-Prediction-Model
|
/Regression/src/main.py
|
import numpy as np
import pandas as pd
import six
from tqdm import tqdm
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection
from model.training_ import training_model, model_training, precision, recall, f1, r2
from model.history_ import plot_history_df
def run(train_data, test_data, seed, task_name, target='label'):
    """One experiment: preprocess, (optionally) corrupt labels, train, evaluate.

    Reads the module-level experiment settings (clean_ratio, test_ratio,
    val_ratio, n_splits, epoch, batchsize, iter_, step_, nrun). With
    n_splits > 1 a KFold cross-validation is run; otherwise a single fit on
    all data.

    Returns:
        (metric_df, test_prediction, history_df) accumulated over folds.
    """
    train_data, test_data, co_col, ca_col, nor = data_preprocessing(train_data, test_data,
                                                                    ca_co_sel_flag=False, onehot_flag=True)
    _, test_data = anomaly_dectection(train_data, test_data)
    # train_data, test_data = anomaly_dectection(train_data, test_data)# Outlier detection
    train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \
        get_dataset_(nor, train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, seed=seed, val_ratio=val_ratio,)  # label confusion according to requirements
    metric_df = pd.DataFrame([])
    test_prediction = pd.DataFrame([])
    history_df = pd.DataFrame([])
    history_list = []
    epoch_len_list = []
    if n_splits > 1:
        # Fix: passing random_state with shuffle=False raises ValueError in
        # scikit-learn >= 0.24 (the seed has no effect without shuffling).
        kf = KFold(n_splits=n_splits, shuffle=False)
        for k, (train_index, val_index) in enumerate(kf.split(train_set_mix)):
            print('KFlod in : {}'.format(k))
            model_, history_, metric_, test_pred_, epoch_len = training_model(train_set_mix, train_set_mix_label,
                                                                              task_name, train_index, val_index,
                                                                              test_set, test_set_label,
                                                                              epoch, batchsize, iter_, step_, target, seed)
            metric_df = pd.concat([metric_df, metric_], axis=0)
            history_df = pd.concat([history_df, history_], axis=1)
            history_list.append(history_)
            test_prediction = pd.concat([test_prediction, pd.DataFrame(test_pred_)], axis=1)
            epoch_len_list.append(epoch_len)
        plot_history_df(history_list, task_name)
        print('epoch_len_mean', np.mean(epoch_len_list))  # mean epoch in kflod cross validation
    else:
        model_, history_, metric_, test_pred_, epoch_len = training_model(train_set_mix, train_set_mix_label,
                                                                          task_name, None, None,
                                                                          test_set, test_set_label,
                                                                          epoch, batchsize, iter_, step_, target, seed)
        metric_df = pd.concat([metric_df, metric_], axis=0)
        test_prediction = pd.concat([test_prediction, pd.DataFrame(test_pred_)], axis=1)
        history_df = pd.concat([history_df, history_], axis=1)
        history_list.append(history_)
        plot_history_df(history_list, task_name, val_flag='val_')
    try:
        model_.save('{}_{}nrun_{}Fold.h5'.format(task_name, nrun, n_splits))
    except Exception:
        # Fix: bare except also swallowed KeyboardInterrupt/SystemExit.
        # Non-Keras baselines (LassoCV, LightGBM) have no .save().
        print('Failed to save model')
    return metric_df, test_prediction, history_df
# --- Experiment configuration (module-level, read by run()/main()) ---
np.random.seed(2020)
datasets_name = 'LiverAblation'
task_name = 'ablation_time_load'  # ablation_time_enh / ablation_time_vanilla / relapse_risk
nrun = 10  # num of repeated experiments
clean_ratio = 1  # 1 for No label confusion
test_ratio = 0  # test data ratio for label confusion
val_ratio = 0  # val data ratio for label confusion
n_splits = 1  # n_splits > 1 for Kfold cross validation / n_splits==1 for training all data
epoch = 5000  # Kfold cross validation: a large number / training all data: mean epoch
batchsize = 256
iter_ = 2  # Number of iterations for label modification
step_ = 0.0001  # learning rate for label modification
def main():
    """Repeat the experiment *nrun* times with varying seeds and aggregate.

    Prints per-metric summary statistics over all runs and writes the metric,
    history and test-prediction tables to CSV files named after the task.
    """
    metric_df_all = pd.DataFrame([])
    test_prediction_all = pd.DataFrame([])  # for prediction of test data
    history_df_all = pd.DataFrame([])  # for keras model
    for i, trial in enumerate(tqdm(six.moves.xrange(nrun))):
        print('rnum : {}'.format(i))
        seed = (trial * 2718) % 2020  # a different random seed for each run
        train_data, test_data = load_data_(datasets_name, task_name, seed)
        metric_df, test_prediction, history_df = run(train_data, test_data, seed, task_name)
        metric_df_all = pd.concat([metric_df_all, metric_df], axis=0)
        test_prediction_all = pd.concat([test_prediction_all, test_prediction], axis=1)
        history_df_all = pd.concat([history_df_all, history_df], axis=1)
    # Summary statistics over all runs, one line per metric column.
    for col in metric_df_all.columns:
        print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df_all[col].mean(),
                                                                                metric_df_all[col].std(),
                                                                                metric_df_all[col].max(),
                                                                                metric_df_all[col].median(),
                                                                                metric_df_all[col].min()))
    metric_df_all.to_csv('./metric_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    history_df_all.to_csv('./history_{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits), index=False)
    # test_prediction_all.columns = ['ab_time', 'ab_time_enh']
    test_prediction_all.to_csv('./prediction{}_{}nrun_{}Fold.csv'.format(task_name, nrun, n_splits))
    plt.show()
    pass


if __name__ == '__main__':
    main()
    pass
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
# Global matplotlib styling shared by every plotting helper in this module.
plt.rc('font', family='Times New Roman')
font_size = 16  # base font size for tick / axis labels
def plot_metric_df(history_list, task_name, val_flag='test_'):
    """Plot one subplot per evaluation metric across the runs in *history_list*.

    Classification tasks ('relapse_risk') show loss/F1; regression tasks show
    R^2/MAE/MSE. A shared legend of model names is drawn above the figure.
    """
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['r2', 'mae', 'mse']
    fig = plt.figure(figsize=(20, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    # Fix: subplot() requires integer grid dimensions; the original passed the
    # float L / row, which modern matplotlib rejects. Ceil so row * col >= L.
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_metric(history_list, metric, val_flag)
    fig.subplots_adjust(top=0.8)
    legend_labels = ['ours',
                     # 'enh_nonrelapse',
                     'ATT+MLP',
                     # 'vanilla_nonrelapse',
                     'LGB',
                     # 'lightgbm_nonrelapse',
                     'Lasso',
                     # 'lasso_nonrelapse'
                     ]
    plt.legend(labels=legend_labels,
               ncol=len(legend_labels),
               # loc='best',
               loc='upper center',
               fontsize=14,
               bbox_to_anchor=(-1.2, 1, 1, 0.2),
               borderaxespad=0.,
               )
    # plt.title('{} {}'.format(task_name, metric), fontsize=font_size)
def show_metric(history_list, metrics_name, val_flag=''):
    """Plot the column `val_flag+metrics_name` of each history DataFrame as one line."""
    marker_list = ['*', 'd', 's', 'x', 'o']
    metrics_name_dict = {'r2': 'R-square', 'mae': 'mean absolute error', 'mse': 'mean squared error'}
    for idx, hist in enumerate(history_list):
        series = hist.filter(regex=r'\b{}{}\b'.format(val_flag, metrics_name))[:3000]
        plt.plot(series, linestyle=':', marker=marker_list[idx], linewidth=2)
    plt.xticks(range(0, 11), fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(metrics_name_dict[metrics_name], fontsize=font_size)
    plt.xlabel('Round', fontsize=font_size)
def plot_history_df(history_list, task_name, val_flag=''):
    """Plot training curves (loss plus F1 or R^2) for several history DataFrames."""
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    # Fix: subplot() requires integer grid dimensions; the original passed the
    # float L / row, which modern matplotlib rejects. Ceil so row * col >= L.
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_history(history_list, metric, val_flag)
    plt.legend(labels=['attention', 'attention+mlp', 'attention+label corrected',
                       'attention+mlp+label corrected(ours)', 'mlp', 'mlp+label corrected'],
               fontsize=14)
    # plt.title('{} {}'.format(metric, task_name), fontsize=font_size)
def show_history(history_list, metrics_name, val_flag=''):
    """Plot the across-fold mean of one metric with a min/max band per history."""
    markers = ['^', 'd', 's', '*', 'x', 'o']
    for idx, hist in enumerate(history_list):
        cols = hist.filter(regex=r'\b{}{}'.format(val_flag, metrics_name))[:3000]
        mean_curve = np.mean(cols, axis=1)
        n_epochs = mean_curve.shape[0]
        plt.plot(mean_curve, linewidth=2, marker=markers[idx], markevery=200)
        # Shade the spread between the best and worst fold at each epoch.
        plt.fill_between(range(n_epochs), np.min(cols, axis=1), np.max(cols, axis=1), alpha=0.3)
    plt.xticks(fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(val_flag + metrics_name, fontsize=font_size)
    plt.xlabel('Epoch', fontsize=font_size)
def plot_history(history_list, task_name, val_flag=False):
    """Plot mean training curves (and optionally validation curves) per metric.

    Fix: the subplot row count was computed by `squrt()`, an undefined name
    that raised NameError on every call; use the floor(sqrt) grid consistent
    with the other plotting helpers in this module.
    """
    if task_name == 'relapse_risk':
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    rows = math.floor(math.sqrt(L))
    for i, metric in enumerate(metric_list):
        plt.subplot(rows, L, i + 1)
        show_train_history(history_list, metric)
        if val_flag:
            show_train_history(history_list, 'val_{}'.format(metric))
            plt.legend(labels=[metric, 'val_{}'.format(metric)], loc='upper left')
        plt.title('{} {}'.format(task_name, metric))
def history_save(history_list, history_name):
    """Stack every Keras history (rows = metric names) into one CSV file.

    Note: the output filename spelling ('hitory_') is kept as-is because
    downstream consumers may already rely on it.
    """
    frames = [pd.DataFrame.from_dict(h.history, orient='index') for h in history_list]
    combined = pd.concat([pd.DataFrame([])] + frames, axis=0)
    combined.to_csv('./hitory_{}.csv'.format(history_name))
def show_train_history(history_list, metrics_name):
    """Average one metric over all Keras History objects and plot the mean curve."""
    collected = None
    for hist in history_list:
        row = pd.DataFrame(np.array(hist.history[metrics_name]).reshape(1, -1))
        collected = row if collected is None else pd.concat([collected, row], axis=0)
    curve = np.mean(collected, axis=0)
    plt.plot(curve)
    plt.ylabel(metrics_name)
    plt.xlabel('Epoch')
--- FILE SEPARATOR ---
import copy
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression
from keras.models import load_model
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
from keras.callbacks import EarlyStopping
from model.bulid_model import classifer_, regression_, label_correction
from model.evaluate import evaluate_classification, evaluate_regression
def precision(y_true, y_pred):
    """Batch precision TP / (predicted positives + eps), in Keras backend ops."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch recall TP / (actual positives + eps), in Keras backend ops."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def r2(y_true, y_pred):
    """Coefficient of determination (R^2) computed with Keras backend ops."""
    ss_res = K.sum(K.square(y_pred - y_true))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / ss_tot
def f1(y_true, y_pred):
    """Harmonic mean of precision and recall; 1e-7 guards division by zero."""
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + 1e-7)
# model compile and fit
def model_training(model, train_sets, train_label, val_data, val_label, lr, task, epoch, batch_size, patience=100):
    """Compile *model* for *task* and fit it with early stopping.

    Args:
        model: an uncompiled Keras model.
        train_sets / train_label: training inputs and targets.
        val_data / val_label: validation inputs/targets; when *val_label* is
            None the model is fit without validation data.
        lr: learning rate for the RMSprop optimizer.
        task: 'classification' (binary cross-entropy, val_f1 monitored) or
            'regression' (MSE loss, val_r2 monitored).
        epoch / batch_size / patience: fit / EarlyStopping parameters.

    Returns:
        (history, model): the Keras History object and the fitted model.

    Raises:
        ValueError: if *task* is neither 'classification' nor 'regression'
            (previously this fell through to a NameError on `metrics`).
    """
    if task == 'classification':
        metrics = ['acc', f1, precision, recall]
        loss = 'binary_crossentropy'
        val_metric = 'val_f1'
    elif task == 'regression':
        # NOTE: only r2 is tracked; 'mse'/'mae' were dead assignments before.
        metrics = [r2]
        loss = 'mean_squared_error'
        val_metric = 'val_r2'
    else:
        raise ValueError("task must be 'classification' or 'regression', got {!r}".format(task))
    model.compile(optimizer=RMSprop(lr=lr), loss=loss, metrics=metrics)
    model.summary()
    # The two fit branches only differed by validation_data; build kwargs once.
    fit_kwargs = dict(
        epochs=epoch,
        batch_size=batch_size,
        shuffle=True,
        # Monitored metric is maximized (F1 / R^2), hence mode='max'.
        callbacks=[EarlyStopping(monitor=val_metric, patience=patience, mode='max')],
        verbose=2,
    )
    if val_label is not None:
        fit_kwargs['validation_data'] = (val_data, val_label)
    history = model.fit(train_sets, train_label, **fit_kwargs)
    return history, model
# select model
def training_model(train_set, train_set_label, task_name, train_index, val_index, test_set, test_set_label,
                   epoch, batchsize, iter_=None, step_=None, target='label', seed=2020, label_corr_epoch=2):
    """Train and evaluate one model variant selected by substring of `task_name`.

    Variants: 'risk' -> relapse-risk classifier; 'vanilla' -> plain regression;
    'load' -> evaluate a saved .h5 model; 'enh' -> regression with iterative
    label correction driven by the risk classifier; 'lr' -> LassoCV baseline;
    'gbm' -> LightGBM baseline.

    :param train_set / train_set_label: training features and labels
        (labels contain both `target` and 'sup_label' columns).
    :param task_name: branch selector (matched by substring, see above).
    :param train_index / val_index: positional fold indices; if train_index is
        None the test split doubles as the validation split.
    :param test_set / test_set_label: held-out features and labels.
    :param epoch, batchsize: training schedule (not used by every branch).
    :param iter_, step_: label-correction iterations / step size ('enh' only).
    :param target: regression label column name.
    :param seed: random seed for the sklearn/LightGBM baselines.
    :param label_corr_epoch: outer label-correction rounds ('enh' only).
    :return: (model, history_df, metric, test_pred, len_).

    NOTE(review): an unrecognized task_name leaves model/metric/test_pred
    unbound and the final return raises NameError — confirm callers always
    pass a known variant.
    """
    if train_index is not None:
        # Cross-validation: carve the fold out of the training data.
        train_x, val_x = train_set.iloc[train_index], train_set.iloc[val_index]
        train_y, val_y = train_set_label.iloc[train_index], train_set_label.iloc[val_index]
        val_label = val_y[target]
        val_suplabel = val_y['sup_label']
        # *_time frames drop post-operative / discharge columns (the regex
        # matches the Chinese words for them) as a target-leakage guard.
        val_x_time = val_x.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
    else:
        train_x = train_set
        train_y = train_set_label
        val_x = test_set
        val_x_time = test_set.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
        val_label = test_set_label[target]
        val_suplabel = test_set_label['sup_label']
    train_x_time = train_x.drop(columns=train_x.filter(regex=r'术后|出院|Post').columns)
    test_set_time = test_set.drop(columns=test_set.filter(regex=r'术后|出院|Post').columns)
    # train_x_time.to_csv('train_data.csv', encoding='gb18030')
    # Debug aid: diff the current columns against a previously exported snapshot.
    train_data_raw = pd.read_csv('train_data.csv', encoding='gb18030')
    xx = set(train_data_raw.columns) - set(train_x_time.columns)
    rr = set(train_x_time.columns) - set(train_data_raw.columns)
    if 'risk' in task_name:
        # Relapse-risk classifier: takes features plus the label as inputs.
        classifer, att_weight = classifer_(train_x)
        # epoch=130 for training whole data 107
        # lr=8e-5 batchsize=8 patience= 90
        history, model = model_training(classifer,
                                        [train_x, train_y[target]], train_y['sup_label'],
                                        [val_x, val_label], val_suplabel,
                                        8e-5, 'classification', 120, 16, 190)
        metric = evaluate_classification(model,
                                         [train_x, train_y[target]], train_y['sup_label'],
                                         [val_x, val_label], val_suplabel,
                                         [test_set, test_set_label[target]], test_set_label['sup_label'])
        test_pred = model.predict([test_set, test_set_label[target]])
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = history_df.shape[0]  # count the number of epoch
    elif 'vanilla' in task_name:
        # Plain regression on pre-operative features only.
        regression = regression_(train_x_time)
        # epoch=2926 for training whole data 2709 for non-relapse data
        # lr=9e-6 batchsize=256 patience= 350
        history, model = model_training(regression, train_x_time, train_y[target], val_x_time, val_label,
                                        9e-6, 'regression', 15000, batchsize, 2500) #240 2335
        metric = evaluate_regression(model, train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = len(history.history['loss'])  # count the number of epoch
    elif 'load' in task_name:
        # Evaluate a previously saved model; r2 must be re-registered on load.
        model = load_model('ablation_time_enh_10nrun_1Fold.h5', custom_objects={'r2': r2})
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame([])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        len_ = 0
    elif 'enh' in task_name:
        # Enhanced pipeline: classifier-guided label correction, then regression.
        history_df = pd.DataFrame([])
        classifer, att_weight = classifer_(train_x)
        # lr=8e-5 batchsize=16 epoch= 120
        history, classifer = model_training(classifer,
                                            [train_set, train_set_label[target]], train_set_label['sup_label'],
                                            [pd.DataFrame([]), None], None,
                                            8e-5, 'classification', 120, 16, 130)
        label_target = copy.deepcopy(train_set_label[target])
        regression_enh = regression_(train_x_time)
        len_ = 0
        for i in range(label_corr_epoch):
            print('iter {}'.format(i))
            # Nudge labels down the classifier's risk gradient, then refit.
            label_target = label_correction(classifer, train_set, label_target, iter_=iter_, step_=step_)
            # label_target = train_y[target]
            if train_index is not None:
                label_target_train = label_target.iloc[train_index]
                val_label = label_target.iloc[val_index]
            else:
                label_target_train = label_target
            # lr=9e-6 batchsize=256 epoch= 600
            history, model = model_training(regression_enh,
                                            train_x_time, label_target_train, val_x_time, val_label,
                                            7e-5, 'regression', 225, batchsize, 220,)
            # 1e-5, 'regression', 1750, batchsize, 2120, )
            metric = evaluate_regression(model,
                                         train_x_time, train_y[target],
                                         val_x_time, val_label,
                                         test_set_time, test_set_label[target],
                                         )
            test_pred = model.predict(test_set_time)
            if history_df.empty:
                history_df = pd.DataFrame.from_dict(history.history, orient='columns')
            else:
                history_df = pd.concat([history_df, pd.DataFrame.from_dict(history.history, orient='columns')], axis=0)
            len_ += history_df.shape[0]  # count the number of epoch
        history_df.reset_index(drop=True, inplace=True)
        if train_index is not None:
            # Diagnostic: how much the predicted labels reduce classifier risk.
            val_pred = model.predict(val_x_time)
            risk = classifer.predict([val_x, train_set_label[target].iloc[val_index]])
            risk_corr = classifer.predict([val_x, val_pred])
            risk_change = risk - risk_corr
            risk_change_max = risk_change.max()
            risk_change_mean = risk_change.mean()
            x = 1
    elif 'lr' in task_name:
        model = LassoCV(random_state=seed)
        # model = RidgeCV()
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    elif 'gbm' in task_name:
        model = lgb.LGBMRegressor(
            max_depth=3,
            bagging_fraction=0.5,
            feature_fraction=0.5,
            reg_alpha=1,
            reg_lambda=1,
            min_child_samples=10,
            n_estimators=200,
            learning_rate=1e-1,
            random_state=seed,
        )
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    return model, history_df, metric, test_pred, len_
--- FILE SEPARATOR ---
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
import pandas as pd
import numpy as np
from preprocess import plot_tabel
def get_dataset_(nor, train_data, test_data, clean_ratio, test_retio, seed, target='label', val_ratio=0):
    """Split data into train/val/test and optionally inject label noise.

    When clean_ratio < 1, a (1 - clean_ratio) fraction of the training split
    gets Gaussian-perturbed labels and sup_label=1 (noisy); the remainder and
    the test split get sup_label=0 (clean).

    :param nor: scaler/normalizer handle (only used by the commented-out
        plotting call below; otherwise unused here).
    :param train_data: full DataFrame including the `target` column.
    :param test_data: pre-made test DataFrame, or None to split it off.
    :param clean_ratio: fraction of the training split kept with clean labels.
    :param test_retio: test split fraction (typo kept for interface
        compatibility); ignored when test_data is given or it is 0.
    :param seed: random_state for all splits.
    :param target: label column name.
    :param val_ratio: validation fraction carved from the (mixed) train split.
    :return: (train_set_mix, train_set_mix_label, val_set, val_set_label,
        test_set, test_set_label); the feature frames have `target` and
        'sup_label' dropped in place.
    """
    if test_retio == 0 or test_data is not None:
        train_set = train_data
        test_set = test_data
    else:
        train_set, test_set = train_test_split(train_data, test_size=test_retio, random_state=seed)
    if clean_ratio < 1:
        # Carve out the subset whose labels will be corrupted.
        train_set_, train_set_clean = train_test_split(train_set, test_size=clean_ratio, random_state=seed)
        # Noise drawn from the subset's own label distribution plus unit Gaussian.
        label_distrib = np.random.normal(loc=train_set_[target].describe().loc['mean'],
                                         scale=train_set_[target].describe().loc['std'], size=train_set_[target].shape)
        alpha = 1
        beta = 1
        train_label_ = train_set_[target] + \
                       alpha * np.random.normal(loc=0., scale=1., size=train_set_[target].shape) + beta * label_distrib
        train_set_[target] = train_label_
        # sup_label marks whether a row's label was corrupted (1) or not (0).
        train_set_['sup_label'] = 1
        train_set_clean['sup_label'] = 0
        test_set['sup_label'] = 0
    else:
        train_set_ = None
        train_set_clean = train_set
    train_set_mix = pd.concat([train_set_, train_set_clean], axis=0)
    # mix_ratio = train_set[train_set[target] != train_set_mix[target]].index
    # print('real mix ratio is {}'.format(mix_ratio))
    if val_ratio > 0:
        train_set_mix, val_set = train_test_split(train_set_mix, test_size=val_ratio, random_state=seed)
        val_set_label = val_set[[target, 'sup_label']]
        val_set.drop(columns=[target, 'sup_label'], inplace=True)
    else:
        val_set = None
        val_set_label = None
    train_set_mix_label = train_set_mix[[target, 'sup_label']]
    test_set_label = test_set[[target, 'sup_label']]
    # plot_tabel.metric_hist(test_set, nor)
    train_set_mix.drop(columns=[target, 'sup_label'], inplace=True)
    test_set.drop(columns=[target, 'sup_label'], inplace=True)
    return train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label
def data_preprocessing(train_data, test_data=None, ca_feat_th=8, ca_co_sel_flag=True, onehot_flag=False, target='label'):
    """Clean, type-split (categorical vs continuous) and standardize the data.

    Columns with more than `ca_feat_th` distinct values are treated as
    continuous (renamed with a '_dense' suffix), columns with 2..ca_feat_th
    distinct values as categorical ('_sparse'). Categoricals are
    label-encoded (optionally one-hot); continuous columns plus the target
    are standardized with a single scaler fit over both splits.

    :param train_data: training DataFrame.
    :param test_data: optional test DataFrame, processed jointly via a 'tab'
        flag column so both splits share one encoder/scaler fit.
    :param ca_feat_th: max distinct values for a categorical column.
    :param ca_co_sel_flag: True to re-detect column types; False to reuse
        existing '_sparse'/'_dense' suffixes.
    :param onehot_flag: one-hot encode categoricals instead of integer codes.
    :param target: label column name (also standardized, see note below).
    :return: (train_data, test_data, co_col, ca_col, std) where co_col/ca_col
        are the final continuous/categorical column indexes and std is the
        fitted StandardScaler.
    """
    if test_data is not None:
        # Tag the splits so they can be separated again after joint fitting.
        train_data['tab'] = 1
        test_data['tab'] = 0
    data_raw = pd.concat([train_data, test_data], axis=0)
    print('\ndata_raw', data_raw.shape)
    data = data_raw.dropna(axis=1, how='all')
    data = data.fillna(0)
    if ca_co_sel_flag:
        ca_col = []
        co_col = []
        # Label-like columns are excluded from type detection.
        data_columns_label = data.filter(regex=r'label').columns
        data_columns = data.columns.drop(data_columns_label)
        for col in data_columns:
            col_feat_num = len(set(data[col]))
            if col_feat_num > ca_feat_th:
                col_ = col + '_dense'
                co_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
            elif ca_feat_th >= col_feat_num > 1:
                col_ = col + '_sparse'
                ca_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
    else:
        ca_col = data.filter(regex=r'sparse').columns
        co_col = data.filter(regex=r'dense').columns
    data[ca_col] = pd.concat([data[ca_col].apply(lambda ser: pd.factorize(ser)[0])])
    data[ca_col] = data[ca_col].apply(LabelEncoder().fit_transform)
    if onehot_flag:
        data = pd.get_dummies(data, columns=ca_col)
    # The regression target is normalized as well, to keep the sup_label
    # classifier (which consumes the label as an input) from collapsing.
    # BUG FIX: the original did `co_col = co_col.append(...)`; when co_col is
    # a plain list (the ca_co_sel_flag=True path), list.append mutates in
    # place and returns None, so `data[co_col]` below crashed. Build a new
    # list instead, which works for both the list and pandas-Index branches.
    co_col = list(co_col) + list(data.columns[data.columns == target])
    mms = MinMaxScaler(feature_range=(0.1, 1.1))
    std = StandardScaler()
    data[co_col] = pd.DataFrame(std.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data[co_col] = pd.DataFrame(mms.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data = pd.DataFrame(mms.fit_transform(data), columns=data.columns, index=data.index)
    if test_data is not None:
        train_data = data[data['tab'] == 1].drop(columns=['tab'])
        test_data = data[data['tab'] == 0].drop(columns=['tab'])
    else:
        train_data = data
    ca_col = data.filter(regex=r'sparse').columns
    co_col = data.filter(regex=r'dense').columns
    return train_data, test_data, co_col, ca_col, std
def anomaly_dectection(train_data=None, test_data=None, target='label'):
    """Drop rows whose `target` value lies outside mean +/- 3*std, per frame.

    :param train_data: optional DataFrame with a `target` column.
    :param test_data: optional DataFrame with a `target` column.
    :param target: name of the label column used for the 3-sigma rule.
    :return: (train_clean, test_clean). A None or empty input is passed
        through unchanged — the original raised AttributeError on the None
        defaults (`None.empty`) and, because the append sat inside the `if`,
        hit IndexError at `clean_data[1]` when a frame was empty.
    """
    clean_data = []
    for data in (train_data, test_data):
        if data is not None and not data.empty:
            std_ = data[target].std()
            mean_ = data[target].mean()
            # Keep only values strictly within the 3-sigma band.
            data = data[data[target] < mean_ + 3 * std_]
            data = data[data[target] > mean_ - 3 * std_]
        clean_data.append(data)
    return clean_data[0], clean_data[1]
--- FILE SEPARATOR ---
#coding=gb18030
import numpy as np
import pandas as pd
def load_data_(datasets, task_name='', seed=2020):
    """Load one of the supported datasets from ../DataSet and rename its
    target column to 'label'.

    :param datasets: one of 'winequality_white', 'PPH', 'LiverAblation';
        anything else returns (None, None).
    :param task_name: for 'LiverAblation', if it contains 'non' only
        non-relapse rows are used for training.
    :param seed: random_state for the LiverAblation undersampling.
    :return: (train_data, test_data); test_data is None except for
        LiverAblation, where it holds the rows not drawn into the
        undersampled training set.
    """
    if datasets == 'winequality_white':
        data_path = '../DataSet/wine/{}.csv'.format(datasets)
        data = pd.read_csv(data_path)
        data.rename(columns={'quality': 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        train_data = data.fillna(0)
        test_data = None
    elif datasets == 'PPH':
        data_path = '../DataSet/PPH/{}.csv'.format(datasets)
        # Header file maps raw column codes to human-readable names.
        data_head = pd.read_csv('../DataSet/PPH/PPH_head.csv', encoding='gb18030')
        data = pd.read_csv(data_path, encoding='gb18030', index_col='index')
        col = []
        for col_ in data.columns:
            col.append(col_ + np.squeeze(data_head[col_].values))
        data.columns = np.array(col)
        # data.to_csv('../DataSet/PPH/data_feat_name_add.csv', index=False, encoding='gb18030')
        data['sup_label'] = 0
        # The column matching 'n61' is the prediction target.
        label_col = data.filter(regex=r'n61').columns.values[0]
        data.rename(columns={label_col: 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        # Derive elapsed time in minutes from (hour, minute) start/end fields,
        # wrapping negative differences across day/hour boundaries.
        data['hours'] = data.filter(regex=r'field12').values - data.filter(regex=r'field9').values
        data['hours'] = data['hours'].apply(lambda x: 24 + x if x < 0 else x)
        data['minutes'] = data.filter(regex=r'field13').values - data.filter(regex=r'field10').values
        data['minutes'] = data['minutes'].apply(lambda x: 60 + x if x < 0 else x)
        data['minutes'] += data['hours'] * 60
        # Drop the raw time fields and other leakage-prone columns.
        drop_columns = data.filter(
            regex=r'n421|field11|其他|field28|其他.1|n262|hours|n61|n51|n4417|n4318|field9|field10|field12|field13').columns
        train_data = data.drop(columns=drop_columns)
        # data.fillna(0, inplace=True)
        test_data = None
    elif datasets == 'LiverAblation':
        data_path = '../DataSet/LiverAblation/{}.csv'.format(datasets)
        data = pd.read_csv(data_path, encoding='gb18030', index_col='基线-患者基本信息-ID_sparse')
        # data_path = '../DataSet/LiverAblation/{}_trans.csv'.format(datasets)
        # data = pd.read_csv(data_path, encoding='gb18030', index_col='baseline_info_ID_sparse')
        data.rename(columns={'time_dense': 'label'}, inplace=True)
        data.rename(columns={'relapse_sparse': 'sup_label'}, inplace=True)
        # Drop follow-up / ID / cluster columns (regex includes the Chinese
        # character for "follow-up").
        drop_columns_ = data.filter(regex=r'随|ID|cluster|followupInfomation').columns
        data.drop(columns=drop_columns_, inplace=True)
        # 1:1 undersampling of the majority (non-relapse) class.
        data_1 = data.loc[data['sup_label'] == 1]
        data_0 = data.loc[data['sup_label'] == 0].sample(n=data_1.shape[0] * 1, random_state=seed)
        data_undersmapling = pd.concat([data_1, data_0]).sample(frac=1, random_state=seed)
        test_data = data.drop(index=data_undersmapling.index)
        if 'non' in task_name:
            train_data = data_0
        else:
            train_data = data_undersmapling
    else:
        train_data = None
        test_data = None
    return train_data, test_data
|
{
"imported_by": [],
"imports": [
"/Regression/src/model/history_.py",
"/Regression/src/model/training_.py",
"/Regression/src/preprocess/get_dataset.py",
"/Regression/src/preprocess/load_data.py"
]
}
|
Peroxidess/Ablation-Time-Prediction-Model
|
/Regression/src/model/training_.py
|
import copy
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression
from keras.models import load_model
from keras import backend as K
from keras.optimizers import Adam, RMSprop, SGD
from keras.callbacks import EarlyStopping
from model.bulid_model import classifer_, regression_, label_correction
from model.evaluate import evaluate_classification, evaluate_regression
def precision(y_true, y_pred):
    """Batch-wise precision metric, TP / (TP + FP), built from Keras backend ops.

    Predictions are clipped to [0, 1] and rounded to hard 0/1 decisions;
    K.epsilon() guards against division by zero when nothing is predicted positive.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall metric, TP / (TP + FN), built from Keras backend ops.

    Predictions are clipped to [0, 1] and rounded to hard 0/1 decisions;
    K.epsilon() guards against division by zero when there are no positives.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def r2(y_true, y_pred):
    """Coefficient of determination (R^2) as a Keras metric: 1 - SS_res / SS_tot."""
    ss_res = K.sum(K.square(y_pred - y_true))
    ss_tot = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - ss_res / ss_tot
def f1(y_true, y_pred):
    """F1 score: harmonic mean of the `precision` and `recall` metrics above.

    Each sub-metric is computed once instead of twice (the original evaluated
    precision and recall two times each, adding four redundant sub-graphs of
    backend ops); 1e-7 guards against division by zero.
    """
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return 2 * p * r / (p + r + 1e-7)
# model compile and fit
def model_training(model, train_sets, train_label, val_data, val_label, lr, task, epoch, batch_size, patience=100):
    """Compile `model` for the given task and fit it with early stopping.

    Args:
        model: uncompiled Keras model.
        train_sets / train_label: training inputs and targets.
        val_data / val_label: validation inputs and targets; pass
            val_label=None to fit without a validation split.
        lr: learning rate for the RMSprop optimizer.
        task: 'classification' (binary cross-entropy; acc/f1/precision/recall)
            or 'regression' (MSE loss; r2 metric).
        epoch: maximum number of epochs.
        batch_size: minibatch size.
        patience: EarlyStopping patience on the task's validation metric.

    Returns:
        (history, model): the Keras History object and the fitted model.

    Raises:
        ValueError: for an unknown `task` (previously this fell through and
            crashed later with a NameError on `metrics`).
    """
    if task == 'classification':
        metrics = ['acc', f1, precision, recall]
        loss = 'binary_crossentropy'
        val_metric = 'val_f1'
    elif task == 'regression':
        # Only r2 is tracked; the original ['mse', 'mae', r2] list was dead
        # code, immediately overwritten, and has been removed.
        metrics = [r2]
        loss = 'mean_squared_error'
        val_metric = 'val_r2'
    else:
        raise ValueError("unknown task: {!r} (expected 'classification' or 'regression')".format(task))
    model.compile(optimizer=RMSprop(lr=lr), loss=loss, metrics=metrics)
    model.summary()
    # Single fit call: the only difference between the two original branches
    # was the presence of validation_data.
    fit_kwargs = dict(epochs=epoch,
                      batch_size=batch_size,
                      shuffle=True,
                      callbacks=[EarlyStopping(monitor=val_metric, patience=patience, mode='max')],
                      verbose=2)
    if val_label is not None:
        fit_kwargs['validation_data'] = (val_data, val_label)
    history = model.fit(train_sets, train_label, **fit_kwargs)
    return history, model
# select model
def training_model(train_set, train_set_label, task_name, train_index, val_index, test_set, test_set_label,
                   epoch, batchsize, iter_=None, step_=None, target='label', seed=2020, label_corr_epoch=2):
    """Train and evaluate one model variant selected by substring of `task_name`.

    Variants: 'risk' -> relapse-risk classifier; 'vanilla' -> plain regression;
    'load' -> evaluate a saved .h5 model; 'enh' -> regression with iterative
    label correction driven by the risk classifier; 'lr' -> LassoCV baseline;
    'gbm' -> LightGBM baseline.

    :param train_set / train_set_label: training features and labels
        (labels contain both `target` and 'sup_label' columns).
    :param task_name: branch selector (matched by substring, see above).
    :param train_index / val_index: positional fold indices; if train_index is
        None the test split doubles as the validation split.
    :param test_set / test_set_label: held-out features and labels.
    :param epoch, batchsize: training schedule (not used by every branch).
    :param iter_, step_: label-correction iterations / step size ('enh' only).
    :param target: regression label column name.
    :param seed: random seed for the sklearn/LightGBM baselines.
    :param label_corr_epoch: outer label-correction rounds ('enh' only).
    :return: (model, history_df, metric, test_pred, len_).

    NOTE(review): an unrecognized task_name leaves model/metric/test_pred
    unbound and the final return raises NameError — confirm callers always
    pass a known variant.
    """
    if train_index is not None:
        # Cross-validation: carve the fold out of the training data.
        train_x, val_x = train_set.iloc[train_index], train_set.iloc[val_index]
        train_y, val_y = train_set_label.iloc[train_index], train_set_label.iloc[val_index]
        val_label = val_y[target]
        val_suplabel = val_y['sup_label']
        # *_time frames drop post-operative / discharge columns (the regex
        # matches the Chinese words for them) as a target-leakage guard.
        val_x_time = val_x.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
    else:
        train_x = train_set
        train_y = train_set_label
        val_x = test_set
        val_x_time = test_set.drop(columns=val_x.filter(regex=r'术后|出院|Post').columns)
        val_label = test_set_label[target]
        val_suplabel = test_set_label['sup_label']
    train_x_time = train_x.drop(columns=train_x.filter(regex=r'术后|出院|Post').columns)
    test_set_time = test_set.drop(columns=test_set.filter(regex=r'术后|出院|Post').columns)
    # train_x_time.to_csv('train_data.csv', encoding='gb18030')
    # Debug aid: diff the current columns against a previously exported snapshot.
    train_data_raw = pd.read_csv('train_data.csv', encoding='gb18030')
    xx = set(train_data_raw.columns) - set(train_x_time.columns)
    rr = set(train_x_time.columns) - set(train_data_raw.columns)
    if 'risk' in task_name:
        # Relapse-risk classifier: takes features plus the label as inputs.
        classifer, att_weight = classifer_(train_x)
        # epoch=130 for training whole data 107
        # lr=8e-5 batchsize=8 patience= 90
        history, model = model_training(classifer,
                                        [train_x, train_y[target]], train_y['sup_label'],
                                        [val_x, val_label], val_suplabel,
                                        8e-5, 'classification', 120, 16, 190)
        metric = evaluate_classification(model,
                                         [train_x, train_y[target]], train_y['sup_label'],
                                         [val_x, val_label], val_suplabel,
                                         [test_set, test_set_label[target]], test_set_label['sup_label'])
        test_pred = model.predict([test_set, test_set_label[target]])
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = history_df.shape[0]  # count the number of epoch
    elif 'vanilla' in task_name:
        # Plain regression on pre-operative features only.
        regression = regression_(train_x_time)
        # epoch=2926 for training whole data 2709 for non-relapse data
        # lr=9e-6 batchsize=256 patience= 350
        history, model = model_training(regression, train_x_time, train_y[target], val_x_time, val_label,
                                        9e-6, 'regression', 15000, batchsize, 2500) #240 2335
        metric = evaluate_regression(model, train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame.from_dict(history.history, orient='columns')
        len_ = len(history.history['loss'])  # count the number of epoch
    elif 'load' in task_name:
        # Evaluate a previously saved model; r2 must be re-registered on load.
        model = load_model('ablation_time_enh_10nrun_1Fold.h5', custom_objects={'r2': r2})
        test_pred = model.predict(test_set_time)
        history_df = pd.DataFrame([])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        len_ = 0
    elif 'enh' in task_name:
        # Enhanced pipeline: classifier-guided label correction, then regression.
        history_df = pd.DataFrame([])
        classifer, att_weight = classifer_(train_x)
        # lr=8e-5 batchsize=16 epoch= 120
        history, classifer = model_training(classifer,
                                            [train_set, train_set_label[target]], train_set_label['sup_label'],
                                            [pd.DataFrame([]), None], None,
                                            8e-5, 'classification', 120, 16, 130)
        label_target = copy.deepcopy(train_set_label[target])
        regression_enh = regression_(train_x_time)
        len_ = 0
        for i in range(label_corr_epoch):
            print('iter {}'.format(i))
            # Nudge labels down the classifier's risk gradient, then refit.
            label_target = label_correction(classifer, train_set, label_target, iter_=iter_, step_=step_)
            # label_target = train_y[target]
            if train_index is not None:
                label_target_train = label_target.iloc[train_index]
                val_label = label_target.iloc[val_index]
            else:
                label_target_train = label_target
            # lr=9e-6 batchsize=256 epoch= 600
            history, model = model_training(regression_enh,
                                            train_x_time, label_target_train, val_x_time, val_label,
                                            7e-5, 'regression', 225, batchsize, 220,)
            # 1e-5, 'regression', 1750, batchsize, 2120, )
            metric = evaluate_regression(model,
                                         train_x_time, train_y[target],
                                         val_x_time, val_label,
                                         test_set_time, test_set_label[target],
                                         )
            test_pred = model.predict(test_set_time)
            if history_df.empty:
                history_df = pd.DataFrame.from_dict(history.history, orient='columns')
            else:
                history_df = pd.concat([history_df, pd.DataFrame.from_dict(history.history, orient='columns')], axis=0)
            len_ += history_df.shape[0]  # count the number of epoch
        history_df.reset_index(drop=True, inplace=True)
        if train_index is not None:
            # Diagnostic: how much the predicted labels reduce classifier risk.
            val_pred = model.predict(val_x_time)
            risk = classifer.predict([val_x, train_set_label[target].iloc[val_index]])
            risk_corr = classifer.predict([val_x, val_pred])
            risk_change = risk - risk_corr
            risk_change_max = risk_change.max()
            risk_change_mean = risk_change.mean()
            x = 1
    elif 'lr' in task_name:
        model = LassoCV(random_state=seed)
        # model = RidgeCV()
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    elif 'gbm' in task_name:
        model = lgb.LGBMRegressor(
            max_depth=3,
            bagging_fraction=0.5,
            feature_fraction=0.5,
            reg_alpha=1,
            reg_lambda=1,
            min_child_samples=10,
            n_estimators=200,
            learning_rate=1e-1,
            random_state=seed,
        )
        model.fit(train_x_time, train_y[target])
        metric = evaluate_regression(model,
                                     train_x_time, train_y[target],
                                     val_x_time, val_label,
                                     test_set_time, test_set_label[target],
                                     )
        history_df = pd.DataFrame([])
        len_ = 0
        test_pred = model.predict(test_set_time)
    return model, history_df, metric, test_pred, len_
|
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error, \
confusion_matrix, precision_score, recall_score, f1_score, r2_score, accuracy_score
from sklearn.preprocessing import MinMaxScaler
def evaluate_classification(model, train_sets, train_label, val_sets, val_label, test_sets, test_label):
    """Score a classifier on the train/val/test splits.

    Probabilities from `model.predict` are rounded to hard 0/1 decisions;
    validation metrics stay None when val_label is None.

    :return: one-row DataFrame with train/val/test accuracy and F1 columns.
    """
    pred_test = model.predict(test_sets)
    pred_train = model.predict(train_sets)
    _ = confusion_matrix(test_label, pred_test.round())  # computed for debugging parity
    scores = {
        'train_acc': accuracy_score(train_label, pred_train.round()),
        'test_acc': accuracy_score(test_label, pred_test.round()),
        'val_acc': None,
        'val_f1': None,
        'train_f1': f1_score(train_label, pred_train.round()),
        'test_f1': f1_score(test_label, pred_test.round()),
    }
    if val_label is not None:
        pred_val = model.predict(val_sets)
        scores['val_acc'] = accuracy_score(val_label, pred_val.round())
        scores['val_f1'] = f1_score(val_label, pred_val.round())
    return pd.DataFrame([scores])
def mape(y_true, y_pred):
    """Mean absolute percentage error, in percent (undefined where y_true == 0)."""
    rel_err = np.abs((y_true - y_pred) / y_true)
    return np.mean(rel_err) * 100
def wmape(y_true, y_pred):
    """Weighted MAPE in percent: total |error| relative to total |y_true|."""
    total_err = np.mean(np.abs(y_true - y_pred))
    total_true = np.mean(np.abs(y_true))
    return 100 * total_err / total_true
def smape(y_true, y_pred):
    """Symmetric MAPE in percent; each error is scaled by |y_true| + |y_pred|."""
    denom = np.abs(y_pred) + np.abs(y_true)
    return 100 * 2.0 * np.mean(np.abs(y_pred - y_true) / denom)
def evaluate_regression(model, train_sets, train_label, val_x, val_label, test_sets, test_label):
    """Score a regressor on the train/val/test splits.

    Reports R^2, adjusted R^2, MSE, MAE and a percentage error per split.
    Percentage errors are computed on min-max scaled values (0.1-1 range) to
    avoid division blow-ups near zero; train/test use wMAPE while val uses
    sMAPE (kept as-is from the original evaluation protocol).

    :param model: fitted estimator exposing `predict`.
    :param val_x: validation features; pass an empty DataFrame to skip the
        validation metrics (they come back as None).
    :return: one-row DataFrame with train_/val_/test_ metric columns.
    """
    test_target_pred = model.predict(test_sets)
    train_target_pred = model.predict(train_sets)
    num_data_tra = train_sets.shape[0]
    num_feat_tra = train_sets.shape[1]
    # BUG FIX: the test split's adjusted R^2 previously used the TRAIN
    # split's dimensions (copy-paste); use the test set's shape instead.
    num_data_test = test_sets.shape[0]
    num_feat_test = test_sets.shape[1]
    train_r2 = r2_score(train_label, train_target_pred)
    train_r2_ad = 1 - ((1 - train_r2) * (num_data_tra - 1)) / abs(num_data_tra - num_feat_tra - 1)
    test_r2 = r2_score(test_label, test_target_pred)
    test_r2_ad = 1 - ((1 - test_r2) * (num_data_test - 1)) / abs(num_data_test - num_feat_test - 1)
    train_mse = mean_squared_error(train_label, train_target_pred)
    train_mae = mean_absolute_error(train_label, train_target_pred)
    test_mse = mean_squared_error(test_label, test_target_pred)
    test_mae = mean_absolute_error(test_label, test_target_pred)
    # Rescale labels and predictions independently before the percentage error.
    mms = MinMaxScaler(feature_range=(0.1, 1))
    train_label_mms = mms.fit_transform(np.array(train_label).reshape(-1, 1))
    test_label_mms = mms.fit_transform(np.array(test_label).reshape(-1, 1))
    train_target_pred_mns = mms.fit_transform(train_target_pred.reshape(-1, 1))
    test_target_pred_mns = mms.fit_transform(test_target_pred.reshape(-1, 1))
    train_mape = wmape(train_label_mms, train_target_pred_mns.reshape(-1, ))
    test_mape = wmape(test_label_mms, test_target_pred_mns.reshape(-1, ))
    if not val_x.empty:
        val_target_pred = model.predict(val_x)
        num_data_val = val_x.shape[0]
        num_feat_val = val_x.shape[1]
        val_r2 = r2_score(val_label, val_target_pred)
        val_r2_ad = 1 - ((1 - val_r2) * (num_data_val - 1)) / abs(num_data_val - num_feat_val - 1)
        val_mse = mean_squared_error(val_label, val_target_pred)
        val_mae = mean_absolute_error(val_label, val_target_pred)
        val_label_mms = mms.fit_transform(np.array(val_label).reshape(-1, 1))
        val_target_pred_mns = mms.fit_transform(val_target_pred.reshape(-1, 1))
        val_mape = smape(val_label_mms, val_target_pred_mns.reshape(-1, ))
    else:
        val_r2, val_r2_ad, val_mse, val_mae, val_mape = None, None, None, None, None
    dict_ = dict(zip(['train_r2', 'train_r2_ad', 'train_mse', 'train_mae', 'train_mape',
                      'val_r2', 'val_r2_ad', 'val_mse', 'val_mae', 'val_mape',
                      'test_r2', 'test_r2_ad', 'test_mse', 'test_mae', 'test_mape'],
                     [train_r2, train_r2_ad, train_mse, train_mae, train_mape,
                      val_r2, val_r2_ad, val_mse, val_mae, val_mape,
                      test_r2, test_r2_ad, test_mse, test_mae, test_mape,
                      ]))
    return pd.DataFrame.from_dict([dict_])
--- FILE SEPARATOR ---
import tensorflow as tf
import numpy as np
import pandas as pd
from keras import backend as K
from keras import regularizers, activations
from keras.layers import Dense, Input, Add, Concatenate, Dropout, \
BatchNormalization, Activation, Multiply, Embedding, Layer, GlobalAveragePooling1D
from keras.models import Model
import copy
class Self_Attention(Layer):
    """Single-head scaled dot-product self-attention as a custom Keras layer.

    One (3, input_dim, output_dim) weight tensor stores the Q, K and V
    projection matrices as its three leading slices.
    """
    def __init__(self, output_dim, **kwargs):
        # output_dim: projection width of Q/K/V and the layer's output.
        self.output_dim = output_dim
        super(Self_Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # kernel[0]=W_Q, kernel[1]=W_K, kernel[2]=W_V, each (feat_dim, output_dim).
        self.kernel = self.add_weight(name='kernel',
                                      shape=(3, input_shape[2], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(Self_Attention, self).build(input_shape)
    def call(self, x):
        WQ = K.dot(x, self.kernel[0])
        WK = K.dot(x, self.kernel[1])
        WV = K.dot(x, self.kernel[2])
        print("WQ.shape", WQ.shape)
        print("K.permute_dimensions(WK, [0, 2, 1]).shape", K.permute_dimensions(WK, [0, 2, 1]).shape)
        # Attention scores: Q · K^T, scaled.
        QK = K.batch_dot(WQ, K.permute_dimensions(WK, [0, 2, 1]))
        # NOTE(review): scales by sqrt(sequence length) rather than the usual
        # sqrt(key dim) — confirm this is intentional.
        QK = QK / (x.shape.as_list()[1] ** 0.5)
        QK = K.softmax(QK)
        print("QK.shape", QK.shape)
        V = K.batch_dot(QK, WV)
        return V
    def compute_output_shape(self, input_shape):
        # (batch, seq, output_dim): only the feature axis changes.
        return (input_shape[0], input_shape[1], self.output_dim)
class FM(Layer):
    """Factorization Machine layer: linear term plus 2nd-order feature
    interactions through a low-rank latent matrix V.

    NOTE(review): `self.activation` is stored but never applied in `call` —
    the output is the raw FM response.
    """
    def __init__(self, output_dim, latent=32, activation='relu', **kwargs):
        # latent: rank of the pairwise-interaction factorization.
        self.latent = latent
        self.output_dim = output_dim
        self.activation = activations.get(activation)
        super(FM, self).__init__(**kwargs)
    def build(self, input_shape):
        # b: global bias; w: linear weights; v: latent interaction factors.
        self.b = self.add_weight(name='W0',
                                 shape=(self.output_dim,),
                                 trainable=True,
                                 initializer='zeros')
        self.w = self.add_weight(name='W',
                                 shape=(input_shape[1], self.output_dim),
                                 trainable=True,
                                 initializer='random_uniform')
        self.v= self.add_weight(name='V',
                                shape=(input_shape[1], self.latent),
                                trainable=True,
                                initializer='random_uniform')
        super(FM, self).build(input_shape)
    def call(self, inputs, **kwargs):
        x = inputs
        x_square = K.square(x)
        xv = K.square(K.dot(x, self.v))
        xw = K.dot(x, self.w)
        # Standard FM identity: 0.5 * ((xV)^2 - x^2 V^2) gives all pairwise terms.
        p = 0.5*K.sum(xv-K.dot(x_square, K.square(self.v)), 1)
        # Broadcast the scalar interaction term across the output dimension.
        rp = K.repeat_elements(K.reshape(p, (-1, 1)), self.output_dim, axis=-1)
        f = xw + rp + self.b
        output = K.reshape(f, (-1, self.output_dim))
        return output
    def compute_output_shape(self, input_shape):
        assert input_shape and len(input_shape)==2
        return input_shape[0],self.output_dim
def Att(att_dim, inputs, name):
    """Gating 'attention' block: a two-layer MLP yields softmax weights that
    elementwise rescale the input tensor.

    NOTE(review): `bias=None` is the Keras 1.x spelling of `use_bias=False` —
    confirm against the Keras version this project pins.

    :param att_dim: width of the weight vector (matches the input feature dim).
    :param inputs: tensor to be gated.
    :param name: name for the softmax layer (used elsewhere to fetch weights).
    :return: inputs multiplied by the learned softmax weights.
    """
    V = inputs
    QK = Dense(att_dim//4, bias=None, activation='relu')(inputs)
    QK = Dense(att_dim, bias=None, activation='relu')(QK)
    QK = Activation("softmax", name=name)(QK)
    MV = Multiply()([V, QK])
    return(MV)
def regression_(train_x):
    """Build the (uncompiled) ablation-time regression network.

    Architecture: input -> attention gate -> Dense(64) -> Dense(16) -> Dense(1).
    Uses the legacy `Model(input=..., output=...)` keyword spelling (Keras 1.x).

    :param train_x: DataFrame whose column count fixes the input dimension.
    :return: uncompiled Keras Model mapping features to a scalar prediction.
    """
    input_dim = train_x.shape[1]
    # Regularization is currently disabled (both coefficients zero).
    l1_regul = 0
    l2_regul = 0
    input = Input(shape=(input_dim,))
    # input_ = BatchNormalization()(input, training=False)
    # input_fm = FM(input_dim)(input_)
    # input_emb = Embedding(input_dim + 1, input_dim//2)(input)
    # att = Self_Attention(input_dim//2)(input_emb)
    # att = GlobalAveragePooling1D()(att)
    atts1 = Att(input_dim, input, "attention_vec10")
    # atts11 = Att(input_dim, input_, "attention_vec11")
    # mlp_layer = Add()([atts1, atts11])
    # mlp_layer = Att(input_dim, mlp_layer, "attention_vec20")
    mlp_layer = atts1
    for units_ in [64, 16]:
        mlp_layer = Dense(units_, activation='relu',
                          kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(mlp_layer)
        # mlp_layer = Dropout(0.5)(mlp_layer)
        # mlp_layer = BatchNormalization()(mlp_layer, training=False)
    # atts2 = Att(32, mlp_layer, "attention_vec2")
    # Linear output head for the scalar regression target.
    mlp_layer_output = Dense(1)(mlp_layer)
    regression = Model(input=input, output=mlp_layer_output)
    return regression
def classifer_(train_x):
    """Build the (uncompiled) relapse-risk classifier.

    Takes two inputs — the feature vector and the scalar label — so the label
    itself can later be optimized against the predicted risk (see
    `label_correction`). A decoder branch is built but not wired into a model.

    :param train_x: DataFrame whose column count fixes the feature dimension.
    :return: (classifer, att_weight) — the sigmoid classifier Model and a
        companion Model exposing the first attention layer's output.
    """
    input_dim = train_x.shape[1]
    input_dim_emb = (input_dim + 1)  # features + the appended label input
    input_ = Input(shape=(input_dim,))
    input_c = Input(shape=(1,))  # the (possibly noisy) regression label
    l1_regul = 0
    l2_regul = 0
    # encoder layers
    inputs = Concatenate()([input_, input_c])
    atts1 = Att(input_dim_emb, inputs, "attention_vec10")
    # atts2 = Att(input_dim + 1, inputs, "attention_vec11")
    # input_fm = FM(input_dim + 1)(atts1)
    encoded_layer = atts1
    # encoded_layer = Concatenate()([atts1, atts2])
    for units_ in [64]:
        encoded_layer = Dense(units_, activation='relu',
                              kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l1_regul))(encoded_layer)
        encoded_layer = Dropout(0.5)(encoded_layer)
        encoded_layer = BatchNormalization()(encoded_layer, training=False)
    encoder_output = Concatenate()([encoded_layer, input_c])
    # decoder layers
    decoded_layer = encoded_layer
    for units_ in [16, 128, train_x.shape[1]]:
        decoded_layer = Dense(units_, activation='relu',
                              kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l1_regul))(decoded_layer)
        # decoded_layer = Dropout(0.2)(decoded_layer)
        decoded_layer = BatchNormalization()(decoded_layer, training=False)
    # classifer layers
    classifer_layer = Dense(8, activation='relu', kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(
        encoded_layer)
    classifer_layer = Dense(1, activation='sigmoid', kernel_regularizer=regularizers.l1_l2(l1=l1_regul, l2=l2_regul))(
        classifer_layer)
    # encoder = Model(input=[input_, input_c], output=encoded_layer)
    classifer = Model(input=[input_, input_c], output=classifer_layer)
    # autoencoder = Model(input=[input_, input_c], output=decoded_layer)
    att_weight = Model(input=[input_, input_c], output=atts1)
    # classifer.add_loss(recon_loss(y_true=input_, y_pred=decoded_layer))
    return classifer, att_weight
def eval_loss_and_grads(x, fetch_loss_and_grads):
    """Run the compiled loss/gradient function on `x` and unpack its outputs.

    :param x: input(s) accepted by `fetch_loss_and_grads`.
    :param fetch_loss_and_grads: callable returning [loss, grads].
    :return: (loss_value, grad_values).
    """
    loss_value, grad_values = fetch_loss_and_grads(x)[:2]
    return loss_value, grad_values
def gradient_ascent(x, fetch_loss_and_grads, iter, step, max_loss=None, min_loss=None):
    """Iteratively shift the label component x[1] against the loss gradient.

    :param x: [dataframe list] [inputs, label]; x[1] is updated in place.
    :param fetch_loss_and_grads: [ ] K.function returning [loss, grads].
    :param iter: [int] number of label-update iterations.
    :param step: [float] learning rate for each label update.
    :param max_loss: unused (an early-stop check is commented out below).
    :param min_loss: unused; kept for interface compatibility.
    :return: the same list x, with x[1] corrected.
    """
    for _ in range(iter):
        _, grad_values = eval_loss_and_grads(x, fetch_loss_and_grads)
        # if max_loss is not None and loss_value > max_loss:
        #     break
        delta = step * np.squeeze(grad_values).reshape(-1, 1)
        x[1] = x[1] - delta
    return x
def label_correction(model, model_input, label, iter_=1, step_=1e-3):
    """correct label

    :param model: [keras model] Relapse risk prediction model
    :param model_input: [dataframe] Inputs
    :param label: [series] Labels that need to be corrected
    :param iter_: [int] Number of iterations for label modification
    :param step_: [float] Learning rate for label modification
    :return label_target: [dataframe] Corrected label
    """
    loss = K.variable(0.)
    coeff = 1
    # Loss = mean squared activation of the model's final layer; moving the
    # label against this gradient lowers the predicted relapse risk.
    activation = model.get_layer(index=-1).output
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss = loss + coeff * K.sum(K.square(activation[:, :])) / scaling
    dream = model.input
    # Differentiate w.r.t. the label input only (second model input).
    grads = K.gradients(loss, dream[1])
    # Normalize the gradient magnitude for a stable step size.
    grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)
    outputs = [loss, grads]
    # Compiled in inference mode (learning phase 0): dropout/BN stay frozen.
    fetch_loss_and_grads = K.function([dream[0], dream[1]], outputs, K.set_learning_phase(0))
    label_target = pd.DataFrame(copy.deepcopy(label))
    # gradient_ascent returns [inputs, label]; keep only the corrected label.
    label_target = gradient_ascent([model_input, label_target], fetch_loss_and_grads, iter=iter_, step=step_)[1]
    return label_target
def get_model(inputs,
              labels,
              is_training=True,
              dtype=tf.float32,
              w_dict=None,
              ex_wts=None,
              reuse=None):
    """Build a small MLP classifier and its (optionally example-weighted) loss.

    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
    :param reuse: [bool] Whether to reuse variables, default None.
    :return: (w_dict, loss, logits) -- weight cache, scalar loss, raw logits.
    """
    if w_dict is None:
        w_dict = {}

    def _get_var(name, shape, dtype, initializer):
        # Cache variables under their fully-scoped name so a second pass can
        # inject virtually-updated weights through w_dict.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        else:
            var = tf.get_variable(name, shape, dtype, initializer=initializer)
            w_dict[key] = var
            return var

    with tf.variable_scope('Model', reuse=reuse):
        shape_list = np.append(np.array([-1]), np.squeeze(inputs.shape[1:].as_list()))
        inputs_ = tf.cast(tf.reshape(inputs, shape_list), dtype)
        labels = tf.cast(tf.reshape(labels, [-1, 1]), dtype)
        dense1 = tf.layers.dense(inputs=inputs_, units=64, activation=tf.nn.relu)
        dense2 = tf.layers.dense(inputs=dense1, units=16, activation=tf.nn.relu)
        # BUG FIX: the final layer previously used activation=tf.nn.sigmoid and
        # the result was then passed to sigmoid_cross_entropy_with_logits, which
        # applies sigmoid again (double sigmoid). Emit raw linear logits.
        logits = tf.layers.dense(inputs=dense2, units=1, activation=None)
        if ex_wts is None:
            # Average loss.
            loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
        else:
            # Weighted loss.
            # BUG FIX: the per-example cross entropy has shape [N, 1] while
            # ex_wts has shape [N]; multiplying them broadcast to [N, N] and the
            # sum counted every term N times. Reshape the weights to [N, 1].
            loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
                * tf.reshape(ex_wts, [-1, 1]))
    return w_dict, loss, logits
def reweight_random(bsize, eps=0.0):
    """Draw random example weights and normalize them to sum to one.

    :param bsize: [int] Batch size.
    :param eps: [float] Minimum example weights, default 0.0.
    """
    raw = tf.random_normal([bsize], mean=0.0, stddev=1.0)
    clipped = tf.maximum(raw, eps)  # floor the draws at eps
    total = tf.reduce_sum(clipped)
    # Guard against division by zero when every clipped weight is zero.
    total += tf.to_float(tf.equal(total, 0.0))
    return clipped / total
def reweight_autodiff(inp_a,
                      label_a,
                      inp_b,
                      label_b,
                      bsize_a,
                      bsize_b,
                      eps=0.0,
                      gate_gradients=1):
    """Reweight examples using automatic differentiation.
    :param inp_a: [Tensor] Inputs for the noisy pass.
    :param label_a: [Tensor] Labels for the noisy pass.
    :param inp_b: [Tensor] Inputs for the clean pass.
    :param label_b: [Tensor] Labels for the clean pass.
    :param bsize_a: [int] Batch size for the noisy pass.
    :param bsize_b: [int] Batch size for the clean pass.
    :param eps: [float] Minimum example weights, default 0.0.
    :param gate_gradients: [int] Tensorflow gate gradients, reduce concurrency.
    """
    # Noisy pass with all-zero example weights: the loss value itself is zero,
    # only its gradient w.r.t. ex_wts_a is used below.
    ex_wts_a = tf.zeros([bsize_a], dtype=tf.float32)
    # Clean pass uses uniform weights (plain average loss).
    ex_wts_b = tf.ones([bsize_b], dtype=tf.float32) / float(bsize_b)
    w_dict, loss_a, logits_a = get_model(
        inp_a, label_a, ex_wts=ex_wts_a, is_training=True, reuse=True)
    var_names = w_dict.keys()
    var_list = [w_dict[kk] for kk in var_names]
    # One virtual SGD step (unit learning rate) on the noisy loss.
    grads = tf.gradients(loss_a, var_list, gate_gradients=gate_gradients)
    var_list_new = [vv - gg for gg, vv in zip(grads, var_list)]
    w_dict_new = dict(zip(var_names, var_list_new))
    # Evaluate the clean loss at the virtually-updated weights.
    _, loss_b, logits_b = get_model(
        inp_b, label_b, ex_wts=ex_wts_b, is_training=True, reuse=True, w_dict=w_dict_new)
    # d(clean loss)/d(example weight); a negative gradient marks a helpful example.
    grads_ex_wts = tf.gradients(loss_b, [ex_wts_a], gate_gradients=gate_gradients)[0]
    ex_weight = -grads_ex_wts
    ex_weight_plus = tf.maximum(ex_weight, eps)
    ex_weight_sum = tf.reduce_sum(ex_weight_plus)
    # Guard against division by zero when all weights were clipped to zero.
    ex_weight_sum += tf.to_float(tf.equal(ex_weight_sum, 0.0))
    ex_weight_norm = ex_weight_plus / ex_weight_sum
    return ex_weight_norm
def reweight_hard_mining(inp, label, positive=False):
    """Reweight examples using hard mining.
    :param inp: [Tensor] [N, ...] Inputs.
    :param label: [Tensor] [N] Labels
    :param positive: [bool] Whether perform hard positive mining or hard negative mining.
    :return [Tensor] Examples weights of the same shape as the first dim of inp.
    """
    # NOTE(review): get_model returns a *reduced* (scalar) loss, so the
    # `loss * label` products below scale the label vector rather than select
    # per-example losses -- confirm this is the intended behavior.
    _, loss, logits = get_model(inp, label, ex_wts=None, is_training=True, reuse=True)
    # Mine for positive
    if positive:
        loss_mask = loss * label
    else:
        loss_mask = loss * (1 - label)
    # k = size of the opposite class, clamped to at least 1.
    if positive:
        k = tf.cast(tf.reduce_sum(1 - label), tf.int32)
    else:
        k = tf.cast(tf.reduce_sum(label), tf.int32)
    k = tf.maximum(k, 1)
    loss_sorted, loss_sort_idx = tf.nn.top_k(loss_mask, k)
    # Start from the full opposite class ...
    if positive:
        mask = 1 - label
    else:
        mask = label
    # ... then switch on the k hardest mined examples via scatter.
    updates = tf.ones([tf.shape(loss_sort_idx)[0]], dtype=label.dtype)
    mask_add = tf.scatter_nd(tf.expand_dims(loss_sort_idx, axis=1), updates, [tf.shape(inp)[0]])
    mask = tf.maximum(mask, mask_add)
    # Normalize to sum to one, guarding against an all-zero mask.
    mask_sum = tf.reduce_sum(mask)
    mask_sum += tf.cast(tf.equal(mask_sum, 0.0), tf.float32)
    mask = mask / mask_sum
    return mask
def get_lenet_model(inputs,
                    labels,
                    is_training=True,
                    dtype=tf.float32,
                    w_dict=None,
                    ex_wts=None,
                    reuse=None):
    """Builds a simple LeNet.
    :param inputs: [Tensor] Inputs.
    :param labels: [Tensor] Labels.
    :param is_training: [bool] Whether in training mode, default True.
    :param dtype: [dtype] Data type, default tf.float32.
    :param w_dict: [dict] Dictionary of weights, default None.
    :param ex_wts: [Tensor] Example weights placeholder, default None.
    :param reuse: [bool] Whether to reuse variables, default None.
    """
    if w_dict is None:
        w_dict = {}

    def _get_var(name, shape, dtype, initializer):
        # Look variables up in w_dict first so a caller can inject
        # (virtually updated) weights; otherwise create them with
        # tf.get_variable and record them under their fully-scoped name.
        key = tf.get_variable_scope().name + '/' + name
        if key in w_dict:
            return w_dict[key]
        else:
            var = tf.get_variable(name, shape, dtype, initializer=initializer)
            w_dict[key] = var
            return var

    with tf.variable_scope('Model', reuse=reuse):
        # Flattened 28x28 grayscale images reshaped to NHWC.
        inputs_ = tf.cast(tf.reshape(inputs, [-1, 28, 28, 1]), dtype)
        labels = tf.cast(labels, dtype)
        w_init = tf.truncated_normal_initializer(stddev=0.1)
        w1 = _get_var('w1', [5, 5, 1, 16], dtype, initializer=w_init)  # [14, 14, 16]
        w2 = _get_var('w2', [5, 5, 16, 32], dtype, initializer=w_init)  # [7, 7, 32]
        w3 = _get_var('w3', [5, 5, 32, 64], dtype, initializer=w_init)  # [4, 4, 64]
        w4 = _get_var('w4', [1024, 100], dtype, initializer=w_init)
        w5 = _get_var('w5', [100, 1], dtype, initializer=w_init)
        b_init = tf.constant_initializer(0.0)
        b1 = _get_var('b1', [16], dtype, initializer=b_init)
        b2 = _get_var('b2', [32], dtype, initializer=b_init)
        b3 = _get_var('b3', [64], dtype, initializer=b_init)
        b4 = _get_var('b4', [100], dtype, initializer=b_init)
        b5 = _get_var('b5', [1], dtype, initializer=b_init)
        act = tf.nn.relu
        # Conv-1
        l0 = tf.identity(inputs_, name='l0')
        z1 = tf.add(tf.nn.conv2d(inputs_, w1, [1, 1, 1, 1], 'SAME'), b1, name='z1')
        l1 = act(tf.nn.max_pool(z1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l1')
        # Conv-2
        z2 = tf.add(tf.nn.conv2d(l1, w2, [1, 1, 1, 1], 'SAME'), b2, name='z2')
        l2 = act(tf.nn.max_pool(z2, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l2')
        # Conv-3
        z3 = tf.add(tf.nn.conv2d(l2, w3, [1, 1, 1, 1], 'SAME'), b3, name='z3')
        l3 = act(tf.nn.max_pool(z3, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME'), name='l3')
        # FC-4
        z4 = tf.add(tf.matmul(tf.reshape(l3, [-1, 1024]), w4), b4, name='z4')
        l4 = act(z4, name='l4')
        # FC-5 (single binary output)
        z5 = tf.add(tf.matmul(l4, w5), b5, name='z5')
        logits = tf.squeeze(z5)
        out = tf.sigmoid(logits)
        if ex_wts is None:
            # Average loss.
            loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
        else:
            # Weighted loss.
            loss = tf.reduce_sum(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels) * ex_wts)
    return w_dict, loss, logits
|
{
"imported_by": [
"/Regression/src/main.py",
"/Regression/src/learn_weight_main.py"
],
"imports": [
"/Regression/src/model/evaluate.py",
"/Regression/src/model/bulid_model.py"
]
}
|
Peroxidess/Ablation-Time-Prediction-Model
|
/Regression/src/preprocess/plot_tabel.py
|
import copy
import pandas as pd
import matplotlib.pyplot as plt
from model.history_ import plot_history_df, plot_metric_df
import numpy as np
from scipy.stats import ttest_ind, levene
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
def mape(y_true, y_pred):
    """Return the mean absolute percentage error (in percent)."""
    relative_error = np.abs((y_true - y_pred) / y_true)
    return relative_error.mean() * 100
def metric_hist(data, nor=None):
    """Load saved experiment results, print metric summaries with significance
    tests against the first (ours) experiment, recompute metrics on the
    inverse-transformed predictions, and plot history/metric comparison figures.

    :param data: [DataFrame] Test data with 'dense'/'label' columns, used to
        invert the normalization of saved predictions.
    :param nor: [scaler or None] Fitted scaler with inverse_transform; required
        by the prediction-inversion loop below.
    """
    root_ = '../report/result/'
    # Base CSV names; 'metric_'/'history_'/'prediction' prefixes are added below.
    file_list = [
        'ablation_time_enh_1nrun_10Fold.csv',# ours
        # 'ablation_time_vanilla_att_only__1nrun_10Fold.csv',# att only
        # 'ablation_time_vanilla_natt_1nrun_10Fold.csv',#mlp only
        # 'ablation_time_enh_att_only__10nrun_1Fold.csv',#
        'ablation_time_enh_natt_1nrun_10Fold.csv',# mlp+lc
        'lr_10nrun_1Fold.csv',# baseline_lasso
        'lr_non_1nrun_10Fold.csv',# nonrelapse
        'gbm_1nrun_10Fold.csv',# gbm
        'gbm_non_1nrun_10Fold.csv',# nonrelapse
        'ablation_time_vanilla_1nrun_10Fold.csv',# ATT+MLP
        'ablation_time_vanilla_non_1nrun_10Fold.csv',# att+mlp+non relapse
        # 'ablation_time_learn_weight_10nrun_1Fold.csv',
        # 'ablation_time_enh_non_10nrun_1Fold.csv', # 0.2297
        # 'ablation_time_vanilla_att_only_10nrun_1Fold.csv',#
        # 'ablation_time_enh_natt__10nrun_1Fold.csv',# 0.5686
        # 'ablation_time_enh_att_only__10nrun_1Fold.csv',# 0.5690
        # 'ablation_time_enh_natt__10nrun_1Fold.csv',# 0.5686
    ]
    metric_file_list = ['metric_' + file for file in file_list]
    history_file_list = ['history_' + file for file in file_list]
    pred_file_list = ['prediction' + file for file in file_list]
    tt_pvalue_list = np.array([])
    lv_pvalue_list = np.array([])
    # First experiment ("ours") is the baseline for the significance tests.
    metric_file_base = metric_file_list[0]
    metric_df_base = pd.read_csv(root_ + metric_file_base)
    for metric_file in metric_file_list:
        metric_df = pd.read_csv(root_ + metric_file)
        mae_col = metric_df.filter(regex=r'mae').columns
        mse_col = metric_df.filter(regex=r'mse').columns
        # metric_df[mae_col] = metric_df.loc[:, mae_col] * 562.062540
        # metric_df[mse_col] = metric_df.loc[:, mse_col] * 562.062540**2
        print('\n', metric_file)
        for col in metric_df.columns:
            print('{} {:.4f} ({:.4f}) max: {:.4f} median {:.4f} min: {:.4f}'.format(col, metric_df[col].mean(),
                                                                                    metric_df[col].std(),
                                                                                    metric_df[col].max(),
                                                                                    metric_df[col].median(),
                                                                                    metric_df[col].min()))
        # Levene test decides equal-variance assumption for the t-test on test_mae.
        v1 = metric_df_base['test_mae']
        v2 = metric_df['test_mae']
        std_ = levene(v1, v2).pvalue
        lv_pvalue_list = np.append(lv_pvalue_list, std_)
        equal_var_ = False
        if std_ > 0.05:
            equal_var_ = True
        res = ttest_ind(v1, v2, equal_var=equal_var_).pvalue
        tt_pvalue_list = np.append(tt_pvalue_list, res)
    tt_pvalue_list = tt_pvalue_list.reshape(-1, 1)
    # Recompute metrics on predictions after inverting the normalization.
    for pred_file in pred_file_list:
        pred_df = pd.read_csv(root_ + pred_file, index_col=0)
        data_inver_label_df = pd.DataFrame([])
        metric_df = pd.DataFrame([])
        for pred in pred_df:
            data_co = data.filter(regex=r'dense|^label')
            data_ = copy.deepcopy(data_co)
            data_.loc[:, 'label'] = np.array(pred_df[pred])
            data_inver_pred = pd.DataFrame(nor.inverse_transform(data_), columns=data_.columns)
            data_inver = pd.DataFrame(nor.inverse_transform(data_co), columns=data_co.columns)
            data_inver_pred_label = data_inver_pred['label']
            data_inver_label = data_inver['label']
            mae = mean_absolute_error(data_inver_label, data_inver_pred_label)
            mse = mean_squared_error(data_inver_label, data_inver_pred_label)
            mape_ = mape(data_inver_label, data_inver_pred_label)
            r2 = r2_score(data_inver_label, data_inver_pred_label)
            dict_ = dict(zip([
                'test_r2', 'test_mse', 'test_mae', 'test_mape'],
                [
                    r2, mse, mae, mape_,
                ]))
            metric_ = pd.DataFrame.from_dict([dict_])
            metric_df = pd.concat([metric_df, metric_], axis=0)
            data_inver_label_df = pd.concat([data_inver_label_df, data_inver_label], axis=1)
        # data_inver.to_csv(root_ + 'inver' + pred_file)
    # Plot training curves for every experiment.
    history_df_all_list = []
    for history_file in history_file_list:
        history_df_all = pd.read_csv(root_ + history_file)
        history_df_all_list.append(history_df_all)
    # plot_history_df(history_df_all_list, task_name='ablation_time', val_flag='')
    plot_history_df(history_df_all_list, task_name='of the experimental results of ablation time prediction ', val_flag='val_')
    plt.show()
    # Plot per-round metric comparison for a fixed subset of experiments.
    metric_df_all_list = []
    metric_file_list = ['metric_ablation_time_enh_10nrun_1Fold.csv',
                        # 'metric_ablation_time_enh_non_10nrun_1Fold.csv',
                        'metric_ablation_time_vanilla_10nrun_1Fold.csv',
                        # 'metric_ablation_time_vanilla_non_10nrun_1Fold.csv',
                        'metric_gbm_10nrun_1Fold.csv',
                        # 'metric_gbm_non_10nrun_1Fold.csv',
                        'metric_lr_10nrun_1Fold.csv',
                        # 'metric_lr_non_10nrun_1Fold.csv',
                        ]
    for history_file in metric_file_list:
        history_df_all = pd.read_csv(root_ + history_file)
        metric_df_all_list.append(history_df_all)
    # plot_history_df(history_df_all_list, task_name='ablation_time', val_flag='')
    plot_metric_df(metric_df_all_list, task_name='ablation_time', val_flag='test_')
    plt.show()
    pass
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
plt.rc('font', family='Times New Roman')
font_size = 16
def plot_metric_df(history_list, task_name, val_flag='test_'):
    """Plot per-metric comparison curves for several experiments side by side.

    :param history_list: [list of DataFrame] One metrics table per experiment.
    :param task_name: [str] Task name; selects classification vs regression metrics.
    :param val_flag: [str] Column prefix to plot (e.g. 'test_'), default 'test_'.
    """
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['r2', 'mae', 'mse']
    fig = plt.figure(figsize=(20, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    # BUG FIX: `L / row` is a float (and may be fractional); plt.subplot
    # requires integer grid dimensions, so round the column count up.
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_metric(history_list, metric, val_flag)
    fig.subplots_adjust(top=0.8)
    legend_labels = ['ours',
                     # 'enh_nonrelapse',
                     'ATT+MLP',
                     # 'vanilla_nonrelapse',
                     'LGB',
                     # 'lightgbm_nonrelapse',
                     'Lasso',
                     # 'lasso_nonrelapse'
                     ]
    # One shared legend above all subplots.
    plt.legend(labels=legend_labels,
               ncol=len(legend_labels),
               # loc='best',
               loc='upper center',
               fontsize=14,
               bbox_to_anchor=(-1.2, 1, 1, 0.2),
               borderaxespad=0.,
               )
    # plt.title('{} {}'.format(task_name, metric), fontsize=font_size)
def show_metric(history_list, metrics_name, val_flag=''):
    """Plot one metric column from each experiment's table as a dotted,
    markered line on the current axes."""
    markers = ['*', 'd', 's', 'x', 'o']
    pretty_names = {'r2': 'R-square', 'mae': 'mean absolute error', 'mse': 'mean squared error'}
    for idx, table in enumerate(history_list):
        series = table.filter(regex=r'\b{}{}\b'.format(val_flag, metrics_name))[:3000]
        plt.plot(series, linestyle=':', marker=markers[idx], linewidth=2)
    plt.xticks(range(0, 11), fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(pretty_names[metrics_name], fontsize=font_size)
    plt.xlabel('Round', fontsize=font_size)
def plot_history_df(history_list, task_name, val_flag=''):
    """Plot averaged training curves (loss + task metric) for several experiments.

    :param history_list: [list of DataFrame] One training-history table per experiment.
    :param task_name: [str] Task name; selects classification vs regression metrics.
    :param val_flag: [str] Column prefix ('' for train, 'val_' for validation).
    """
    if 'relapse_risk' in task_name:
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    row = math.floor(math.sqrt(L))
    # BUG FIX: `L / row` is a float (and may be fractional); plt.subplot
    # requires integer grid dimensions, so round the column count up.
    col = math.ceil(L / row)
    for i, metric in enumerate(metric_list):
        plt.subplot(row, col, i + 1)
        show_history(history_list, metric, val_flag)
    plt.legend(labels=['attention', 'attention+mlp', 'attention+label corrected',
                       'attention+mlp+label corrected(ours)', 'mlp', 'mlp+label corrected'],
               fontsize=14)
    # plt.title('{} {}'.format(metric, task_name), fontsize=font_size)
def show_history(history_list, metrics_name, val_flag=''):
    """Plot each experiment's mean training curve with a min/max band across runs."""
    markers = ['^', 'd', 's', '*', 'x', 'o']
    for idx, table in enumerate(history_list):
        runs = table.filter(regex=r'\b{}{}'.format(val_flag, metrics_name))[:3000]
        mean_curve = np.mean(runs, axis=1)
        n_epochs = mean_curve.shape[0]
        plt.plot(mean_curve, linewidth=2, marker=markers[idx], markevery=200)
        # Shade the spread over repeated runs of the same experiment.
        plt.fill_between(range(n_epochs), np.min(runs, axis=1), np.max(runs, axis=1), alpha=0.3)
    plt.xticks(fontsize=font_size)
    plt.yticks(fontsize=font_size)
    plt.ylabel(val_flag + metrics_name, fontsize=font_size)
    plt.xlabel('Epoch', fontsize=font_size)
def plot_history(history_list, task_name, val_flag=False):
    """Plot averaged Keras training curves, optionally with validation curves.

    :param history_list: [list] Keras History objects.
    :param task_name: [str] Task name; selects which metrics to plot.
    :param val_flag: [bool] Also plot the matching 'val_' curves when True.
    """
    if task_name == 'relapse_risk':
        metric_list = ['loss', 'f1']
    else:
        metric_list = ['loss', 'r2']
    plt.figure(figsize=(12, 4))
    L = len(metric_list)
    # BUG FIX: the original called the undefined name `squrt()` here, which
    # raised NameError on every call. Lay the subplots out on a single row.
    for i, metric in enumerate(metric_list):
        plt.subplot(1, L, i + 1)
        show_train_history(history_list, metric)
        if val_flag:
            show_train_history(history_list, 'val_{}'.format(metric))
            plt.legend(labels=[metric, 'val_{}'.format(metric)], loc='upper left')
        plt.title('{} {}'.format(task_name, metric))
def history_save(history_list, history_name):
    """Stack the .history dicts of several Keras runs and dump them to one CSV.

    NOTE(review): the output filename literally says 'hitory_' (sic); kept
    byte-for-byte since other scripts may read that path -- confirm before fixing.
    """
    frames = [pd.DataFrame.from_dict(h.history, orient='index') for h in history_list]
    stacked = pd.concat([pd.DataFrame([])] + frames, axis=0)
    stacked.to_csv('./hitory_{}.csv'.format(history_name))
def show_train_history(history_list, metrics_name):
    """Plot the mean curve of one metric across several Keras History objects."""
    collected = None
    for hist in history_list:
        row = pd.DataFrame(np.array(hist.history[metrics_name]).reshape(1, -1))
        collected = row if collected is None else pd.concat([collected, row], axis=0)
    # metrics = np.median(metrics_list, axis=0)
    averaged = np.mean(collected, axis=0)
    plt.plot(averaged)
    plt.ylabel(metrics_name)
    plt.xlabel('Epoch')
|
{
"imported_by": [],
"imports": [
"/Regression/src/model/history_.py"
]
}
|
Peroxidess/Ablation-Time-Prediction-Model
|
/Regression/src/useless/ave_logsit_baseline.py
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import six
import tensorflow as tf
from keras import losses
from keras import backend as K
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, \
confusion_matrix, precision_score, recall_score, f1_score, r2_score
from sklearn.linear_model import RidgeClassifierCV, LogisticRegressionCV, RidgeCV, LassoCV, LinearRegression
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
import lightgbm as lgb
import matplotlib.pyplot as plt
# from deepctr.models import DeepFM, xDeepFM, DCN, WDL
# from deepctr.feature_column import SparseFeat, get_feature_names, DenseFeat
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, data_preprocessing, anomaly_dectection
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error, expressed in percent."""
    return 100 * np.mean(np.abs((y_true - y_pred) / y_true))
def ctr_model(linear_feature_columns, dnn_feature_columns):
    # Build a Wide&Deep regression model compiled with Adam + Huber loss.
    # NOTE(review): `WDL` comes from the deepctr import that is commented out at
    # the top of this file, so calling this function currently raises NameError;
    # re-enable the deepctr imports before use.
    adam = tf.keras.optimizers.Adam(lr=0.0001)
    model = WDL(linear_feature_columns, dnn_feature_columns, task='regression')
    # model = xDeepFM(linear_feature_columns, dnn_feature_columns, task='regression')
    model.compile(adam, "huber_loss", metrics=['mae'],)
    return model
def baseline_model(train_set_mix, train_set_mix_label, ca_col, co_col, seed):
    """Return an (unfitted) LightGBM regressor used as the tabular baseline.

    The data/column arguments are accepted for interface compatibility with the
    other model builders but are not used here; only `seed` affects the model.
    """
    regressor = lgb.LGBMRegressor(
        max_depth=3,
        bagging_fraction=0.7,
        feature_fraction=0.7,
        reg_alpha=0.5,
        reg_lambda=0.5,
        min_child_samples=10,
        n_estimators=200,
        learning_rate=1e-1,
        random_state=seed,
    )
    return regressor
def run(train_data, test_data, seed, target='label'):
    """Run one baseline experiment: preprocess, split, fit LightGBM, score.

    :param train_data: [DataFrame] Raw training data.
    :param test_data: [DataFrame or None] Raw test data; split internally if None.
    :param seed: [int] Random seed for numpy, the splits and the model.
    :param target: [str] Regression target column name, default 'label'.
    :return: [list] train/test R2, adjusted R2, MSE, MAE, plus test MAPE.

    NOTE(review): the `data_preprocessing` / `get_dataset_` versions shown in
    preprocess/get_dataset.py return 5 values resp. take a leading `nor`
    argument, which does not match the calls below -- confirm which module
    version this script is meant to import.
    """
    np.random.seed(seed)
    train_data, test_data, co_col, ca_col = data_preprocessing(train_data, test_data,
                                                               ca_co_sel_flag=False, onehot_flag=False)
    # train_data, _ = anomaly_dectection(train_data, test_data=pd.DataFrame())
    # _, test_data = anomaly_dectection(train_data=pd.DataFrame(), test_data=test_data)
    # train_data, test_data = anomaly_dectection(train_data=train_data, test_data=test_data)
    train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \
        get_dataset_(train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, val_ratio=val_ratio, seed=seed)
    # fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=pd.concat([train_set_mix, test_set], axis=0)[feat].nunique(), embedding_dim=4)
    #                           for i, feat in enumerate(ca_col)] + [DenseFeat(feat, 1,)
    #                                                                for feat in co_col]
    #
    # dnn_feature_columns = fixlen_feature_columns
    # linear_feature_columns = fixlen_feature_columns
    # feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)
    # train_set_mix = {name: train_set_mix[name].values for name in feature_names}
    # test_set = {name: test_set[name].values for name in feature_names}
    # model = ctr_model(linear_feature_columns, dnn_feature_columns,)
    # history = model.fit(train_set_mix, train_set_mix_label[target].values,
    #                     batch_size=512, epochs=180, verbose=1, validation_split=0.2, )
    # train_set_mix = train_set_mix.loc[train_set_mix_label['sup_label'] == 0]
    # train_set_mix_label = train_set_mix_label.loc[train_set_mix_label['sup_label'] == 0]
    model = baseline_model(train_set_mix, train_set_mix_label, ca_col, co_col, seed)
    model.fit(train_set_mix, train_set_mix_label[target])
    # feat_df = pd.DataFrame({'column': train_set_mix.columns, 'importance': model.feature_importances_.round(5)})
    # feat_df_sort = feat_df.sort_values(by='importance', ascending=False)
    # feat_df_sort_ = feat_df_sort.set_index(['column'])
    # feat_df_sort_[:30].plot.barh(figsize=(15, 15), fontsize=12)
    # plt.title("n61_lgb_特征重要性")
    # plt.show()
    train_target_pred = model.predict(train_set_mix)
    test_target_pred = model.predict(test_set)
    # Adjusted R2 = 1 - (1 - R2)(n - 1)/(n - p - 1); abs() guards tiny test sets.
    train_R2 = r2_score(train_set_mix_label[target], train_target_pred)
    num_data = train_set_mix.shape[0]
    num_feat = train_set_mix.shape[1]
    train_R2_ad = 1 - ((1 - train_R2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    test_R2 = r2_score(test_set_label[target], test_target_pred)
    num_data = test_set.shape[0]
    num_feat = test_set.shape[1]
    test_R2_ad = 1 - ((1 - test_R2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    train_mse = mean_squared_error(train_set_mix_label[target], train_target_pred)
    train_mae = mean_absolute_error(train_set_mix_label[target], train_target_pred)
    test_mse = mean_squared_error(test_set_label[target], test_target_pred)
    test_mae = mean_absolute_error(test_set_label[target], test_target_pred)
    test_mape = mean_absolute_percentage_error(test_set_label[target], test_target_pred.reshape(-1, ))
    err = test_set_label[target] - np.squeeze(test_target_pred)
    return [train_R2, test_R2, train_R2_ad, test_R2_ad, train_mse, test_mse, train_mae, test_mae, test_mape]
def run_many(train_data, test_data):
    """Repeat `run` `nrun` times with varied seeds and print summary statistics."""
    all_metrics = [run(train_data, test_data, (trial * 2718) % 2020)
                   for trial in tqdm(six.moves.xrange(nrun))]
    metric_df = pd.DataFrame(np.array(all_metrics))
    metric_df.columns = ['train_R2', 'test_R2',
                         'train_R2_ad', 'test_R2_ad',
                         'train_mse', 'test_mse',
                         'train_mae', 'test_mae',
                         'test_mape',]
    for col in metric_df.columns:
        print('{} {:.4f} ({:.4f}) max: {:.4f} min: {:.4f}'.format(col, metric_df[col].mean(),
                                                                  metric_df[col].std(),
                                                                  metric_df[col].max(),
                                                                  metric_df[col].min()))
def main():
    """Entry point: load the configured dataset and run the repeated experiments."""
    train_data, test_data = load_data_(datasets_name)
    run_many(train_data, test_data)
# Experiment configuration (module-level constants read by run()/run_many()).
datasets_name = 'LiverAblation'  # dataset key understood by load_data_()
nrun = 10  # number of repeated runs in run_many()
clean_ratio = 1  # passed to get_dataset_; 1 => no synthetic label noise
test_ratio = 0.2  # test split fraction
val_ratio = 0.2  # validation split fraction
epoch = 200  # NOTE(review): not referenced in this script -- likely kept from the NN variant
batchsize = 1  # NOTE(review): not referenced in this script
iter_ = 1  # NOTE(review): not referenced here (label-correction setting in sibling scripts)
step_ = 0.1  # NOTE(review): not referenced here
if __name__ == '__main__':
    main()
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
import pandas as pd
import numpy as np
from preprocess import plot_tabel
def get_dataset_(nor, train_data, test_data, clean_ratio, test_retio, seed, target='label', val_ratio=0):
    """Split data into train/val/test and optionally inject synthetic label noise.

    :param nor: [scaler] Fitted normalizer (only used by the commented-out plotting call).
    :param train_data: [DataFrame] Training data (already preprocessed).
    :param test_data: [DataFrame or None] Test data; when given, no test split is made.
    :param clean_ratio: [float] Fraction of the train set kept clean; < 1 enables noise injection.
    :param test_retio: [float] Test split fraction (used only when test_data is None).
    :param seed: [int] Random state for all splits.
    :param target: [str] Target column name, default 'label'.
    :param val_ratio: [float] Validation split fraction; 0 disables the val set.
    :return: (train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label)

    NOTE(review): when clean_ratio >= 1 the 'sup_label' column is never assigned
    here, so it must already exist in the incoming data (load_data_ sets it).
    """
    if test_retio == 0 or test_data is not None:
        train_set = train_data
        test_set = test_data
    else:
        train_set, test_set = train_test_split(train_data, test_size=test_retio, random_state=seed)
    if clean_ratio < 1:
        # Split into a noisy part (train_set_) and a clean part; corrupt the
        # noisy labels with white noise plus a draw from the label distribution.
        train_set_, train_set_clean = train_test_split(train_set, test_size=clean_ratio, random_state=seed)
        label_distrib = np.random.normal(loc=train_set_[target].describe().loc['mean'],
                                         scale=train_set_[target].describe().loc['std'], size=train_set_[target].shape)
        alpha = 1
        beta = 1
        train_label_ = train_set_[target] + \
                       alpha * np.random.normal(loc=0., scale=1., size=train_set_[target].shape) + beta * label_distrib
        train_set_[target] = train_label_
        # sup_label marks noisy (1) vs clean (0) rows.
        train_set_['sup_label'] = 1
        train_set_clean['sup_label'] = 0
        test_set['sup_label'] = 0
    else:
        train_set_ = None
        train_set_clean = train_set
    train_set_mix = pd.concat([train_set_, train_set_clean], axis=0)
    # mix_ratio = train_set[train_set[target] != train_set_mix[target]].index
    # print('real mix ratio is {}'.format(mix_ratio))
    if val_ratio > 0:
        train_set_mix, val_set = train_test_split(train_set_mix, test_size=val_ratio, random_state=seed)
        val_set_label = val_set[[target, 'sup_label']]
        val_set.drop(columns=[target, 'sup_label'], inplace=True)
    else:
        val_set = None
        val_set_label = None
    # Separate label frames (target + noise indicator) from feature frames.
    train_set_mix_label = train_set_mix[[target, 'sup_label']]
    test_set_label = test_set[[target, 'sup_label']]
    # plot_tabel.metric_hist(test_set, nor)
    train_set_mix.drop(columns=[target, 'sup_label'], inplace=True)
    test_set.drop(columns=[target, 'sup_label'], inplace=True)
    return train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label
def data_preprocessing(train_data, test_data=None, ca_feat_th=8, ca_co_sel_flag=True, onehot_flag=False, target='label'):
    """Fill missing values, detect categorical vs continuous columns, encode
    categoricals and standardize continuous columns (train and test together).

    :param train_data: [DataFrame] Training data.
    :param test_data: [DataFrame or None] Test data; processed jointly when given.
    :param ca_feat_th: [int] Max distinct values for a column to count as categorical.
    :param ca_co_sel_flag: [bool] Re-detect column types (True) or trust existing
        '_sparse'/'_dense' suffixes (False).
    :param onehot_flag: [bool] One-hot encode categorical columns when True.
    :param target: [str] Target column; also normalized (see note below).
    :return: (train_data, test_data, co_col, ca_col, std) -- processed frames,
        continuous/categorical column indexes, fitted StandardScaler.
    """
    if test_data is not None:
        # Tag and concatenate so both splits get identical encodings/scaling.
        train_data['tab'] = 1
        test_data['tab'] = 0
        data_raw = pd.concat([train_data, test_data], axis=0)
    print('\ndata_raw', data_raw.shape)
    data = data_raw.dropna(axis=1, how='all')
    xx = data.isnull().sum()
    data = data.fillna(0)
    if ca_co_sel_flag:
        # Classify columns by cardinality and tag them via a name suffix.
        ca_col = []
        co_col = []
        data_columns_label = data.filter(regex=r'label').columns
        data_columns = data.columns.drop(data_columns_label)
        # data_columns = data.columns.drop(['sup_label'])
        for col in data_columns:
            data_col = data[col]
            col_feat_num = len(set(data_col))
            if col_feat_num > ca_feat_th:
                col_ = col + '_dense'
                co_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
            elif ca_feat_th >= col_feat_num > 1:
                col_ = col + '_sparse'
                ca_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
    else:
        ca_col = data.filter(regex=r'sparse').columns
        co_col = data.filter(regex=r'dense').columns
    data[ca_col] = pd.concat([data[ca_col].apply(lambda ser: pd.factorize(ser)[0])])
    data[ca_col] = data[ca_col].apply(LabelEncoder().fit_transform)
    if onehot_flag:
        data = pd.get_dummies(data, columns=ca_col)
    # The regression target is normalized too, to avoid model collapse in the
    # sup_label classification prediction.
    co_col = co_col.append(data.columns[data.columns == target])
    mms = MinMaxScaler(feature_range=(0.1, 1.1))
    std = StandardScaler()
    xx = data.filter(regex=r'label').describe()
    xx_col = xx.index
    xx_min = xx.loc['min', :]
    xx_max = xx.loc['max', :]
    xx_std = xx.loc['std', :]
    data[co_col] = pd.DataFrame(std.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data[co_col] = pd.DataFrame(mms.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data = pd.DataFrame(mms.fit_transform(data), columns=data.columns, index=data.index)
    if test_data is not None:
        # Undo the tagging and restore the two splits.
        train_data = data[data['tab'] == 1].drop(columns=['tab'])
        test_data = data[data['tab'] == 0].drop(columns=['tab'])
    else:
        train_data = data
    ca_col = data.filter(regex=r'sparse').columns
    co_col = data.filter(regex=r'dense').columns
    return train_data, test_data, co_col, ca_col, std
def anomaly_dectection(train_data=None, test_data=None, target='label'):
    """Drop rows whose target lies outside mean +/- 3*std (3-sigma rule).

    :param train_data: [DataFrame or None] Training data to filter.
    :param test_data: [DataFrame or None] Test data to filter.
    :param target: [str] Column the 3-sigma filter is applied to, default 'label'.
    :return: (train_data, test_data) with outlier rows removed; empty or None
        inputs are passed through unchanged.

    BUG FIX: the original appended only non-empty frames to the result list but
    unconditionally returned two elements, so passing an empty DataFrame (as the
    commented-out call sites in this repo do) raised IndexError, and the None
    defaults crashed on `.empty`. Empty/None inputs now pass through untouched.
    """
    cleaned = []
    for data in (train_data, test_data):
        if data is not None and not data.empty:
            std_ = data[target].std()
            mean_ = data[target].mean()
            data = data[data[target] < mean_ + 3 * std_]
            data = data[data[target] > mean_ - 3 * std_]
        cleaned.append(data)
    return cleaned[0], cleaned[1]
--- FILE SEPARATOR ---
#coding=gb18030
import numpy as np
import pandas as pd
def load_data_(datasets, task_name='', seed=2020):
    """Load one of the supported datasets and return (train_data, test_data).

    :param datasets: [str] Dataset key: 'winequality_white', 'PPH' or 'LiverAblation'.
    :param task_name: [str] Task variant; 'non' selects the non-relapse-only
        training subset for LiverAblation.
    :param seed: [int] Random state for the LiverAblation undersampling.
    :return: (train_data, test_data); test_data may be None, and both are None
        for an unknown dataset key.
    """
    if datasets == 'winequality_white':
        data_path = '../DataSet/wine/{}.csv'.format(datasets)
        data = pd.read_csv(data_path)
        data.rename(columns={'quality': 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        train_data = data.fillna(0)
        test_data = None
    elif datasets == 'PPH':
        data_path = '../DataSet/PPH/{}.csv'.format(datasets)
        # Column descriptions live in a separate header CSV; append them to the names.
        data_head = pd.read_csv('../DataSet/PPH/PPH_head.csv', encoding='gb18030')
        data = pd.read_csv(data_path, encoding='gb18030', index_col='index')
        col = []
        for col_ in data.columns:
            col.append(col_ + np.squeeze(data_head[col_].values))
        data.columns = np.array(col)
        # data.to_csv('../DataSet/PPH/data_feat_name_add.csv', index=False, encoding='gb18030')
        data['sup_label'] = 0
        label_col = data.filter(regex=r'n61').columns.values[0]
        data.rename(columns={label_col: 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        # Derive elapsed minutes from start/end hour+minute fields, wrapping
        # negative differences across midnight / the hour boundary.
        data['hours'] = data.filter(regex=r'field12').values - data.filter(regex=r'field9').values
        data['hours'] = data['hours'].apply(lambda x: 24 + x if x < 0 else x)
        data['minutes'] = data.filter(regex=r'field13').values - data.filter(regex=r'field10').values
        data['minutes'] = data['minutes'].apply(lambda x: 60 + x if x < 0 else x)
        data['minutes'] += data['hours'] * 60
        drop_columns = data.filter(
            regex=r'n421|field11|其他|field28|其他.1|n262|hours|n61|n51|n4417|n4318|field9|field10|field12|field13').columns
        train_data = data.drop(columns=drop_columns)
        # data.fillna(0, inplace=True)
        test_data = None
    elif datasets == 'LiverAblation':
        data_path = '../DataSet/LiverAblation/{}.csv'.format(datasets)
        data = pd.read_csv(data_path, encoding='gb18030', index_col='基线-患者基本信息-ID_sparse')
        # data_path = '../DataSet/LiverAblation/{}_trans.csv'.format(datasets)
        # data = pd.read_csv(data_path, encoding='gb18030', index_col='baseline_info_ID_sparse')
        data.rename(columns={'time_dense': 'label'}, inplace=True)
        data.rename(columns={'relapse_sparse': 'sup_label'}, inplace=True)
        drop_columns_ = data.filter(regex=r'随|ID|cluster|followupInfomation').columns
        data.drop(columns=drop_columns_, inplace=True)
        # Balance classes 1:1 by undersampling the majority (non-relapse) class;
        # everything left out of the balanced sample becomes the test set.
        data_1 = data.loc[data['sup_label'] == 1]
        data_0 = data.loc[data['sup_label'] == 0].sample(n=data_1.shape[0] * 1, random_state=seed)
        data_undersmapling = pd.concat([data_1, data_0]).sample(frac=1, random_state=seed)
        test_data = data.drop(index=data_undersmapling.index)
        if 'non' in task_name:
            train_data = data_0
        else:
            train_data = data_undersmapling
    else:
        train_data = None
        test_data = None
    return train_data, test_data
|
{
"imported_by": [],
"imports": [
"/Regression/src/preprocess/get_dataset.py",
"/Regression/src/preprocess/load_data.py"
]
}
|
Peroxidess/Ablation-Time-Prediction-Model
|
/Regression/src/useless/keras_att.py
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import six
import tensorflow as tf
from keras import losses
from keras import backend as K
from keras import optimizers
from keras.models import Sequential, Model
from keras.callbacks import EarlyStopping
from keras.layers import Input, Dense, Multiply, Activation, Layer, \
GlobalAveragePooling1D, Reshape, RepeatVector, Flatten, Lambda, Add, Embedding
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error, \
confusion_matrix, precision_score, recall_score, f1_score, r2_score
import matplotlib.pyplot as plt
from preprocess.load_data import load_data_
from preprocess.get_dataset import get_dataset_, foo, anomaly_dectection
class Self_Attention(Layer):
    """Single-head self-attention over the feature axis as a custom Keras layer.

    Each scalar input feature is expanded to a length-1 "token"; Q, K and V are
    produced by three learned 1 x output_dim projections and combined with
    scaled dot-product attention.
    """

    def __init__(self, output_dim, **kwargs):
        # output_dim: dimensionality of the Q/K/V projections (per feature).
        self.output_dim = output_dim
        super(Self_Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create one trainable weight for this layer:
        # kernel[0], kernel[1], kernel[2] are the Q, K, V projection matrices.
        # inputs.shape = (batch_size, time_steps, seq_len)
        self.kernel = self.add_weight(name='kernel',
                                      shape=(3, 1, self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(Self_Attention, self).build(input_shape)

    def call(self, x):
        # Expand (batch, features) -> (batch, features, 1) so each feature
        # becomes a token that can be projected to output_dim.
        x = K.expand_dims(x, axis=2)
        WQ = K.dot(x, self.kernel[0])
        WK = K.dot(x, self.kernel[1])
        WV = K.dot(x, self.kernel[2])
        print("WQ.shape", WQ.shape)
        print("K.permute_dimensions(WK, [0, 2, 1]).shape", K.permute_dimensions(WK, [0, 2, 1]).shape)
        # Scaled dot-product attention: softmax(QK^T / sqrt(d)) V.
        QK = K.batch_dot(WQ, K.permute_dimensions(WK, [0, 2, 1]))
        QK = QK / (x.shape.as_list()[-1] ** 0.5)
        QK = K.softmax(QK)
        print("QK.shape", QK.shape)
        V = K.batch_dot(QK, WV)
        return V

    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], self.output_dim)
def mean_absolute_percentage_error(y_true, y_pred):
    """Mean absolute percentage error of `y_pred` against `y_true`, in percent."""
    pct_errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(pct_errors) * 100
def get_activations(model, inputs, print_shape_only=False, layer_name=None):
    """Evaluate and return the activations of every layer of `model` (or of
    the single layer named `layer_name`) for `inputs`, printing each
    activation's shape (or the full tensor)."""
    activations = []
    input = model.input  # NOTE: shadows the builtin `input`
    if layer_name is None:
        outputs = [layer.output for layer in model.layers]
    else:
        outputs = [layer.output for layer in model.layers if layer.name == layer_name]  # all layer outputs
    funcs = [K.function([input] + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
    # Run in learning phase 1 (training mode).
    layer_outputs = [func([inputs, 1.])[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
def r2(y_true, y_pred):
    # Coefficient of determination as a Keras metric: 1 - SS_res / SS_tot.
    return 1 - K.sum(K.square(y_pred - y_true))/K.sum(K.square(y_true - K.mean(y_true)))
def r_square(y_true, y_pred):
    # NOTE(review): this computes SSR/SST where SSR measures the
    # predictions' spread around mean(y_true); it is not the usual
    # 1 - SSE/SST definition of R^2 — confirm this is intentional.
    SSR = K.mean(K.square(y_pred-K.mean(y_true)), axis=-1)
    SST = K.mean(K.square(y_true-K.mean(y_true)), axis=-1)
    return SSR/SST
def Att(att_dim, inputs, name):
    """Simple attention gate: two Dense projections, softmax weights,
    multiplied element-wise with the input.

    NOTE(review): `bias=None` is the Keras 1 style bias switch (falsy ->
    no bias); under Keras 2 this would need `use_bias=False` — confirm
    the installed Keras version.
    """
    V = inputs
    QK = Dense(att_dim, bias=None)(inputs)
    QK = Dense(att_dim, bias=None)(QK)
    QK = Activation("softmax", name=name)(QK)
    MV = Multiply()([V, QK])
    return(MV)
def bulid_model(train_set_mix, train_set_mix_label, ca_col, co_col):
    """Build a dense regressor with the simple Att gate on the input.

    NOTE: 'bulid' (sic) kept — callers use this name.  Only
    train_set_mix's feature count is used; the other arguments are unused.
    """
    input_dim = train_set_mix.shape[-1]
    inputs = Input(shape=(input_dim,))
    atts1 = Att(input_dim, inputs, "attention_vec")
    x = Dense(64, activation='relu')(atts1)
    x = Dense(32, activation='relu')(x)
    x = Dense(16, activation='relu')(x)
    # atts2 = Att(4, atts2, "attention_vec1")
    output = Dense(1)(x)
    # Keras 1-style keyword arguments (input=/output=).
    model = Model(input=inputs, output=output)
    return model
def Expand_Dim_Layer(tensor):
    """Insert a length-1 time axis (axis=1) into `tensor` via a Lambda layer."""
    return Lambda(lambda t: K.expand_dims(t, axis=1))(tensor)
def bulid_model_atts(train_set_mix, train_set_mix_label, ca_col, co_col):
    """Build the self-attention regressor (Self_Attention + dense head).

    NOTE: 'bulid' (sic) kept — callers use this name.  Only
    train_set_mix's feature count is used; the other arguments are unused.
    """
    input_dim = train_set_mix.shape[-1]
    inputs_ = Input(shape=(input_dim,))
    # inputs_emb = Embedding(10000, input_dim)(inputs_)
    atts1 = Self_Attention(input_dim)(inputs_)
    # Collapse the attention output's time axis back to a flat vector.
    atts1 = GlobalAveragePooling1D()(atts1)
    x = Dense(64, activation='relu')(atts1)
    x = Dense(32, activation='relu')(x)
    x = Dense(16, activation='relu')(x)
    outputs = Dense(1)(x)
    model = Model(inputs=inputs_, outputs=outputs)
    model.summary()
    return model
def run(train_data, test_data, seed, reg_flag=False, label_enh_flag=False, reg_enh_flag=False, target='label'):
    """Train the self-attention regressor once; return (test_r2, test_r2_ad, test_mse).

    The reg_flag / label_enh_flag / reg_enh_flag parameters are accepted
    but unused in this variant.  Relies on module-level configuration:
    clean_ratio, test_ratio, val_ratio, epoch, batchsize.
    """
    # Feature typing + one-hot encoding of the raw tables.
    # NOTE(review): `foo` is imported from preprocess.get_dataset; its
    # signature is not visible here — confirm argument order.
    train_data, test_data, co_col, ca_col = foo(train_data, test_data, ca_co_sel_flag=False, onehot_flag=True)
    train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label = \
        get_dataset_(train_data, test_data, clean_ratio=clean_ratio,
                     test_retio=test_ratio, seed=seed, val_ratio=val_ratio)
    train_curr_label = train_set_mix_label[target]
    test_curr_label = test_set_label[target]
    model = bulid_model_atts(train_set_mix, train_set_mix_label, ca_col, co_col)
    rms = optimizers.RMSprop(lr=1e-4)
    model.compile(optimizer=rms, loss='mean_squared_error', metrics=['mse', 'mae', r2, r_square])
    model.fit(train_set_mix, train_curr_label, epochs=epoch, batch_size=batchsize, validation_split=0.2,
              callbacks=[EarlyStopping(monitor='val_loss', patience=200, min_delta=0.01)])
    train_target_pred = model.predict(train_set_mix)
    test_target_pred = model.predict(test_set)
    num_data = train_set_mix.shape[0]
    num_feat = train_set_mix.shape[1]
    # Plain and adjusted R^2 on train and test splits.
    train_r2 = r2_score(train_set_mix_label[target], train_target_pred)
    train_r2_ad = 1 - ((1 - train_r2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    test_r2 = r2_score(test_set_label[target], test_target_pred)
    test_r2_ad = 1 - ((1 - test_r2) * (num_data - 1)) / abs(num_data - num_feat - 1)
    train_mse = mean_squared_error(train_set_mix_label[target], train_target_pred)
    train_mae = mean_absolute_error(train_set_mix_label[target], train_target_pred)
    test_mse = mean_squared_error(test_set_label[target], test_target_pred)
    test_mae = mean_absolute_error(test_set_label[target], test_target_pred)
    test_mape = mean_absolute_percentage_error(test_set_label[target], test_target_pred.reshape(-1, ))
    err_enh = test_set_label[target] - np.squeeze(test_target_pred)
    # (attention-vector visualization kept for reference)
    # attention_vector = get_activations(model, train_set_mix[:1],
    #                                    print_shape_only=True,
    #                                    layer_name='attention_vec')[0].flatten()
    # pd.DataFrame(attention_vector, columns=['attention (%)']).plot(kind='bar',
    #                                                                title='Attention Mechanism as a '
    #                                                                      'function of input dimensions.')
    # plt.show()
    return test_r2, test_r2_ad, test_mse
def run_many(train_data, test_data):
    """Run `run` nrun times with varying seeds and print summary statistics.

    Uses the module-level `nrun`.  Each trial yields the 3-tuple returned
    by `run`: (test_r2, test_r2_ad, test_mse).
    """
    metric_list_all = []
    for trial in tqdm(six.moves.xrange(nrun)):
        metric_list = run(train_data, test_data, (trial * 2020) % 1000,
                          reg_flag=True, label_enh_flag=True, reg_enh_flag=True)
        metric_list_all.append(metric_list)
    metric_df = pd.DataFrame(np.array(metric_list_all))
    # BUG FIX: `run` returns 3 values, but the previous 4-name column list
    # ['train_metric', 'train_metric_enh', 'test_metric', 'test_metric_enh']
    # raised pandas "Length mismatch" here.  Name the 3 actual metrics.
    metric_df.columns = ['test_r2', 'test_r2_ad', 'test_mse']
    for col in metric_df.columns:
        print('{} metric {:.3f} ({:.3f}) max: {:.3f}'.format(col, metric_df[col].mean(),
                                                             metric_df[col].std(),
                                                             metric_df[col].max()))
def main():
    """Entry point: load the configured dataset and run the experiment."""
    train_data, test_data = load_data_(datasets_name)
    run_many(train_data, test_data)
    pass
# ---- Experiment configuration (module-level globals read by run/run_many) ----
np.random.seed(2020)             # reproducibility
datasets_name = 'LiverAblation'  # dataset key understood by load_data_
nrun = 5                         # number of repeated trials
clean_ratio = 1                  # fraction of training labels left clean
test_ratio = 0.2
val_ratio = 0
epoch = 3000
batchsize = 16
iter_ = 10                       # not used in this script
step_ = 0.001                    # not used in this script

if __name__ == '__main__':
    main()
|
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler
import pandas as pd
import numpy as np
from preprocess import plot_tabel
def get_dataset_(nor, train_data, test_data, clean_ratio, test_retio, seed, target='label', val_ratio=0):
    """Split data into (optionally noise-corrupted) train, val and test sets.

    Returns (train_set_mix, train_set_mix_label, val_set, val_set_label,
    test_set, test_set_label); val entries are None when val_ratio == 0.
    `nor` is only referenced by the commented-out plotting call below.
    """
    if test_retio == 0 or test_data is not None:
        train_set = train_data
        test_set = test_data
    else:
        train_set, test_set = train_test_split(train_data, test_size=test_retio, random_state=seed)
    if clean_ratio < 1:
        # Corrupt a (1 - clean_ratio) fraction of the training labels with
        # Gaussian noise drawn around the empirical label distribution.
        train_set_, train_set_clean = train_test_split(train_set, test_size=clean_ratio, random_state=seed)
        label_distrib = np.random.normal(loc=train_set_[target].describe().loc['mean'],
                                         scale=train_set_[target].describe().loc['std'], size=train_set_[target].shape)
        alpha = 1
        beta = 1
        train_label_ = train_set_[target] + \
            alpha * np.random.normal(loc=0., scale=1., size=train_set_[target].shape) + beta * label_distrib
        train_set_[target] = train_label_
        # sup_label marks noisy (1) vs clean (0) rows.
        train_set_['sup_label'] = 1
        train_set_clean['sup_label'] = 0
        test_set['sup_label'] = 0
    else:
        train_set_ = None
        train_set_clean = train_set
    train_set_mix = pd.concat([train_set_, train_set_clean], axis=0)
    # mix_ratio = train_set[train_set[target] != train_set_mix[target]].index
    # print('real mix ratio is {}'.format(mix_ratio))
    if val_ratio > 0:
        train_set_mix, val_set = train_test_split(train_set_mix, test_size=val_ratio, random_state=seed)
        val_set_label = val_set[[target, 'sup_label']]
        val_set.drop(columns=[target, 'sup_label'], inplace=True)
    else:
        val_set = None
        val_set_label = None
    # Separate the label columns from the feature tables.
    train_set_mix_label = train_set_mix[[target, 'sup_label']]
    test_set_label = test_set[[target, 'sup_label']]
    # plot_tabel.metric_hist(test_set, nor)
    train_set_mix.drop(columns=[target, 'sup_label'], inplace=True)
    test_set.drop(columns=[target, 'sup_label'], inplace=True)
    return train_set_mix, train_set_mix_label, val_set, val_set_label, test_set, test_set_label
def data_preprocessing(train_data, test_data=None, ca_feat_th=8, ca_co_sel_flag=True, onehot_flag=False, target='label'):
    """Clean, label-encode and standardize the feature tables.

    Train and test are concatenated (tagged by a temporary 'tab' column) so
    both receive identical encoding/scaling, then split apart again.
    Returns (train_data, test_data, co_col, ca_col, std), where co_col /
    ca_col are the continuous / categorical column labels and std is the
    fitted StandardScaler.

    NOTE(review): with test_data=None, `data_raw` is never assigned and the
    print below raises NameError — confirm this path is unused.
    """
    if test_data is not None:
        # Tag rows so the concatenated frame can be split back later.
        train_data['tab'] = 1
        test_data['tab'] = 0
        data_raw = pd.concat([train_data, test_data], axis=0)
    print('\ndata_raw', data_raw.shape)
    data = data_raw.dropna(axis=1, how='all')
    xx = data.isnull().sum()  # per-column NaN counts (debug aid, unused)
    data = data.fillna(0)
    if ca_co_sel_flag:
        # Decide per column whether it is categorical (few distinct values)
        # or continuous, and rename with a _sparse/_dense suffix.
        ca_col = []
        co_col = []
        data_columns_label = data.filter(regex=r'label').columns
        data_columns = data.columns.drop(data_columns_label)
        # data_columns = data.columns.drop(['sup_label'])
        for col in data_columns:
            data_col = data[col]
            col_feat_num = len(set(data_col))
            if col_feat_num > ca_feat_th:
                col_ = col + '_dense'
                co_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
            elif ca_feat_th >= col_feat_num > 1:
                col_ = col + '_sparse'
                ca_col.append(col_)
                data.rename(columns={col: col_}, inplace=True)
    else:
        # Columns are already suffixed; recover the split from the names.
        ca_col = data.filter(regex=r'sparse').columns
        co_col = data.filter(regex=r'dense').columns
    data[ca_col] = pd.concat([data[ca_col].apply(lambda ser: pd.factorize(ser)[0])])
    data[ca_col] = data[ca_col].apply(LabelEncoder().fit_transform)
    if onehot_flag:
        data = pd.get_dummies(data, columns=ca_col)
    # The regression target is normalized too, to avoid model collapse when
    # predicting sup_label.  NOTE(review): if co_col is a plain list here,
    # list.append returns None — this relies on co_col being a pandas Index
    # (i.e. ca_co_sel_flag=False); confirm callers.
    co_col = co_col.append(data.columns[data.columns == target])
    mms = MinMaxScaler(feature_range=(0.1, 1.1))
    std = StandardScaler()
    xx = data.filter(regex=r'label').describe()
    xx_col = xx.index
    xx_min = xx.loc['min', :]
    xx_max = xx.loc['max', :]
    xx_std = xx.loc['std', :]
    data[co_col] = pd.DataFrame(std.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data[co_col] = pd.DataFrame(mms.fit_transform(data[co_col]), columns=co_col, index=data.index)
    # data = pd.DataFrame(mms.fit_transform(data), columns=data.columns, index=data.index)
    if test_data is not None:
        train_data = data[data['tab'] == 1].drop(columns=['tab'])
        test_data = data[data['tab'] == 0].drop(columns=['tab'])
    else:
        train_data = data
    ca_col = data.filter(regex=r'sparse').columns
    co_col = data.filter(regex=r'dense').columns
    return train_data, test_data, co_col, ca_col, std
def anomaly_dectection(train_data=None, test_data=None, target='label'):
    """Drop rows whose `target` value lies outside mean +/- 3*std.

    Name kept ('dectection', sic) for existing callers.

    Robustness fixes: an argument that is None (the declared defaults) or an
    empty DataFrame is now passed through unchanged.  Previously None raised
    AttributeError on `.empty`, and an empty frame was silently skipped,
    which made the final 2-element indexing raise IndexError.
    """
    clean_data = []
    for data in (train_data, test_data):
        if data is not None and not data.empty:
            std_ = data[target].std()
            mean_ = data[target].mean()
            # Keep only rows strictly inside the 3-sigma band.
            data = data[data[target] < mean_ + 3 * std_]
            data = data[data[target] > mean_ - 3 * std_]
        clean_data.append(data)
    return clean_data[0], clean_data[1]
--- FILE SEPARATOR ---
#coding=gb18030
import numpy as np
import pandas as pd
def load_data_(datasets, task_name='', seed=2020):
    """Load one of the supported datasets by name.

    Returns (train_data, test_data); test_data is None except for the
    'LiverAblation' dataset, where an undersampled balanced set is used
    for training and the remainder for testing.  Unknown names return
    (None, None).  CSV paths are relative to the source tree.
    """
    if datasets == 'winequality_white':
        data_path = '../DataSet/wine/{}.csv'.format(datasets)
        data = pd.read_csv(data_path)
        data.rename(columns={'quality': 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        train_data = data.fillna(0)
        test_data = None
    elif datasets == 'PPH':
        data_path = '../DataSet/PPH/{}.csv'.format(datasets)
        # Column descriptions live in a separate header file (GB18030 encoded).
        data_head = pd.read_csv('../DataSet/PPH/PPH_head.csv', encoding='gb18030')
        data = pd.read_csv(data_path, encoding='gb18030', index_col='index')
        # Append the human-readable description to each column name.
        col = []
        for col_ in data.columns:
            col.append(col_ + np.squeeze(data_head[col_].values))
        data.columns = np.array(col)
        # data.to_csv('../DataSet/PPH/data_feat_name_add.csv', index=False, encoding='gb18030')
        data['sup_label'] = 0
        label_col = data.filter(regex=r'n61').columns.values[0]
        data.rename(columns={label_col: 'label'}, inplace=True)
        data.dropna(axis=0, subset=['label'], inplace=True)
        # Derive elapsed time in minutes from the hour/minute field pairs,
        # wrapping negative differences across midnight / the hour.
        data['hours'] = data.filter(regex=r'field12').values - data.filter(regex=r'field9').values
        data['hours'] = data['hours'].apply(lambda x: 24 + x if x < 0 else x)
        data['minutes'] = data.filter(regex=r'field13').values - data.filter(regex=r'field10').values
        data['minutes'] = data['minutes'].apply(lambda x: 60 + x if x < 0 else x)
        data['minutes'] += data['hours'] * 60
        # Drop leakage-prone / raw-time / miscellaneous columns.
        drop_columns = data.filter(
            regex=r'n421|field11|其他|field28|其他.1|n262|hours|n61|n51|n4417|n4318|field9|field10|field12|field13').columns
        train_data = data.drop(columns=drop_columns)
        # data.fillna(0, inplace=True)
        test_data = None
    elif datasets == 'LiverAblation':
        data_path = '../DataSet/LiverAblation/{}.csv'.format(datasets)
        data = pd.read_csv(data_path, encoding='gb18030', index_col='基线-患者基本信息-ID_sparse')
        # data_path = '../DataSet/LiverAblation/{}_trans.csv'.format(datasets)
        # data = pd.read_csv(data_path, encoding='gb18030', index_col='baseline_info_ID_sparse')
        data.rename(columns={'time_dense': 'label'}, inplace=True)
        data.rename(columns={'relapse_sparse': 'sup_label'}, inplace=True)
        # Drop follow-up / ID / cluster columns.
        drop_columns_ = data.filter(regex=r'随|ID|cluster|followupInfomation').columns
        data.drop(columns=drop_columns_, inplace=True)
        # Undersample the majority class (sup_label == 0) to a 1:1 ratio.
        data_1 = data.loc[data['sup_label'] == 1]
        data_0 = data.loc[data['sup_label'] == 0].sample(n=data_1.shape[0] * 1, random_state=seed)
        data_undersmapling = pd.concat([data_1, data_0]).sample(frac=1, random_state=seed)
        test_data = data.drop(index=data_undersmapling.index)
        if 'non' in task_name:
            train_data = data_0
        else:
            train_data = data_undersmapling
    else:
        train_data = None
        test_data = None
    return train_data, test_data
|
{
"imported_by": [],
"imports": [
"/Regression/src/preprocess/get_dataset.py",
"/Regression/src/preprocess/load_data.py"
]
}
|
shashi/phosphene
|
/src/apps/devices/cube.py
|
import serial
import numpy
import math
from device import Device
from cubelib import emulator
from cubelib import mywireframe as wireframe
from animations import *
import time
import threading
# A class for the cube
class Cube(Device):
    """A 3-D LED cube device (Python 2 code).

    Voxel states live in a dimension^3 boolean array; toByteStream packs
    them into the wire format expected by the cube firmware.
    """
    def __init__(self, port, dimension=10, emulator=False):
        Device.__init__(self, "Cube", port)
        # dimension^3 boolean voxel buffer, all off initially.
        self.array = numpy.array([[[0]*dimension]*dimension]*dimension, dtype='bool')
        self.dimension = dimension
        self.emulator = emulator  # True -> redraw() renders to the wireframe emulator
        self.name = "Cube"

    def set_led(self, x, y, z, level=1):
        # Turn a single voxel on (default) or off.
        self.array[x][y][z] = level

    def get_led(self, x, y, z):
        return self.array[x][y][z]

    def takeSignal(self, signal):
        # Device-interface hook; the cube ignores the audio signal.
        pass

    def toByteStream(self):
        # Pack the voxel array into bytes, layer by layer.
        # 104 bits per layer, first 4 bits waste.
        bytesPerLayer = int(math.ceil(self.dimension**2 / 8.0))
        print bytesPerLayer
        discardBits = bytesPerLayer * 8 - self.dimension**2
        print discardBits
        bts = bytearray(bytesPerLayer*self.dimension)
        pos = 0
        mod = 0
        for layer in self.array:
            # Skip the padding bits at the start of each layer.
            mod = discardBits
            for row in layer:
                for bit in row:
                    if bit: bts[pos] |= 1 << mod
                    else: bts[pos] &= ~(1 << mod)
                    mod += 1
                    if mod == 8:
                        mod = 0
                        pos += 1
        return bts

    def redraw(self, wf=None, pv=None):
        # In emulator mode, push the lit-voxel indices to the wireframe view.
        if self.emulator:
            wf.setVisible(emulator.findIndexArray(self.array))
            pv.run()
if __name__ == "__main__":
    # Standalone test harness (Python 2): stream packed frames to the cube
    # over serial from a background thread while animating in the main loop.
    cube = Cube("/dev/ttyACM0")
    #pv = emulator.ProjectionViewer(640,480)
    #wf = wireframe.Wireframe()
    #pv.createCube(wf)
    count = 0
    start = (0, 0, 0)
    point = (0,0)
    #fillCube(cube,0)
    #cube.redraw()
    #time.sleep(100)
    def sendingThread():
        # Continuously push the voxel buffer: 'S' header, then 130 bytes,
        # expecting a '.' acknowledgement after each byte.
        while True:
            cube.port.write("S")
            bs = cube.toByteStream()
            for i in range(0, 130):
                time.sleep(0.01)
                cube.port.write(chr(bs[i]))
                print "wrote", bs[i]
                assert(cube.port.read() == '.')
    t = threading.Thread(target=sendingThread)
    t.start()
    #fillCube(cube,0)
    #cube.set_led(9,9,9)
    #for x in range(0, 9):
    #    for y in range(0, 9):
    #        for z in range(0, 9):
    #            cube.set_led(x, y, z, 1)
    #            time.sleep(1)
    while True:
        # Animation loop: currently just keeps the whole cube lit; the
        # alternative effects are kept below for reference.
        #wireframeCube(cube,(1,1,1),(9,9,9))
        fillCube(cube, 1)
        #planeBounce(cube,(count/20)%2+1,count%20)
        #planeBounce(cube,1,count)
        #start = wireframeExpandContract(cube,start)
        #rain(cube,count,5,10)
        #time.sleep(.1)
        #point = voxel(cube,count,point)
        #sine_wave(cube,count)
        #pyramids(cube,count)
        #side_waves(cube,count)
        #fireworks(cube,4)
        #technites(cube, count)
        #setPlane(cube,1,(counter/100)%10,1)
        #setPlane(cube,2,0,1)
        #stringPrint(cube,'TECHNITES',count)
        #moveFaces(cube)
        #cube.set_led(0,0,0)
        #cube.set_led(0,0,1)
        cube.redraw()
        count += 1
        time.sleep(0.1)
|
import serial
import numpy
from threading import Thread
class Device:
    """Base class for a serial-attached output device (Python 2 code).

    Subclasses override setupSignal/graphOutput/redraw.  If the serial port
    cannot be opened, the device stays in a disconnected, no-op state.
    """
    def __init__(self, name, port):
        self.array = []
        try:
            self.port = serial.Serial(port)
            self.isConnected = True
            print "Connected to", name
        except Exception as e:
            # Keep running without the device; redraw() becomes a no-op.
            self.port = None
            self.isConnected = False
            print "Error connecting to", name, e

    def setupSignal(self, signal):
        # Hook: register lifted values on the signal.
        pass

    def graphOutput(self, signal):
        # Hook: return a pygame debug-graph drawer, or None.
        pass

    def truncate(self, array):
        # Clamp values into the 0..255 byte range (as ints).
        return numpy.array([min(int(i), 255) for i in array])

    def toByteStream(self, array):
        # One raw byte per channel value.
        return [chr(i) for i in self.truncate(array)]

    def readAck(self):
        print self.port.read(size=1) # Read the acknowledgement

    def redraw(self):
        # Push the current frame; silently skip when disconnected.
        if self.isConnected:
            self.port.write(self.toByteStream())
            self.port.read(size=1) #Acknowledgement
        else:
            #print "Connection to %s lost!" % self.name
            pass

    def isUnresponsive(self):
        # NOTE(review): the "%s" has no argument applied, so the literal
        # "%s" prints; presumably `% self.name` was intended — confirm.
        print "%s is not responding! Stopping to communicate."
        self.isConnected = False
|
{
"imported_by": [
"/src/apps/psychroom.py"
],
"imports": [
"/src/apps/devices/device.py"
]
}
|
shashi/phosphene
|
/src/apps/devices/waterfall.py
|
import device
from phosphene.signal import *
import scipy, numpy
from phosphene.graphs import barGraph
class Waterfall(device.Device):
    """Waterfall light device: 8 LED channels followed by 8 fan channels."""
    def __init__(self, port):
        device.Device.__init__(self, "Waterfall", port)

    def setupSignal(self, signal):
        # Lift a 16-value frame: 8 light levels (short-term FFT averages
        # normalized by the long-term averages), then 8 fan values at
        # double the light levels; the lights are reversed in place first.
        def waterfall(s):
            lights = [s.avg8[i] * 150 / max(0.5, s.longavg8[i]) \
                      for i in range(0, 8)]
            fans = [2*i for i in lights]
            lights.reverse()
            return lights + fans
        signal.waterfall = lift(waterfall)

    def graphOutput(self, signal):
        # Bar graph of the channel values scaled to [0, 1].
        return barGraph(self.truncate(signal.waterfall) / 255.0)

    def redraw(self, signal):
        payload = self.toByteStream(signal.waterfall)
        self.port.write(payload)
|
import pdb
import scipy
import numpy
import pygame
from pygame import display
from pygame.draw import *
from pygame import Color
import math
def barGraph(data):
    """
    drawing contains (x, y, width, height)

    Returns a drawer f(surface, rectangle) rendering `data` (values in
    [0, 1]) as red vertical bars filling the rectangle.
    """
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        try:
            l = len(data)
        except:
            # Debug aid: drop into pdb if data has no length.
            pdb.set_trace()
        w = W / l   # bar slot width
        try:
            for i in range(0, l):
                h = data[i]
                c = Color(0, 0, 0, 0)
                c.hsva = (0, 100, 100, 0)   # red, full saturation/value
                x = x0 + i * w
                y = y0 + H * (1 - h)
                rect(surface, c, \
                     (x, y, 0.9 * w, h * H))
        except:
            pdb.set_trace()
    return f
def boopGraph(data):
    """Return a drawer f(surface, rectangle) rendering `data` (values in
    [0, 1]) as a row of centered white squares sized by the value."""
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        try:
            l = len(data)
        except:
            # Debug aid: drop into pdb if data has no length.
            pdb.set_trace()
        dx = W / l   # slot width per square
        try:
            for i in range(0, l):
                d = data[i]
                a = dx * d   # square side, proportional to the value
                x = (dx - a) / 2 + i * dx + x0
                y = (H - dx) / 2 + (dx - a) / 2 + y0
                c = Color(255, 255, 255, 255)
                rect(surface, c, \
                     (x, y, a, a))
        except:
            pdb.set_trace()
    return f
def circleRays(surface, center, data, transform=lambda y: scipy.log(y + 1)):
    """Draw `data` as hue-coded rays radiating from `center`, plus a
    central disc; ray length and hue scale with transform(value)."""
    x0, y0 = center
    total = math.radians(360)
    l = len(data)
    m = transform(max(data))   # normalization factor from the peak value
    part = total/l             # angle step per ray
    for i in range(0, l):
        if m > 0:
            p = transform(data[i])
            h = p * 5
            hue = p / m
            c = Color(0, 0, 0, 0)
            c.hsva = ((1-hue) * 360, 100, 100, 0)
            x = x0 + (m*2+h)*math.cos(part * i)
            y = y0 + (m*2+h)*math.sin(part*i)
            line(surface, c,
                 (x0,y0),(x,y),1)
    # NOTE(review): relies on `c` left over from the last loop iteration;
    # raises NameError if data is empty or m <= 0 — confirm inputs.
    circle(surface,c, center,int(m*2),0)
def graphsGraphs(graphs, direction=0):
    """Stack several graph drawers vertically inside one rectangle.

    Each element of `graphs` is a callable (surface, rect) -> None; the
    enclosing rectangle is divided into len(graphs) equal-height rows.
    `direction` is accepted for interface compatibility but unused.
    """
    def draw_all(surface, bigRect):
        left, top, width, height = bigRect
        row_height = height / len(graphs)
        for draw in graphs:
            draw(surface, (left, top, width, row_height))
            top += row_height
    return draw_all
|
{
"imported_by": [
"/src/apps/psychroom.py"
],
"imports": [
"/src/phosphene/graphs.py"
]
}
|
shashi/phosphene
|
/src/apps/psychroom.py
|
#
# This script plays an mp3 file and communicates via serial.Serial
# with devices in the Technites psychedelic room to visualize the
# music on them.
#
# It talks to 4 devices
# WaterFall -- tubes with LEDs and flying stuff fanned to music
# DiscoBall -- 8 60 watt bulbs wrapped in colored paper
# LEDWall -- a 4 channel strip of LED
# this time it was the LED roof instead :p
# LEDCube -- a 10x10x10 LED cube - work on this is still on
#
# the script also has a sloppy pygame visualization of the fft and
# beats data
#
import sys
import time
import scipy
import pygame
from pygame import display
from pygame.draw import *
import pathsetup # this module sets up PYTHONPATH for all this to work
from devices.discoball import DiscoBall
from devices.waterfall import Waterfall
from devices.ledwall import LEDWall
from devices.cube import Cube
import phosphene
from phosphene import audio, signalutil, util
from phosphene.util import *
from phosphene.signal import *
from phosphene.dsp import *
from phosphene.graphs import *
from phosphene.signalutil import *
from cube import cubeProcess
#from phosphene import cube
from threading import Thread
# Setup devices with their corresponding device files
devs = [
    Waterfall("/dev/ttyACM0"),
    DiscoBall("/dev/ttyACM1"),
    LEDWall("/dev/ttyACM2")
]

pygame.init()
surface = display.set_mode((640, 480))

# Require an mp3 path on the command line (Python 2 script).
if len(sys.argv) < 2:
    print "Usage: %s file.mp3" % sys.argv[0]
    sys.exit(1)
else:
    fPath = sys.argv[1]

sF, data = audio.read(fPath)
import serial
signal = Signal(data, sF)
# Mono mix of the two stereo channels as the signal's amplitude.
signal.A = lift((data[:,0] + data[:,1]) / 2, True)

# Let every device register its lifted values on the signal.
for d in devs:
    d.setupSignal(signal)

def devices(s):
    # Per-frame process: push the current frame to every connected device,
    # then draw each device's debug graph on the pygame surface.
    #threads = []
    for d in devs:
        if d.isConnected:
            def f():
                d.redraw(s)
                d.readAck()
            #t = Thread(target=f)
            #threads.append(t)
            #t.start()
            f()
    #for t in threads:
    #    t.join(timeout=2)
    #    if t.isAlive():
    #        d.isUnresponsive()
    surface.fill((0, 0, 0))
    graphsGraphs(filter(
        lambda g: g is not None,
        [d.graphOutput(signal) for d in devs]))(surface, (0, 0, 640, 480))

# Frame-counter state for the (currently disabled) cube device.
CubeState = lambda: 0
CubeState.count = 0
#cube = Cube("/dev/ttyACM1", emulator=True)
def cubeUpdate(signal):
    # NOTE(review): `cube` is only created by the commented-out line above;
    # enabling this process without it raises NameError — confirm.
    CubeState.count = cubeProcess(cube, signal, CubeState.count)

def graphsProcess(s):
    display.update()

processes = [graphsProcess, devices] #, cube.emulator]
signal.relthresh = 1.66
soundObj = audio.makeSound(sF, data)
# make a pygame Sound object from the data
# run setup on the signal
signalutil.setup(signal)
soundObj.play() # start playing it. This is non-blocking
perceive(processes, signal, 90) # perceive your signal.
|
from devices.cubelib import emulator
from devices.cubelib import mywireframe as wireframe
from devices.animations import *
pv = emulator.ProjectionViewer(640,480)  # shared emulator window
wf = wireframe.Wireframe()               # shared wireframe model

def cubeProcess(cube, signal, count):
    """Run one cube animation frame and return the next frame count.

    NOTE(review): createCube is re-invoked every frame, and start/point
    are reset but unused by the active animation — apparently leftovers
    from the commented-out effects below; confirm before cleanup.
    """
    pv.createCube(wf)
    start = (0, 0, 0)
    point = (0,0)
    #planeBounce(cube,(count/20)%2+1,count%20)
    #start = wireframeExpandContract(cube,start)
    #rain(cube,count,5,10)
    #time.sleep(.1)
    #point = voxel(cube,count,point)
    #sine_wave(cube,count)
    #pyramids(cube,count)
    #side_waves(cube,count)
    #fireworks(cube,4)
    technites(cube,count)
    cube.redraw(wf, pv)
    return count + 1
--- FILE SEPARATOR ---
import device
from phosphene.signal import *
from phosphene.signalutil import *
from phosphene.graphs import *
class LEDWall(device.Device):
    """LED strip/roof device: lights n of 6 channels by bass intensity."""
    def __init__(self, port):
        device.Device.__init__(self, "LEDWall", port)

    def setupSignal(self, signal):
        CHANNELS = 6
        # Log-scaled short-term minus long-term bass energy (channel 0).
        val = lambda s: [max(0, scipy.log(s.avg3[0]+1)) - scipy.log(s.longavg3[0]+1)]
        signal.avg1Falling = fallingMax(val)
        def f(s):
            # Map the current value to 0..6 lit channels, normalized by the
            # falling maximum (guarded against near-zero denominators).
            n = int(min(6, max(0, val(s)[0] * CHANNELS / (s.avg1Falling[0] if s.avg1Falling[0] > 0.01 else 1))))
            return [1 for i in range(0, n)] + [0 for i in range(0, 6-n)]
        signal.ledwall = lift(f)

    def graphOutput(self, signal):
        # No pygame visualization for this device.
        return None

    def redraw(self, signal):
        print "LEDWall", self.toByteStream(signal.ledwall)
        self.port.write(self.toByteStream(signal.ledwall))
--- FILE SEPARATOR ---
import device
from phosphene.signal import *
import scipy, numpy
from phosphene.graphs import barGraph
class Waterfall(device.Device):
    """Waterfall light device: 8 LED channels followed by 8 fan channels."""
    def __init__(self, port):
        device.Device.__init__(self, "Waterfall", port)

    def setupSignal(self, signal):
        # Lift a 16-value frame: 8 light levels (short-term FFT averages
        # normalized by the long-term averages), then 8 fan values at
        # double the light levels; the lights are reversed in place first.
        def waterfall(s):
            lights = [s.avg8[i] * 150 / max(0.5, s.longavg8[i]) \
                      for i in range(0, 8)]
            fans = [2*i for i in lights]
            lights.reverse()
            return lights + fans
        signal.waterfall = lift(waterfall)

    def graphOutput(self, signal):
        # Bar graph of the channel values scaled to [0, 1].
        return barGraph(self.truncate(signal.waterfall) / 255.0)

    def redraw(self, signal):
        payload = self.toByteStream(signal.waterfall)
        self.port.write(payload)
--- FILE SEPARATOR ---
import device
from phosphene.signal import *
from phosphene.signalutil import *
from phosphene.graphs import *
class DiscoBall(device.Device):
    """Disco-ball device: a bulb lights when the short-term FFT average
    exceeds the long-term average by a factor of ~sqrt(2)."""
    def __init__(self, port):
        device.Device.__init__(self, "DiscoBall", port)

    def setupSignal(self, signal):
        # Per-band beat indicator (Python 2 tuple-parameter lambda).
        signal.discoball = lift(lambda s: numpymap(lambda (a, b): 1 if a > b * 1.414 else 0, zip(s.avg12, s.longavg12)))

    def graphOutput(self, signal):
        # Visualize the 4 channels that are actually sent to the hardware.
        return boopGraph(signal.discoball[:4])

    def redraw(self, signal):
        data = self.truncate(signal.discoball[:4] * 255)
        print data
        self.port.write(self.toByteStream(data))
--- FILE SEPARATOR ---
import serial
import numpy
import math
from device import Device
from cubelib import emulator
from cubelib import mywireframe as wireframe
from animations import *
import time
import threading
# A class for the cube
class Cube(Device):
def __init__(self, port, dimension=10, emulator=False):
Device.__init__(self, "Cube", port)
self.array = numpy.array([[\
[0]*dimension]*dimension]*dimension, dtype='bool')
self.dimension = dimension
self.emulator = emulator
self.name = "Cube"
def set_led(self, x, y, z, level=1):
self.array[x][y][z] = level
def get_led(self, x, y, z):
return self.array[x][y][z]
def takeSignal(self, signal):
pass
def toByteStream(self):
# 104 bits per layer, first 4 bits waste.
bytesPerLayer = int(math.ceil(self.dimension**2 / 8.0))
print bytesPerLayer
discardBits = bytesPerLayer * 8 - self.dimension**2
print discardBits
bts = bytearray(bytesPerLayer*self.dimension)
pos = 0
mod = 0
for layer in self.array:
mod = discardBits
for row in layer:
for bit in row:
if bit: bts[pos] |= 1 << mod
else: bts[pos] &= ~(1 << mod)
mod += 1
if mod == 8:
mod = 0
pos += 1
return bts
def redraw(self, wf=None, pv=None):
if self.emulator:
wf.setVisible(emulator.findIndexArray(self.array))
pv.run()
if __name__ == "__main__":
    # Standalone test harness (Python 2): stream packed frames to the cube
    # over serial from a background thread while animating in the main loop.
    cube = Cube("/dev/ttyACM0")
    #pv = emulator.ProjectionViewer(640,480)
    #wf = wireframe.Wireframe()
    #pv.createCube(wf)
    count = 0
    start = (0, 0, 0)
    point = (0,0)
    #fillCube(cube,0)
    #cube.redraw()
    #time.sleep(100)
    def sendingThread():
        # Continuously push the voxel buffer: 'S' header, then 130 bytes,
        # expecting a '.' acknowledgement after each byte.
        while True:
            cube.port.write("S")
            bs = cube.toByteStream()
            for i in range(0, 130):
                time.sleep(0.01)
                cube.port.write(chr(bs[i]))
                print "wrote", bs[i]
                assert(cube.port.read() == '.')
    t = threading.Thread(target=sendingThread)
    t.start()
    #fillCube(cube,0)
    #cube.set_led(9,9,9)
    #for x in range(0, 9):
    #    for y in range(0, 9):
    #        for z in range(0, 9):
    #            cube.set_led(x, y, z, 1)
    #            time.sleep(1)
    while True:
        # Animation loop: currently just keeps the whole cube lit; the
        # alternative effects are kept below for reference.
        #wireframeCube(cube,(1,1,1),(9,9,9))
        fillCube(cube, 1)
        #planeBounce(cube,(count/20)%2+1,count%20)
        #planeBounce(cube,1,count)
        #start = wireframeExpandContract(cube,start)
        #rain(cube,count,5,10)
        #time.sleep(.1)
        #point = voxel(cube,count,point)
        #sine_wave(cube,count)
        #pyramids(cube,count)
        #side_waves(cube,count)
        #fireworks(cube,4)
        #technites(cube, count)
        #setPlane(cube,1,(counter/100)%10,1)
        #setPlane(cube,2,0,1)
        #stringPrint(cube,'TECHNITES',count)
        #moveFaces(cube)
        #cube.set_led(0,0,0)
        #cube.set_led(0,0,1)
        cube.redraw()
        count += 1
        time.sleep(0.1)
|
{
"imported_by": [],
"imports": [
"/src/apps/cube.py",
"/src/apps/devices/ledwall.py",
"/src/apps/devices/waterfall.py",
"/src/apps/devices/discoball.py",
"/src/apps/devices/cube.py"
]
}
|
shashi/phosphene
|
/src/demo.py
|
import sys
import pdb
import pygame
from pygame import display
from pygame.draw import *
import scipy
import time
from phosphene import audio, util, signalutil, signal
from phosphene.graphs import barGraph, boopGraph, graphsGraphs
from threading import Thread
# Require an mp3 path on the command line (Python 2 script).
if len(sys.argv) < 2:
    print "Usage: %s file.mp3" % sys.argv[0]
    sys.exit(1)
else:
    fPath = sys.argv[1]

# initialize PyGame
SCREEN_DIMENSIONS = (640, 480)
pygame.init()
surface = display.set_mode(SCREEN_DIMENSIONS)

sF, data = audio.read(fPath)
sig = signal.Signal(data, sF)
# Mono mix of both stereo channels as the amplitude signal.
sig.A = signal.lift((data[:,0] + data[:,1]) / 2, True)

def beats(s):
    """ Extract beats in the signal in 4 different
        frequency ranges """
    # quick note: s.avg4 is a decaying 4 channel fft
    # s.longavg4 decays at a slower rate
    # beat detection huristic:
    # beat occured if s.avg4 * threshold > s.longavg4
    # NOTE(review): avg4 is pre-multiplied by threshold in the zip below
    # AND compared against threshold * y, so the threshold cancels out
    # (condition reduces to avg4 > longavg4) — confirm this is intended.
    threshold = 1.7
    return util.numpymap(
        lambda (x, y): 1 if x > threshold * y else 0,
        zip(s.avg4 * threshold, s.longavg4))

# Lift the beats
sig.beats = signal.lift(beats)
# not sure if this can be called sustain.
# blend gives a decay effect
sig.sustain = signalutil.blend(beats, 0.7)

def graphsProcess(s):
    # clear screen
    surface.fill((0, 0, 0))
    # draw a decaying fft differential and the beats in the full
    # pygame window.
    graphsGraphs([
        barGraph(s.avg12rel / 10),
        boopGraph(s.beats),
        boopGraph(s.sustain)
    ])(surface, (0, 0) + SCREEN_DIMENSIONS)
    # affect the window
    display.update()

def repl():
    """ call this function to give you a pdb shell
        while the program is running. You will be
        dropped in the current context. """
    def replFunc():
        pdb.set_trace()
    replThread = Thread(target=replFunc)
    replThread.start()

#repl()
# apply utility "lift"s -- this sets up signal.avgN and longavgN variables
signalutil.setup(sig)
soundObj = audio.makeSound(sF, data)
# make a pygame Sound object from the data
soundObj.play() # start playing it. This is non-blocking
# perceive signal at 90 fps (or lesser when not possible)
signal.perceive([graphsProcess], sig, 90)
|
import pdb
import scipy
import numpy
import pygame
from pygame import display
from pygame.draw import *
from pygame import Color
import math
def barGraph(data):
    """
    drawing contains (x, y, width, height)

    Returns a drawer f(surface, rectangle) rendering `data` (values in
    [0, 1]) as red vertical bars filling the rectangle.
    """
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        try:
            l = len(data)
        except:
            # Debug aid: drop into pdb if data has no length.
            pdb.set_trace()
        w = W / l   # bar slot width
        try:
            for i in range(0, l):
                h = data[i]
                c = Color(0, 0, 0, 0)
                c.hsva = (0, 100, 100, 0)   # red, full saturation/value
                x = x0 + i * w
                y = y0 + H * (1 - h)
                rect(surface, c, \
                     (x, y, 0.9 * w, h * H))
        except:
            pdb.set_trace()
    return f
def boopGraph(data):
    """Return a drawer f(surface, rectangle) rendering `data` (values in
    [0, 1]) as a row of centered white squares sized by the value."""
    def f(surface, rectangle):
        x0, y0, W, H = rectangle
        try:
            l = len(data)
        except:
            # Debug aid: drop into pdb if data has no length.
            pdb.set_trace()
        dx = W / l   # slot width per square
        try:
            for i in range(0, l):
                d = data[i]
                a = dx * d   # square side, proportional to the value
                x = (dx - a) / 2 + i * dx + x0
                y = (H - dx) / 2 + (dx - a) / 2 + y0
                c = Color(255, 255, 255, 255)
                rect(surface, c, \
                     (x, y, a, a))
        except:
            pdb.set_trace()
    return f
def circleRays(surface, center, data, transform=lambda y: scipy.log(y + 1)):
    """Draw `data` as hue-coded rays radiating from `center`, plus a
    central disc; ray length and hue scale with transform(value)."""
    x0, y0 = center
    total = math.radians(360)
    l = len(data)
    m = transform(max(data))   # normalization factor from the peak value
    part = total/l             # angle step per ray
    for i in range(0, l):
        if m > 0:
            p = transform(data[i])
            h = p * 5
            hue = p / m
            c = Color(0, 0, 0, 0)
            c.hsva = ((1-hue) * 360, 100, 100, 0)
            x = x0 + (m*2+h)*math.cos(part * i)
            y = y0 + (m*2+h)*math.sin(part*i)
            line(surface, c,
                 (x0,y0),(x,y),1)
    # NOTE(review): relies on `c` left over from the last loop iteration;
    # raises NameError if data is empty or m <= 0 — confirm inputs.
    circle(surface,c, center,int(m*2),0)
def graphsGraphs(graphs, direction=0):
    """Stack several graph drawers vertically inside one rectangle.

    Each element of `graphs` is a callable (surface, rect) -> None; the
    enclosing rectangle is divided into len(graphs) equal-height rows.
    `direction` is accepted for interface compatibility but unused.
    """
    def draw_all(surface, bigRect):
        left, top, width, height = bigRect
        row_height = height / len(graphs)
        for draw in graphs:
            draw(surface, (left, top, width, row_height))
            top += row_height
    return draw_all
|
{
"imported_by": [],
"imports": [
"/src/phosphene/graphs.py"
]
}
|
shashi/phosphene
|
/src/phosphene/signal.py
|
import time
import numpy
from util import indexable
# Public API of this module.
__all__ = [
    'Signal',
    'lift',
    'foldp',
    'perceive'
]
class lift:
    """ Annotate an object as lifted.

    A lifted value is either a function of the signal (recomputed on each
    access) or an iterable indexed by the signal's current sample index x.
    """
    def __init__(self, f, t_indexable=None):
        self.f = f
        if hasattr(f, '__call__'):
            self._type = 'lambda'
        elif isinstance(self.f, (list, tuple, numpy.ndarray)):
            self._type = 'iterable'
        else:
            raise ValueError(
                """You can lift only a function that takes
                the signal as argument, or an iterable"""
            )
        # t_indexable: None/True -> wrap iterables in a temporally
        # indexable view; False -> index the raw sequence directly.
        self.indexable = t_indexable

    def _manifest(self, signal):
        # compute the current value of this lifted
        # function given the current value of the signal
        if self._type == "lambda":
            return self.f(signal)
        elif self._type == "iterable":
            if self.indexable is None or self.indexable:
                # Make the array temporally indexable
                return indexable(self.f, signal.x)
            elif self.indexable == False:
                # BUG FIX: this previously read `indexable == False`,
                # comparing the module-level helper function (never False),
                # so the branch was unreachable and _manifest returned None.
                return self.f[signal.x]
def foldp(f, init=None):
    """Fold a value over time.

    `f(signal, store)` must return (value, new_store); the store is
    carried across frames, starting from `init`.  Returns a lift that
    yields the current folded value.
    """
    state = {'store': init, 'val': None}   # mutable cell shared by closures
    def stepper(signal):
        val, store = f(signal, state['store'])
        state['store'] = store
        state['val'] = val
        return val
    return lift(stepper)
class _WAIT:
    # Sentinel type: a _WAIT instance is stored in Signal's cache while a
    # lifted value is being computed, so other threads busy-wait instead
    # of recomputing the same value.
    pass
class Signal:
    """ The Signal abstraction.

    Wraps a sampled waveform Y plus a registry of lifted values.  Reading
    a lifted attribute recomputes it for the current sample index x (with
    per-x caching); a _WAIT sentinel guards against redundant concurrent
    recomputation.  Python 2 code (dict.has_key).
    """
    def __init__(self, Y, sample_rate, max_fps=90):
        self.Y = Y
        self.x = 0               # current sample index
        self.fps = 0             # smoothed frames-per-second estimate
        self.max_fps = max_fps
        self.sample_rate = sample_rate
        self.lifts = {}          # attribute name -> lift object
        self.t = lift(lambda s: s.time())
        self.A = lift(Y[:,0], True)   # default amplitude: first channel
        self.cache = {}          # attribute name -> (x, value) or _WAIT

    def time(self, t=time.time):
        # this signal's definition of time
        return t()

    def __getattr__(self, k):
        # call the thing that is requred with self
        if self.lifts.has_key(k):
            # Lifted values must have the same value
            # for the same x. Cache them.
            # This also helps in performance e.g. when
            # fft is needed a multiple places
            if self.cache.has_key(k):
                if isinstance(self.cache[k], _WAIT):
                    # Locking mechanism to avoid
                    # redundant computations by threads
                    # (busy-wait until the computing thread stores a value)
                    while isinstance(self.cache[k], _WAIT):
                        pass
                    return self.cache[k][1]
                else:
                    x, val = self.cache[k]
                    if x == self.x:
                        return val
            # Cache miss or stale entry: mark as in-progress, compute, store.
            self.cache[k] = _WAIT()
            val = self.lifts[k]._manifest(self)
            self.cache[k] = (self.x, val)
            return val
        else:
            return self.__dict__[k]

    def __setattr__(self, k, v):
        # Assigning a lift registers it; anything else is a plain attribute.
        if isinstance(v, lift):
            self.lifts[k] = v
        else:
            self.__dict__[k] = v

    def set_state(self, x, fps, frames):
        # Advance the signal to sample x (bookkeeping driven by perceive()).
        self.x = x
        self.fps = fps
        self.frames = frames
def perceive(processes, signal, max_fps):
    """Let processes perceive the signal.

    Simulates real-time reading of ``signal``: on each frame the current
    sample index is derived from the wall clock, each function in
    ``processes`` is called with the signal, and the loop is paced to at
    most ``max_fps`` frames per second.  Ends once the clock has passed
    the last sample of ``signal.Y``.
    """
    start_time = signal.time()
    call_spacing = 1.0 / max_fps
    sample_count = len(signal.Y)
    prev_x = -1
    x = 0
    frames = 0
    fps = max_fps
    while True:
        tic = signal.time()
        # what should be the current sample?
        x = int((tic - start_time) * signal.sample_rate)
        if x >= sample_count:
            break
        frames += 1
        if x != prev_x:
            # approximate current fps (exponential moving average).
            # FIX: guarded — when the clock advances by less than one
            # sample between frames, x == prev_x and the original
            # divided by zero here.
            fps = fps * 0.5 + 0.5 * signal.sample_rate / float(x - prev_x)
        # Advance state of the signal
        signal.set_state(x, fps, frames)
        for p in processes:
            p(signal)  # show processes the signal
        prev_x = x
        toc = signal.time()
        wait = call_spacing - (toc - tic)
        # chill out before looping again
        # FIXME: this assumes that the frame rate varies smoothly
        #        i.e. next frame takes approximately takes the
        #        same time as few frames immediately before it
        if wait > 0:
            time.sleep(wait)
|
import numpy
from threading import Thread # this is for the repl
__all__ = ['memoize', 'memoizeBy', 'numpymap', 'indexable', 'reverse']
# Helper functions
def memoize(f, key=None):
    """Return a memoized wrapper of *f*.

    Results are cached by ``str(args)`` of the positional arguments.
    ``key`` is accepted for interface compatibility but is currently
    unused — lookups always use the stringified argument tuple.
    """
    mem = {}
    def g(*args):
        k = str(args)
        if k in mem:   # FIX: was mem.has_key(k) — removed in Python 3
            return mem[k]
        else:
            r = f(*args)
            mem[k] = r
            return r
    return g
def memoizeBy(f, x, *args):
    """Memoize keyed on *x* rather than on f's own arguments."""
    cached = memoize(lambda _key: f(*args))
    return cached(x)
def numpymap(f, X):
    """Map *f* over *X* and return the result as a numpy array.

    FIX: the original passed ``map(f, X)`` straight to ``numpy.array``;
    on Python 3 ``map`` returns an iterator, which numpy wraps as a 0-d
    object array instead of a proper vector.  Materialize first.
    """
    return numpy.array([f(x) for x in X])
def indexable(f, offset=0):
    """Make a list-like object from *f*, with indices shifted by *offset*.

    *f* may be a function of one int, or any subscriptable sequence.
    Supports integer indexing and explicit slices (the underlying source
    may be unbounded, so slice bounds are not inferred from a length).
    Raises TypeError for unsupported sources or index types — the
    original raised *string* exceptions, which is itself a TypeError on
    modern Python.
    """
    if not hasattr(f, '__call__'):
        # Assume f is a sequence type and adapt it to a function.
        if not hasattr(f, '__getitem__'):
            raise TypeError(
                "indexable() needs a function or a subscriptable "
                "sequence, got %s" % type(f).__name__)
        seq = f
        f = lambda i: seq[i]
    class Indexable:
        def getFunction(self):
            return f
        def __getitem__(self, *i):
            # Normal subscription passes a single index/slice.
            if len(i) == 1:
                i = i[0]
            if isinstance(i, int):
                return f(i + offset)
            # Handle range queries
            elif isinstance(i, slice):
                start = 0 if i.start is None else i.start
                # BUG FIX: the original used ``0`` as the step whenever an
                # explicit step was given (``1 if i.step is None else 0``),
                # making every stepped slice raise ValueError.
                step = 1 if i.step is None else i.step
                return [f(j + offset) for j in range(start, i.stop, step)]
            else:
                raise TypeError("unsupported index type: %r" % (i,))
        def __len__(self):
            # Length of the underlying source is unknown.
            return 0
    return Indexable()
def windowedMap(f, samples, width, overlap):
    """Apply *f* to successive windows of *samples*.

    Each window is ``width`` samples long; consecutive windows share
    ``overlap`` samples, i.e. the window start advances by
    ``width - overlap`` each step.  Returns the list of ``f(window)``.

    NOTE(review): the original body was an unfinished stub that returned
    an undefined name ``res`` (NameError).  This implementation follows
    the signature's apparent intent — confirm against callers.
    """
    step = width - overlap
    if step <= 0:
        raise ValueError("overlap must be smaller than width")
    res = [f(samples[i:i + width])
           for i in range(0, len(samples) - width + 1, step)]
    return res
def reverse(l):
    """Return the elements of *l* as a new list in reverse order."""
    return list(reversed(l))
|
{
"imported_by": [],
"imports": [
"/src/phosphene/util.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/build/lib/io_stockx/models/__init__.py
|
# coding: utf-8
# flake8: noqa
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from io_stockx.models.address_object import AddressObject
from io_stockx.models.billing_object import BillingObject
from io_stockx.models.customer_object import CustomerObject
from io_stockx.models.customer_object_merchant import CustomerObjectMerchant
from io_stockx.models.customer_object_security import CustomerObjectSecurity
from io_stockx.models.customer_object_shipping import CustomerObjectShipping
from io_stockx.models.customers_id_selling_current import CustomersIdSellingCurrent
from io_stockx.models.customers_id_selling_current_pagination import CustomersIdSellingCurrentPagination
from io_stockx.models.customers_id_selling_current_paging import CustomersIdSellingCurrentPaging
from io_stockx.models.login_request import LoginRequest
from io_stockx.models.login_response import LoginResponse
from io_stockx.models.market_data import MarketData
from io_stockx.models.market_data_market import MarketDataMarket
from io_stockx.models.portfolio_id_del_request import PortfolioIdDelRequest
from io_stockx.models.portfolio_id_del_response import PortfolioIdDelResponse
from io_stockx.models.portfolio_id_del_response_portfolio_item import PortfolioIdDelResponsePortfolioItem
from io_stockx.models.portfolio_id_del_response_portfolio_item_merchant import PortfolioIdDelResponsePortfolioItemMerchant
from io_stockx.models.portfolio_id_del_response_portfolio_item_product import PortfolioIdDelResponsePortfolioItemProduct
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_market import PortfolioIdDelResponsePortfolioItemProductMarket
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_meta import PortfolioIdDelResponsePortfolioItemProductMeta
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_shipping import PortfolioIdDelResponsePortfolioItemProductShipping
from io_stockx.models.portfolio_id_del_response_portfolio_item_tracking import PortfolioIdDelResponsePortfolioItemTracking
from io_stockx.models.portfolio_request import PortfolioRequest
from io_stockx.models.portfolio_request_portfolio_item import PortfolioRequestPortfolioItem
from io_stockx.models.portfolio_response import PortfolioResponse
from io_stockx.models.portfolio_response_portfolio_item import PortfolioResponsePortfolioItem
from io_stockx.models.portfolio_response_portfolio_item_product import PortfolioResponsePortfolioItemProduct
from io_stockx.models.portfolio_response_portfolio_item_product_market import PortfolioResponsePortfolioItemProductMarket
from io_stockx.models.portfolio_response_portfolio_item_product_media import PortfolioResponsePortfolioItemProductMedia
from io_stockx.models.portfolio_response_portfolio_item_tracking import PortfolioResponsePortfolioItemTracking
from io_stockx.models.portfolioitems_id_get_response import PortfolioitemsIdGetResponse
from io_stockx.models.portfolioitems_id_get_response_portfolio_item import PortfolioitemsIdGetResponsePortfolioItem
from io_stockx.models.portfolioitems_id_get_response_portfolio_item_product import PortfolioitemsIdGetResponsePortfolioItemProduct
from io_stockx.models.portfolioitems_id_get_response_portfolio_item_product_market import PortfolioitemsIdGetResponsePortfolioItemProductMarket
from io_stockx.models.product_info import ProductInfo
from io_stockx.models.product_info_attributes import ProductInfoAttributes
from io_stockx.models.product_info_attributes_traits import ProductInfoAttributesTraits
from io_stockx.models.product_info_data import ProductInfoData
from io_stockx.models.product_info_meta import ProductInfoMeta
from io_stockx.models.product_info_product import ProductInfoProduct
from io_stockx.models.product_info_product_attributes import ProductInfoProductAttributes
from io_stockx.models.product_lookup_response import ProductLookupResponse
from io_stockx.models.product_response import ProductResponse
from io_stockx.models.product_response_product import ProductResponseProduct
from io_stockx.models.product_response_product_children import ProductResponseProductChildren
from io_stockx.models.product_response_product_children_productid import ProductResponseProductChildrenPRODUCTID
from io_stockx.models.product_response_product_children_productid_market import ProductResponseProductChildrenPRODUCTIDMarket
from io_stockx.models.product_response_product_media import ProductResponseProductMedia
from io_stockx.models.product_response_product_meta import ProductResponseProductMeta
from io_stockx.models.search_hit import SearchHit
from io_stockx.models.search_hit_media import SearchHitMedia
from io_stockx.models.search_hit_searchable_traits import SearchHitSearchableTraits
from io_stockx.models.search_results import SearchResults
from io_stockx.models.subscriptions_response import SubscriptionsResponse
from io_stockx.models.webhooks_get_response import WebhooksGetResponse
from io_stockx.models.webhooks_id_get_response import WebhooksIdGetResponse
from io_stockx.models.webhooks_post_request import WebhooksPostRequest
from io_stockx.models.webhooks_post_response import WebhooksPostResponse
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CustomerObjectMerchant(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute types, keyed by python attribute name.
    swagger_types = {
        'merchant_id': 'str',
        'paypal_email': 'str',
        'preferred_payout': 'str',
        'account_name': 'str'
    }
    # python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'merchant_id': 'merchantId',
        'paypal_email': 'paypalEmail',
        'preferred_payout': 'preferredPayout',
        'account_name': 'accountName'
    }
    def __init__(self, merchant_id=None, paypal_email=None, preferred_payout=None, account_name=None): # noqa: E501
        """CustomerObjectMerchant - a model defined in Swagger""" # noqa: E501
        self._merchant_id = None
        self._paypal_email = None
        self._preferred_payout = None
        self._account_name = None
        self.discriminator = None
        # All four fields are optional: only assign those explicitly given.
        if merchant_id is not None:
            self.merchant_id = merchant_id
        if paypal_email is not None:
            self.paypal_email = paypal_email
        if preferred_payout is not None:
            self.preferred_payout = preferred_payout
        if account_name is not None:
            self.account_name = account_name
    @property
    def merchant_id(self):
        """Gets the merchant_id of this CustomerObjectMerchant. # noqa: E501
        :return: The merchant_id of this CustomerObjectMerchant. # noqa: E501
        :rtype: str
        """
        return self._merchant_id
    @merchant_id.setter
    def merchant_id(self, merchant_id):
        """Sets the merchant_id of this CustomerObjectMerchant.
        :param merchant_id: The merchant_id of this CustomerObjectMerchant. # noqa: E501
        :type: str
        """
        self._merchant_id = merchant_id
    @property
    def paypal_email(self):
        """Gets the paypal_email of this CustomerObjectMerchant. # noqa: E501
        :return: The paypal_email of this CustomerObjectMerchant. # noqa: E501
        :rtype: str
        """
        return self._paypal_email
    @paypal_email.setter
    def paypal_email(self, paypal_email):
        """Sets the paypal_email of this CustomerObjectMerchant.
        :param paypal_email: The paypal_email of this CustomerObjectMerchant. # noqa: E501
        :type: str
        """
        self._paypal_email = paypal_email
    @property
    def preferred_payout(self):
        """Gets the preferred_payout of this CustomerObjectMerchant. # noqa: E501
        :return: The preferred_payout of this CustomerObjectMerchant. # noqa: E501
        :rtype: str
        """
        return self._preferred_payout
    @preferred_payout.setter
    def preferred_payout(self, preferred_payout):
        """Sets the preferred_payout of this CustomerObjectMerchant.
        :param preferred_payout: The preferred_payout of this CustomerObjectMerchant. # noqa: E501
        :type: str
        """
        self._preferred_payout = preferred_payout
    @property
    def account_name(self):
        """Gets the account_name of this CustomerObjectMerchant. # noqa: E501
        :return: The account_name of this CustomerObjectMerchant. # noqa: E501
        :rtype: str
        """
        return self._account_name
    @account_name.setter
    def account_name(self, account_name):
        """Sets the account_name of this CustomerObjectMerchant.
        :param account_name: The account_name of this CustomerObjectMerchant. # noqa: E501
        :type: str
        """
        self._account_name = account_name
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize: lists/dicts of nested models (anything
        # exposing to_dict) are converted element-wise; plain values pass
        # through unchanged.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable on Python 3 — confirm that is acceptable.
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CustomerObjectMerchant):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PortfolioIdDelRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute types, keyed by python attribute name.
    swagger_types = {
        'chain_id': 'str',
        'notes': 'str'
    }
    # python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'chain_id': 'chain_id',
        'notes': 'notes'
    }
    def __init__(self, chain_id=None, notes=None): # noqa: E501
        """PortfolioIdDelRequest - a model defined in Swagger""" # noqa: E501
        self._chain_id = None
        self._notes = None
        self.discriminator = None
        # Both fields are required: the property setters raise ValueError
        # on None, so constructing with the default arguments fails.
        self.chain_id = chain_id
        self.notes = notes
    @property
    def chain_id(self):
        """Gets the chain_id of this PortfolioIdDelRequest. # noqa: E501
        :return: The chain_id of this PortfolioIdDelRequest. # noqa: E501
        :rtype: str
        """
        return self._chain_id
    @chain_id.setter
    def chain_id(self, chain_id):
        """Sets the chain_id of this PortfolioIdDelRequest.
        :param chain_id: The chain_id of this PortfolioIdDelRequest. # noqa: E501
        :type: str
        :raises ValueError: if ``chain_id`` is None (required field)
        """
        if chain_id is None:
            raise ValueError("Invalid value for `chain_id`, must not be `None`") # noqa: E501
        self._chain_id = chain_id
    @property
    def notes(self):
        """Gets the notes of this PortfolioIdDelRequest. # noqa: E501
        :return: The notes of this PortfolioIdDelRequest. # noqa: E501
        :rtype: str
        """
        return self._notes
    @notes.setter
    def notes(self, notes):
        """Sets the notes of this PortfolioIdDelRequest.
        :param notes: The notes of this PortfolioIdDelRequest. # noqa: E501
        :type: str
        :raises ValueError: if ``notes`` is None (required field)
        """
        if notes is None:
            raise ValueError("Invalid value for `notes`, must not be `None`") # noqa: E501
        self._notes = notes
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize: lists/dicts of nested models (anything
        # exposing to_dict) are converted element-wise; plain values pass
        # through unchanged.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable on Python 3 — confirm that is acceptable.
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PortfolioIdDelRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PortfolioIdDelResponsePortfolioItemProductShipping(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute types, keyed by python attribute name.
    swagger_types = {
        'total_days_to_ship': 'int',
        'has_additional_days_to_ship': 'bool',
        'delivery_days_lower_bound': 'int',
        'delivery_days_upper_bound': 'int'
    }
    # python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'total_days_to_ship': 'totalDaysToShip',
        'has_additional_days_to_ship': 'hasAdditionalDaysToShip',
        'delivery_days_lower_bound': 'deliveryDaysLowerBound',
        'delivery_days_upper_bound': 'deliveryDaysUpperBound'
    }
    def __init__(self, total_days_to_ship=None, has_additional_days_to_ship=None, delivery_days_lower_bound=None, delivery_days_upper_bound=None): # noqa: E501
        """PortfolioIdDelResponsePortfolioItemProductShipping - a model defined in Swagger""" # noqa: E501
        self._total_days_to_ship = None
        self._has_additional_days_to_ship = None
        self._delivery_days_lower_bound = None
        self._delivery_days_upper_bound = None
        self.discriminator = None
        # All four fields are required: the property setters raise
        # ValueError on None, so constructing with the defaults fails.
        self.total_days_to_ship = total_days_to_ship
        self.has_additional_days_to_ship = has_additional_days_to_ship
        self.delivery_days_lower_bound = delivery_days_lower_bound
        self.delivery_days_upper_bound = delivery_days_upper_bound
    @property
    def total_days_to_ship(self):
        """Gets the total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :return: The total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :rtype: int
        """
        return self._total_days_to_ship
    @total_days_to_ship.setter
    def total_days_to_ship(self, total_days_to_ship):
        """Sets the total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.
        :param total_days_to_ship: The total_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :type: int
        :raises ValueError: if the value is None (required field)
        """
        if total_days_to_ship is None:
            raise ValueError("Invalid value for `total_days_to_ship`, must not be `None`") # noqa: E501
        self._total_days_to_ship = total_days_to_ship
    @property
    def has_additional_days_to_ship(self):
        """Gets the has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :return: The has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :rtype: bool
        """
        return self._has_additional_days_to_ship
    @has_additional_days_to_ship.setter
    def has_additional_days_to_ship(self, has_additional_days_to_ship):
        """Sets the has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping.
        :param has_additional_days_to_ship: The has_additional_days_to_ship of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :type: bool
        :raises ValueError: if the value is None (required field)
        """
        if has_additional_days_to_ship is None:
            raise ValueError("Invalid value for `has_additional_days_to_ship`, must not be `None`") # noqa: E501
        self._has_additional_days_to_ship = has_additional_days_to_ship
    @property
    def delivery_days_lower_bound(self):
        """Gets the delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :return: The delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :rtype: int
        """
        return self._delivery_days_lower_bound
    @delivery_days_lower_bound.setter
    def delivery_days_lower_bound(self, delivery_days_lower_bound):
        """Sets the delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.
        :param delivery_days_lower_bound: The delivery_days_lower_bound of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :type: int
        :raises ValueError: if the value is None (required field)
        """
        if delivery_days_lower_bound is None:
            raise ValueError("Invalid value for `delivery_days_lower_bound`, must not be `None`") # noqa: E501
        self._delivery_days_lower_bound = delivery_days_lower_bound
    @property
    def delivery_days_upper_bound(self):
        """Gets the delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :return: The delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :rtype: int
        """
        return self._delivery_days_upper_bound
    @delivery_days_upper_bound.setter
    def delivery_days_upper_bound(self, delivery_days_upper_bound):
        """Sets the delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping.
        :param delivery_days_upper_bound: The delivery_days_upper_bound of this PortfolioIdDelResponsePortfolioItemProductShipping. # noqa: E501
        :type: int
        :raises ValueError: if the value is None (required field)
        """
        if delivery_days_upper_bound is None:
            raise ValueError("Invalid value for `delivery_days_upper_bound`, must not be `None`") # noqa: E501
        self._delivery_days_upper_bound = delivery_days_upper_bound
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize: lists/dicts of nested models (anything
        # exposing to_dict) are converted element-wise; plain values pass
        # through unchanged.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable on Python 3 — confirm that is acceptable.
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PortfolioIdDelResponsePortfolioItemProductShipping):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AddressObject(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared attribute types, keyed by python attribute name.
    swagger_types = {
        'first_name': 'str',
        'last_name': 'str',
        'telephone': 'str',
        'street_address': 'str',
        'extended_address': 'str',
        'locality': 'str',
        'region': 'str',
        'postal_code': 'str',
        'country_code_alpha2': 'str'
    }
    # python attribute name -> JSON field name in the API payload.
    attribute_map = {
        'first_name': 'firstName',
        'last_name': 'lastName',
        'telephone': 'telephone',
        'street_address': 'streetAddress',
        'extended_address': 'extendedAddress',
        'locality': 'locality',
        'region': 'region',
        'postal_code': 'postalCode',
        'country_code_alpha2': 'countryCodeAlpha2'
    }
    def __init__(self, first_name=None, last_name=None, telephone=None, street_address=None, extended_address=None, locality=None, region=None, postal_code=None, country_code_alpha2=None): # noqa: E501
        """AddressObject - a model defined in Swagger""" # noqa: E501
        self._first_name = None
        self._last_name = None
        self._telephone = None
        self._street_address = None
        self._extended_address = None
        self._locality = None
        self._region = None
        self._postal_code = None
        self._country_code_alpha2 = None
        self.discriminator = None
        # All fields are optional: only assign those explicitly given.
        if first_name is not None:
            self.first_name = first_name
        if last_name is not None:
            self.last_name = last_name
        if telephone is not None:
            self.telephone = telephone
        if street_address is not None:
            self.street_address = street_address
        if extended_address is not None:
            self.extended_address = extended_address
        if locality is not None:
            self.locality = locality
        if region is not None:
            self.region = region
        if postal_code is not None:
            self.postal_code = postal_code
        if country_code_alpha2 is not None:
            self.country_code_alpha2 = country_code_alpha2
    @property
    def first_name(self):
        """Gets the first_name of this AddressObject. # noqa: E501
        :return: The first_name of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._first_name
    @first_name.setter
    def first_name(self, first_name):
        """Sets the first_name of this AddressObject.
        :param first_name: The first_name of this AddressObject. # noqa: E501
        :type: str
        """
        self._first_name = first_name
    @property
    def last_name(self):
        """Gets the last_name of this AddressObject. # noqa: E501
        :return: The last_name of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._last_name
    @last_name.setter
    def last_name(self, last_name):
        """Sets the last_name of this AddressObject.
        :param last_name: The last_name of this AddressObject. # noqa: E501
        :type: str
        """
        self._last_name = last_name
    @property
    def telephone(self):
        """Gets the telephone of this AddressObject. # noqa: E501
        :return: The telephone of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._telephone
    @telephone.setter
    def telephone(self, telephone):
        """Sets the telephone of this AddressObject.
        :param telephone: The telephone of this AddressObject. # noqa: E501
        :type: str
        """
        self._telephone = telephone
    @property
    def street_address(self):
        """Gets the street_address of this AddressObject. # noqa: E501
        :return: The street_address of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._street_address
    @street_address.setter
    def street_address(self, street_address):
        """Sets the street_address of this AddressObject.
        :param street_address: The street_address of this AddressObject. # noqa: E501
        :type: str
        """
        self._street_address = street_address
    @property
    def extended_address(self):
        """Gets the extended_address of this AddressObject. # noqa: E501
        :return: The extended_address of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._extended_address
    @extended_address.setter
    def extended_address(self, extended_address):
        """Sets the extended_address of this AddressObject.
        :param extended_address: The extended_address of this AddressObject. # noqa: E501
        :type: str
        """
        self._extended_address = extended_address
    @property
    def locality(self):
        """Gets the locality of this AddressObject. # noqa: E501
        :return: The locality of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._locality
    @locality.setter
    def locality(self, locality):
        """Sets the locality of this AddressObject.
        :param locality: The locality of this AddressObject. # noqa: E501
        :type: str
        """
        self._locality = locality
    @property
    def region(self):
        """Gets the region of this AddressObject. # noqa: E501
        :return: The region of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._region
    @region.setter
    def region(self, region):
        """Sets the region of this AddressObject.
        :param region: The region of this AddressObject. # noqa: E501
        :type: str
        """
        self._region = region
    @property
    def postal_code(self):
        """Gets the postal_code of this AddressObject. # noqa: E501
        :return: The postal_code of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._postal_code
    @postal_code.setter
    def postal_code(self, postal_code):
        """Sets the postal_code of this AddressObject.
        :param postal_code: The postal_code of this AddressObject. # noqa: E501
        :type: str
        """
        self._postal_code = postal_code
    @property
    def country_code_alpha2(self):
        """Gets the country_code_alpha2 of this AddressObject. # noqa: E501
        :return: The country_code_alpha2 of this AddressObject. # noqa: E501
        :rtype: str
        """
        return self._country_code_alpha2
    @country_code_alpha2.setter
    def country_code_alpha2(self, country_code_alpha2):
        """Sets the country_code_alpha2 of this AddressObject.
        :param country_code_alpha2: The country_code_alpha2 of this AddressObject. # noqa: E501
        :type: str
        """
        self._country_code_alpha2 = country_code_alpha2
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize: lists/dicts of nested models (anything
        # exposing to_dict) are converted element-wise; plain values pass
        # through unchanged.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    # NOTE(review): __eq__ is defined without __hash__, which makes
    # instances unhashable on Python 3 — confirm that is acceptable.
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AddressObject):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.search_hit_media import SearchHitMedia # noqa: F401,E501
from io_stockx.models.search_hit_searchable_traits import SearchHitSearchableTraits # noqa: F401,E501
class SearchHit(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'brand': 'str',
'thumbnail_url': 'str',
'media': 'SearchHitMedia',
'url': 'str',
'release_date': 'str',
'categories': 'list[str]',
'product_category': 'str',
'ticker_symbol': 'str',
'style_id': 'str',
'make': 'str',
'model': 'str',
'short_description': 'str',
'gender': 'str',
'colorway': 'str',
'price': 'int',
'description': 'str',
'highest_bid': 'str',
'total_dollars': 'str',
'lowest_ask': 'str',
'last_sale': 'str',
'sales_last_72': 'int',
'deadstock_sold': 'int',
'quality_bid': 'int',
'active': 'int',
'new_release': 'str',
'searchable_traits': 'SearchHitSearchableTraits',
'object_id': 'str',
'annual_high': 'str',
'annual_low': 'str',
'deadstock_range_low': 'str',
'deadstock_range_high': 'str',
'average_deadstock_price': 'str',
'change_value': 'str'
}
attribute_map = {
'name': 'name',
'brand': 'brand',
'thumbnail_url': 'thumbnail_url',
'media': 'media',
'url': 'url',
'release_date': 'release_date',
'categories': 'categories',
'product_category': 'product_category',
'ticker_symbol': 'ticker_symbol',
'style_id': 'style_id',
'make': 'make',
'model': 'model',
'short_description': 'short_description',
'gender': 'gender',
'colorway': 'colorway',
'price': 'price',
'description': 'description',
'highest_bid': 'highest_bid',
'total_dollars': 'total_dollars',
'lowest_ask': 'lowest_ask',
'last_sale': 'last_sale',
'sales_last_72': 'sales_last_72',
'deadstock_sold': 'deadstock_sold',
'quality_bid': 'quality_bid',
'active': 'active',
'new_release': 'new_release',
'searchable_traits': 'searchable_traits',
'object_id': 'objectID',
'annual_high': 'annual_high',
'annual_low': 'annual_low',
'deadstock_range_low': 'deadstock_range_low',
'deadstock_range_high': 'deadstock_range_high',
'average_deadstock_price': 'average_deadstock_price',
'change_value': 'change_value'
}
def __init__(self, name=None, brand=None, thumbnail_url=None, media=None, url=None, release_date=None, categories=None, product_category=None, ticker_symbol=None, style_id=None, make=None, model=None, short_description=None, gender=None, colorway=None, price=None, description=None, highest_bid=None, total_dollars=None, lowest_ask=None, last_sale=None, sales_last_72=None, deadstock_sold=None, quality_bid=None, active=None, new_release=None, searchable_traits=None, object_id=None, annual_high=None, annual_low=None, deadstock_range_low=None, deadstock_range_high=None, average_deadstock_price=None, change_value=None): # noqa: E501
"""SearchHit - a model defined in Swagger""" # noqa: E501
self._name = None
self._brand = None
self._thumbnail_url = None
self._media = None
self._url = None
self._release_date = None
self._categories = None
self._product_category = None
self._ticker_symbol = None
self._style_id = None
self._make = None
self._model = None
self._short_description = None
self._gender = None
self._colorway = None
self._price = None
self._description = None
self._highest_bid = None
self._total_dollars = None
self._lowest_ask = None
self._last_sale = None
self._sales_last_72 = None
self._deadstock_sold = None
self._quality_bid = None
self._active = None
self._new_release = None
self._searchable_traits = None
self._object_id = None
self._annual_high = None
self._annual_low = None
self._deadstock_range_low = None
self._deadstock_range_high = None
self._average_deadstock_price = None
self._change_value = None
self.discriminator = None
if name is not None:
self.name = name
if brand is not None:
self.brand = brand
if thumbnail_url is not None:
self.thumbnail_url = thumbnail_url
if media is not None:
self.media = media
if url is not None:
self.url = url
if release_date is not None:
self.release_date = release_date
if categories is not None:
self.categories = categories
if product_category is not None:
self.product_category = product_category
if ticker_symbol is not None:
self.ticker_symbol = ticker_symbol
if style_id is not None:
self.style_id = style_id
if make is not None:
self.make = make
if model is not None:
self.model = model
if short_description is not None:
self.short_description = short_description
if gender is not None:
self.gender = gender
if colorway is not None:
self.colorway = colorway
if price is not None:
self.price = price
if description is not None:
self.description = description
if highest_bid is not None:
self.highest_bid = highest_bid
if total_dollars is not None:
self.total_dollars = total_dollars
if lowest_ask is not None:
self.lowest_ask = lowest_ask
if last_sale is not None:
self.last_sale = last_sale
if sales_last_72 is not None:
self.sales_last_72 = sales_last_72
if deadstock_sold is not None:
self.deadstock_sold = deadstock_sold
if quality_bid is not None:
self.quality_bid = quality_bid
if active is not None:
self.active = active
if new_release is not None:
self.new_release = new_release
if searchable_traits is not None:
self.searchable_traits = searchable_traits
if object_id is not None:
self.object_id = object_id
if annual_high is not None:
self.annual_high = annual_high
if annual_low is not None:
self.annual_low = annual_low
if deadstock_range_low is not None:
self.deadstock_range_low = deadstock_range_low
if deadstock_range_high is not None:
self.deadstock_range_high = deadstock_range_high
if average_deadstock_price is not None:
self.average_deadstock_price = average_deadstock_price
if change_value is not None:
self.change_value = change_value
@property
def name(self):
"""Gets the name of this SearchHit. # noqa: E501
:return: The name of this SearchHit. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SearchHit.
:param name: The name of this SearchHit. # noqa: E501
:type: str
"""
self._name = name
@property
def brand(self):
"""Gets the brand of this SearchHit. # noqa: E501
:return: The brand of this SearchHit. # noqa: E501
:rtype: str
"""
return self._brand
@brand.setter
def brand(self, brand):
"""Sets the brand of this SearchHit.
:param brand: The brand of this SearchHit. # noqa: E501
:type: str
"""
self._brand = brand
@property
def thumbnail_url(self):
"""Gets the thumbnail_url of this SearchHit. # noqa: E501
:return: The thumbnail_url of this SearchHit. # noqa: E501
:rtype: str
"""
return self._thumbnail_url
@thumbnail_url.setter
def thumbnail_url(self, thumbnail_url):
"""Sets the thumbnail_url of this SearchHit.
:param thumbnail_url: The thumbnail_url of this SearchHit. # noqa: E501
:type: str
"""
self._thumbnail_url = thumbnail_url
@property
def media(self):
"""Gets the media of this SearchHit. # noqa: E501
:return: The media of this SearchHit. # noqa: E501
:rtype: SearchHitMedia
"""
return self._media
@media.setter
def media(self, media):
"""Sets the media of this SearchHit.
:param media: The media of this SearchHit. # noqa: E501
:type: SearchHitMedia
"""
self._media = media
@property
def url(self):
"""Gets the url of this SearchHit. # noqa: E501
:return: The url of this SearchHit. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this SearchHit.
:param url: The url of this SearchHit. # noqa: E501
:type: str
"""
self._url = url
@property
def release_date(self):
"""Gets the release_date of this SearchHit. # noqa: E501
:return: The release_date of this SearchHit. # noqa: E501
:rtype: str
"""
return self._release_date
@release_date.setter
def release_date(self, release_date):
"""Sets the release_date of this SearchHit.
:param release_date: The release_date of this SearchHit. # noqa: E501
:type: str
"""
self._release_date = release_date
@property
def categories(self):
"""Gets the categories of this SearchHit. # noqa: E501
:return: The categories of this SearchHit. # noqa: E501
:rtype: list[str]
"""
return self._categories
@categories.setter
def categories(self, categories):
"""Sets the categories of this SearchHit.
:param categories: The categories of this SearchHit. # noqa: E501
:type: list[str]
"""
self._categories = categories
@property
def product_category(self):
"""Gets the product_category of this SearchHit. # noqa: E501
:return: The product_category of this SearchHit. # noqa: E501
:rtype: str
"""
return self._product_category
@product_category.setter
def product_category(self, product_category):
"""Sets the product_category of this SearchHit.
:param product_category: The product_category of this SearchHit. # noqa: E501
:type: str
"""
self._product_category = product_category
@property
def ticker_symbol(self):
"""Gets the ticker_symbol of this SearchHit. # noqa: E501
:return: The ticker_symbol of this SearchHit. # noqa: E501
:rtype: str
"""
return self._ticker_symbol
@ticker_symbol.setter
def ticker_symbol(self, ticker_symbol):
"""Sets the ticker_symbol of this SearchHit.
:param ticker_symbol: The ticker_symbol of this SearchHit. # noqa: E501
:type: str
"""
self._ticker_symbol = ticker_symbol
@property
def style_id(self):
"""Gets the style_id of this SearchHit. # noqa: E501
:return: The style_id of this SearchHit. # noqa: E501
:rtype: str
"""
return self._style_id
@style_id.setter
def style_id(self, style_id):
"""Sets the style_id of this SearchHit.
:param style_id: The style_id of this SearchHit. # noqa: E501
:type: str
"""
self._style_id = style_id
@property
def make(self):
"""Gets the make of this SearchHit. # noqa: E501
:return: The make of this SearchHit. # noqa: E501
:rtype: str
"""
return self._make
@make.setter
def make(self, make):
"""Sets the make of this SearchHit.
:param make: The make of this SearchHit. # noqa: E501
:type: str
"""
self._make = make
@property
def model(self):
"""Gets the model of this SearchHit. # noqa: E501
:return: The model of this SearchHit. # noqa: E501
:rtype: str
"""
return self._model
@model.setter
def model(self, model):
"""Sets the model of this SearchHit.
:param model: The model of this SearchHit. # noqa: E501
:type: str
"""
self._model = model
@property
def short_description(self):
"""Gets the short_description of this SearchHit. # noqa: E501
:return: The short_description of this SearchHit. # noqa: E501
:rtype: str
"""
return self._short_description
@short_description.setter
def short_description(self, short_description):
"""Sets the short_description of this SearchHit.
:param short_description: The short_description of this SearchHit. # noqa: E501
:type: str
"""
self._short_description = short_description
@property
def gender(self):
"""Gets the gender of this SearchHit. # noqa: E501
:return: The gender of this SearchHit. # noqa: E501
:rtype: str
"""
return self._gender
@gender.setter
def gender(self, gender):
"""Sets the gender of this SearchHit.
:param gender: The gender of this SearchHit. # noqa: E501
:type: str
"""
self._gender = gender
@property
def colorway(self):
"""Gets the colorway of this SearchHit. # noqa: E501
:return: The colorway of this SearchHit. # noqa: E501
:rtype: str
"""
return self._colorway
@colorway.setter
def colorway(self, colorway):
"""Sets the colorway of this SearchHit.
:param colorway: The colorway of this SearchHit. # noqa: E501
:type: str
"""
self._colorway = colorway
@property
def price(self):
"""Gets the price of this SearchHit. # noqa: E501
:return: The price of this SearchHit. # noqa: E501
:rtype: int
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this SearchHit.
:param price: The price of this SearchHit. # noqa: E501
:type: int
"""
self._price = price
@property
def description(self):
"""Gets the description of this SearchHit. # noqa: E501
:return: The description of this SearchHit. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this SearchHit.
:param description: The description of this SearchHit. # noqa: E501
:type: str
"""
self._description = description
@property
def highest_bid(self):
"""Gets the highest_bid of this SearchHit. # noqa: E501
:return: The highest_bid of this SearchHit. # noqa: E501
:rtype: str
"""
return self._highest_bid
@highest_bid.setter
def highest_bid(self, highest_bid):
"""Sets the highest_bid of this SearchHit.
:param highest_bid: The highest_bid of this SearchHit. # noqa: E501
:type: str
"""
self._highest_bid = highest_bid
@property
def total_dollars(self):
"""Gets the total_dollars of this SearchHit. # noqa: E501
:return: The total_dollars of this SearchHit. # noqa: E501
:rtype: str
"""
return self._total_dollars
@total_dollars.setter
def total_dollars(self, total_dollars):
"""Sets the total_dollars of this SearchHit.
:param total_dollars: The total_dollars of this SearchHit. # noqa: E501
:type: str
"""
self._total_dollars = total_dollars
@property
def lowest_ask(self):
"""Gets the lowest_ask of this SearchHit. # noqa: E501
:return: The lowest_ask of this SearchHit. # noqa: E501
:rtype: str
"""
return self._lowest_ask
@lowest_ask.setter
def lowest_ask(self, lowest_ask):
"""Sets the lowest_ask of this SearchHit.
:param lowest_ask: The lowest_ask of this SearchHit. # noqa: E501
:type: str
"""
self._lowest_ask = lowest_ask
@property
def last_sale(self):
"""Gets the last_sale of this SearchHit. # noqa: E501
:return: The last_sale of this SearchHit. # noqa: E501
:rtype: str
"""
return self._last_sale
@last_sale.setter
def last_sale(self, last_sale):
"""Sets the last_sale of this SearchHit.
:param last_sale: The last_sale of this SearchHit. # noqa: E501
:type: str
"""
self._last_sale = last_sale
@property
def sales_last_72(self):
"""Gets the sales_last_72 of this SearchHit. # noqa: E501
:return: The sales_last_72 of this SearchHit. # noqa: E501
:rtype: int
"""
return self._sales_last_72
@sales_last_72.setter
def sales_last_72(self, sales_last_72):
"""Sets the sales_last_72 of this SearchHit.
:param sales_last_72: The sales_last_72 of this SearchHit. # noqa: E501
:type: int
"""
self._sales_last_72 = sales_last_72
@property
def deadstock_sold(self):
"""Gets the deadstock_sold of this SearchHit. # noqa: E501
:return: The deadstock_sold of this SearchHit. # noqa: E501
:rtype: int
"""
return self._deadstock_sold
@deadstock_sold.setter
def deadstock_sold(self, deadstock_sold):
"""Sets the deadstock_sold of this SearchHit.
:param deadstock_sold: The deadstock_sold of this SearchHit. # noqa: E501
:type: int
"""
self._deadstock_sold = deadstock_sold
@property
def quality_bid(self):
"""Gets the quality_bid of this SearchHit. # noqa: E501
:return: The quality_bid of this SearchHit. # noqa: E501
:rtype: int
"""
return self._quality_bid
@quality_bid.setter
def quality_bid(self, quality_bid):
"""Sets the quality_bid of this SearchHit.
:param quality_bid: The quality_bid of this SearchHit. # noqa: E501
:type: int
"""
self._quality_bid = quality_bid
@property
def active(self):
"""Gets the active of this SearchHit. # noqa: E501
:return: The active of this SearchHit. # noqa: E501
:rtype: int
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this SearchHit.
:param active: The active of this SearchHit. # noqa: E501
:type: int
"""
self._active = active
@property
def new_release(self):
"""Gets the new_release of this SearchHit. # noqa: E501
:return: The new_release of this SearchHit. # noqa: E501
:rtype: str
"""
return self._new_release
@new_release.setter
def new_release(self, new_release):
"""Sets the new_release of this SearchHit.
:param new_release: The new_release of this SearchHit. # noqa: E501
:type: str
"""
self._new_release = new_release
@property
def searchable_traits(self):
"""Gets the searchable_traits of this SearchHit. # noqa: E501
:return: The searchable_traits of this SearchHit. # noqa: E501
:rtype: SearchHitSearchableTraits
"""
return self._searchable_traits
@searchable_traits.setter
def searchable_traits(self, searchable_traits):
"""Sets the searchable_traits of this SearchHit.
:param searchable_traits: The searchable_traits of this SearchHit. # noqa: E501
:type: SearchHitSearchableTraits
"""
self._searchable_traits = searchable_traits
@property
def object_id(self):
"""Gets the object_id of this SearchHit. # noqa: E501
:return: The object_id of this SearchHit. # noqa: E501
:rtype: str
"""
return self._object_id
@object_id.setter
def object_id(self, object_id):
"""Sets the object_id of this SearchHit.
:param object_id: The object_id of this SearchHit. # noqa: E501
:type: str
"""
self._object_id = object_id
@property
def annual_high(self):
"""Gets the annual_high of this SearchHit. # noqa: E501
:return: The annual_high of this SearchHit. # noqa: E501
:rtype: str
"""
return self._annual_high
@annual_high.setter
def annual_high(self, annual_high):
"""Sets the annual_high of this SearchHit.
:param annual_high: The annual_high of this SearchHit. # noqa: E501
:type: str
"""
self._annual_high = annual_high
@property
def annual_low(self):
"""Gets the annual_low of this SearchHit. # noqa: E501
:return: The annual_low of this SearchHit. # noqa: E501
:rtype: str
"""
return self._annual_low
@annual_low.setter
def annual_low(self, annual_low):
"""Sets the annual_low of this SearchHit.
:param annual_low: The annual_low of this SearchHit. # noqa: E501
:type: str
"""
self._annual_low = annual_low
@property
def deadstock_range_low(self):
"""Gets the deadstock_range_low of this SearchHit. # noqa: E501
:return: The deadstock_range_low of this SearchHit. # noqa: E501
:rtype: str
"""
return self._deadstock_range_low
@deadstock_range_low.setter
def deadstock_range_low(self, deadstock_range_low):
"""Sets the deadstock_range_low of this SearchHit.
:param deadstock_range_low: The deadstock_range_low of this SearchHit. # noqa: E501
:type: str
"""
self._deadstock_range_low = deadstock_range_low
@property
def deadstock_range_high(self):
"""Gets the deadstock_range_high of this SearchHit. # noqa: E501
:return: The deadstock_range_high of this SearchHit. # noqa: E501
:rtype: str
"""
return self._deadstock_range_high
@deadstock_range_high.setter
def deadstock_range_high(self, deadstock_range_high):
"""Sets the deadstock_range_high of this SearchHit.
:param deadstock_range_high: The deadstock_range_high of this SearchHit. # noqa: E501
:type: str
"""
self._deadstock_range_high = deadstock_range_high
@property
def average_deadstock_price(self):
"""Gets the average_deadstock_price of this SearchHit. # noqa: E501
:return: The average_deadstock_price of this SearchHit. # noqa: E501
:rtype: str
"""
return self._average_deadstock_price
@average_deadstock_price.setter
def average_deadstock_price(self, average_deadstock_price):
"""Sets the average_deadstock_price of this SearchHit.
:param average_deadstock_price: The average_deadstock_price of this SearchHit. # noqa: E501
:type: str
"""
self._average_deadstock_price = average_deadstock_price
@property
def change_value(self):
"""Gets the change_value of this SearchHit. # noqa: E501
:return: The change_value of this SearchHit. # noqa: E501
:rtype: str
"""
return self._change_value
@change_value.setter
def change_value(self, change_value):
"""Sets the change_value of this SearchHit.
:param change_value: The change_value of this SearchHit. # noqa: E501
:type: str
"""
self._change_value = change_value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SearchHit):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PortfolioRequestPortfolioItem(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # swagger_types: attribute name -> declared Swagger type.
    # attribute_map: attribute name -> JSON key on the wire (camelCase).
    swagger_types = {
        'amount': 'str',
        'expires_at': 'str',
        'matched_with_date': 'str',
        'condition': 'str',
        'action': 'int',
        'sku_uuid': 'str'
    }

    attribute_map = {
        'amount': 'amount',
        'expires_at': 'expiresAt',
        'matched_with_date': 'matchedWithDate',
        'condition': 'condition',
        'action': 'action',
        'sku_uuid': 'skuUuid'
    }

    def __init__(self, amount=None, expires_at=None, matched_with_date=None, condition=None, action=None, sku_uuid=None):  # noqa: E501
        """PortfolioRequestPortfolioItem - a model defined in Swagger"""  # noqa: E501
        # Snapshot the arguments (parameter order preserved) before any
        # other locals exist.
        args = dict(locals())
        del args['self']
        for field in args:
            setattr(self, '_' + field, None)
        self.discriminator = None
        # Non-None arguments are routed through the property setters.
        for field, value in args.items():
            if value is not None:
                setattr(self, field, value)

    @property
    def amount(self):
        """str: The amount of this PortfolioRequestPortfolioItem."""
        return self._amount

    @amount.setter
    def amount(self, amount):
        self._amount = amount

    @property
    def expires_at(self):
        """str: The expires_at of this PortfolioRequestPortfolioItem."""
        return self._expires_at

    @expires_at.setter
    def expires_at(self, expires_at):
        self._expires_at = expires_at

    @property
    def matched_with_date(self):
        """str: The matched_with_date of this PortfolioRequestPortfolioItem."""
        return self._matched_with_date

    @matched_with_date.setter
    def matched_with_date(self, matched_with_date):
        self._matched_with_date = matched_with_date

    @property
    def condition(self):
        """str: The condition of this PortfolioRequestPortfolioItem."""
        return self._condition

    @condition.setter
    def condition(self, condition):
        self._condition = condition

    @property
    def action(self):
        """int: The action of this PortfolioRequestPortfolioItem."""
        return self._action

    @action.setter
    def action(self, action):
        self._action = action

    @property
    def sku_uuid(self):
        """str: The sku_uuid of this PortfolioRequestPortfolioItem."""
        return self._sku_uuid

    @sku_uuid.setter
    def sku_uuid(self, sku_uuid):
        self._sku_uuid = sku_uuid

    def to_dict(self):
        """Return the model properties as a plain dict."""
        def _convert(value):
            # Nested generated models expose to_dict(); recurse one level
            # into lists and dicts, as the generated code does.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a PortfolioRequestPortfolioItem with equal state."""
        return (isinstance(other, PortfolioRequestPortfolioItem)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.address_object import AddressObject # noqa: F401,E501
class BillingObject(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # swagger_types: attribute name -> declared Swagger type.
    # attribute_map: attribute name -> JSON key on the wire.
    # NOTE: 'address' maps to the capitalized JSON key 'Address'.
    swagger_types = {
        'card_type': 'str',
        'token': 'str',
        'last4': 'str',
        'account_email': 'str',
        'expiration_date': 'str',
        'cardholder_name': 'str',
        'address': 'AddressObject'
    }

    attribute_map = {
        'card_type': 'cardType',
        'token': 'token',
        'last4': 'last4',
        'account_email': 'accountEmail',
        'expiration_date': 'expirationDate',
        'cardholder_name': 'cardholderName',
        'address': 'Address'
    }

    def __init__(self, card_type=None, token=None, last4=None, account_email=None, expiration_date=None, cardholder_name=None, address=None):  # noqa: E501
        """BillingObject - a model defined in Swagger"""  # noqa: E501
        # Snapshot the arguments (parameter order preserved) before any
        # other locals exist.
        args = dict(locals())
        del args['self']
        for field in args:
            setattr(self, '_' + field, None)
        self.discriminator = None
        # Non-None arguments are routed through the property setters.
        for field, value in args.items():
            if value is not None:
                setattr(self, field, value)

    @property
    def card_type(self):
        """str: The card_type of this BillingObject."""
        return self._card_type

    @card_type.setter
    def card_type(self, card_type):
        self._card_type = card_type

    @property
    def token(self):
        """str: The token of this BillingObject."""
        return self._token

    @token.setter
    def token(self, token):
        self._token = token

    @property
    def last4(self):
        """str: The last4 of this BillingObject."""
        return self._last4

    @last4.setter
    def last4(self, last4):
        self._last4 = last4

    @property
    def account_email(self):
        """str: The account_email of this BillingObject."""
        return self._account_email

    @account_email.setter
    def account_email(self, account_email):
        self._account_email = account_email

    @property
    def expiration_date(self):
        """str: The expiration_date of this BillingObject."""
        return self._expiration_date

    @expiration_date.setter
    def expiration_date(self, expiration_date):
        self._expiration_date = expiration_date

    @property
    def cardholder_name(self):
        """str: The cardholder_name of this BillingObject."""
        return self._cardholder_name

    @cardholder_name.setter
    def cardholder_name(self, cardholder_name):
        self._cardholder_name = cardholder_name

    @property
    def address(self):
        """AddressObject: The address of this BillingObject (JSON key 'Address')."""
        return self._address

    @address.setter
    def address(self, address):
        self._address = address

    def to_dict(self):
        """Return the model properties as a plain dict."""
        def _convert(value):
            # Nested generated models expose to_dict(); recurse one level
            # into lists and dicts, as the generated code does.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _convert(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is a BillingObject with equal state."""
        return isinstance(other, BillingObject) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.portfolio_id_del_response_portfolio_item_merchant import PortfolioIdDelResponsePortfolioItemMerchant # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_product import PortfolioIdDelResponsePortfolioItemProduct # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_tracking import PortfolioIdDelResponsePortfolioItemTracking # noqa: F401,E501
class PortfolioIdDelResponsePortfolioItem(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.

    Every field listed in ``swagger_types`` is required: reading it goes
    through ``__getattr__`` to a private ``_<name>`` backing slot, and
    writing it goes through ``__setattr__``, which rejects ``None``.
    """

    swagger_types = {
        'chain_id': 'str',
        'customer_id': 'int',
        'inventory_id': 'str',
        'product_id': 'str',
        'sku_uuid': 'str',
        'merchant_id': 'int',
        'condition': 'int',
        'action': 'int',
        'action_by': 'int',
        'amount': 'int',
        'expires_at': 'str',
        'expires_at_time': 'int',
        'gain_loss_dollars': 'int',
        'gain_loss_percentage': 'int',
        'market_value': 'str',
        'matched_state': 'int',
        'purchase_date': 'str',
        'purchase_date_time': 'int',
        'state': 'int',
        'text': 'str',
        'notes': 'str',
        'created_at_time': 'int',
        'can_edit': 'bool',
        'can_delete': 'bool',
        'tracking': 'PortfolioIdDelResponsePortfolioItemTracking',
        'meta': 'object',
        'product': 'PortfolioIdDelResponsePortfolioItemProduct',
        'merchant': 'PortfolioIdDelResponsePortfolioItemMerchant'
    }

    attribute_map = {
        'chain_id': 'chainId',
        'customer_id': 'customerId',
        'inventory_id': 'inventoryId',
        'product_id': 'productId',
        'sku_uuid': 'skuUuid',
        'merchant_id': 'merchantId',
        'condition': 'condition',
        'action': 'action',
        'action_by': 'actionBy',
        'amount': 'amount',
        'expires_at': 'expiresAt',
        'expires_at_time': 'expiresAtTime',
        'gain_loss_dollars': 'gainLossDollars',
        'gain_loss_percentage': 'gainLossPercentage',
        'market_value': 'marketValue',
        'matched_state': 'matchedState',
        'purchase_date': 'purchaseDate',
        'purchase_date_time': 'purchaseDateTime',
        'state': 'state',
        'text': 'text',
        'notes': 'notes',
        'created_at_time': 'createdAtTime',
        'can_edit': 'canEdit',
        'can_delete': 'canDelete',
        'tracking': 'Tracking',
        'meta': 'meta',
        'product': 'product',
        'merchant': 'Merchant'
    }

    def __init__(self, chain_id=None, customer_id=None, inventory_id=None, product_id=None, sku_uuid=None, merchant_id=None, condition=None, action=None, action_by=None, amount=None, expires_at=None, expires_at_time=None, gain_loss_dollars=None, gain_loss_percentage=None, market_value=None, matched_state=None, purchase_date=None, purchase_date_time=None, state=None, text=None, notes=None, created_at_time=None, can_edit=None, can_delete=None, tracking=None, meta=None, product=None, merchant=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItem - a model defined in Swagger"""  # noqa: E501
        # Snapshot the keyword arguments before introducing new locals;
        # swagger_types lists the fields in the same order as the signature.
        supplied = dict(locals())
        del supplied['self']
        # Initialize every private backing slot to None (bypasses validation).
        for field in self.swagger_types:
            object.__setattr__(self, '_' + field, None)
        self.discriminator = None
        # Route each field through __setattr__ so the not-None rule applies,
        # in declaration order (the first None argument raises ValueError).
        for field in self.swagger_types:
            setattr(self, field, supplied[field])

    def __getattr__(self, name):
        """Resolve a swagger field by reading its private backing slot."""
        # Only reached when normal attribute lookup fails, i.e. for the
        # public field names; everything else is a genuine miss.
        if name in self.swagger_types:
            return getattr(self, '_' + name)
        raise AttributeError(name)

    def __setattr__(self, name, value):
        """Write an attribute, enforcing that swagger fields are not None."""
        if name in self.swagger_types:
            if value is None:
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % name)  # noqa: E501
            object.__setattr__(self, '_' + name, value)
        else:
            # Non-field attributes (e.g. discriminator) are stored verbatim.
            object.__setattr__(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize model elements; leave plain values untouched.
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict()
                                      if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PortfolioIdDelResponsePortfolioItem):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.search_hit import SearchHit # noqa: F401,E501
class SearchResults(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    swagger_types = {
        'hits': 'list[SearchHit]',
        'nb_hits': 'int'
    }

    attribute_map = {
        'hits': 'hits',
        'nb_hits': 'nbHits'
    }

    def __init__(self, hits=None, nb_hits=None):  # noqa: E501
        """SearchResults - a model defined in Swagger"""  # noqa: E501
        self._hits = None
        self._nb_hits = None
        self.discriminator = None
        # Both fields are optional; only assign what the caller provided.
        if hits is not None:
            self.hits = hits
        if nb_hits is not None:
            self.nb_hits = nb_hits

    @property
    def hits(self):
        """The individual search hits of this SearchResults.

        :rtype: list[SearchHit]
        """
        return self._hits

    @hits.setter
    def hits(self, hits):
        """Set the hits of this SearchResults.

        :type: list[SearchHit]
        """
        self._hits = hits

    @property
    def nb_hits(self):
        """The total hit count of this SearchResults.

        :rtype: int
        """
        return self._nb_hits

    @nb_hits.setter
    def nb_hits(self, nb_hits):
        """Set the nb_hits of this SearchResults.

        :type: int
        """
        self._nb_hits = nb_hits

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize model elements; leave plain values untouched.
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: (val.to_dict()
                                      if hasattr(val, "to_dict") else val)
                                for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SearchResults):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProductInfoProductAttributes(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'product_category': 'str',
'url_key': 'str',
'slug': 'str',
'brand': 'str',
'ticker': 'str',
'style_id': 'str',
'model': 'str',
'name': 'str',
'title': 'str',
'size_locale': 'str',
'size_title': 'str',
'size_descriptor': 'str',
'size_all_descriptor': 'str',
'gender': 'str',
'condition': 'str',
'minimum_bid': 'int',
'uniq_bids': 'bool',
'primary_category': 'str',
'secondary_category': 'str'
}
attribute_map = {
'product_category': 'product_category',
'url_key': 'url_key',
'slug': 'slug',
'brand': 'brand',
'ticker': 'ticker',
'style_id': 'style_id',
'model': 'model',
'name': 'name',
'title': 'title',
'size_locale': 'size_locale',
'size_title': 'size_title',
'size_descriptor': 'size_descriptor',
'size_all_descriptor': 'size_all_descriptor',
'gender': 'gender',
'condition': 'condition',
'minimum_bid': 'minimum_bid',
'uniq_bids': 'uniq_bids',
'primary_category': 'primary_category',
'secondary_category': 'secondary_category'
}
def __init__(self, product_category=None, url_key=None, slug=None, brand=None, ticker=None, style_id=None, model=None, name=None, title=None, size_locale=None, size_title=None, size_descriptor=None, size_all_descriptor=None, gender=None, condition=None, minimum_bid=None, uniq_bids=None, primary_category=None, secondary_category=None): # noqa: E501
"""ProductInfoProductAttributes - a model defined in Swagger""" # noqa: E501
self._product_category = None
self._url_key = None
self._slug = None
self._brand = None
self._ticker = None
self._style_id = None
self._model = None
self._name = None
self._title = None
self._size_locale = None
self._size_title = None
self._size_descriptor = None
self._size_all_descriptor = None
self._gender = None
self._condition = None
self._minimum_bid = None
self._uniq_bids = None
self._primary_category = None
self._secondary_category = None
self.discriminator = None
if product_category is not None:
self.product_category = product_category
if url_key is not None:
self.url_key = url_key
if slug is not None:
self.slug = slug
if brand is not None:
self.brand = brand
if ticker is not None:
self.ticker = ticker
if style_id is not None:
self.style_id = style_id
if model is not None:
self.model = model
if name is not None:
self.name = name
if title is not None:
self.title = title
if size_locale is not None:
self.size_locale = size_locale
if size_title is not None:
self.size_title = size_title
if size_descriptor is not None:
self.size_descriptor = size_descriptor
if size_all_descriptor is not None:
self.size_all_descriptor = size_all_descriptor
if gender is not None:
self.gender = gender
if condition is not None:
self.condition = condition
if minimum_bid is not None:
self.minimum_bid = minimum_bid
if uniq_bids is not None:
self.uniq_bids = uniq_bids
if primary_category is not None:
self.primary_category = primary_category
if secondary_category is not None:
self.secondary_category = secondary_category
@property
def product_category(self):
"""Gets the product_category of this ProductInfoProductAttributes. # noqa: E501
:return: The product_category of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._product_category
@product_category.setter
def product_category(self, product_category):
"""Sets the product_category of this ProductInfoProductAttributes.
:param product_category: The product_category of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._product_category = product_category
@property
def url_key(self):
"""Gets the url_key of this ProductInfoProductAttributes. # noqa: E501
:return: The url_key of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._url_key
@url_key.setter
def url_key(self, url_key):
"""Sets the url_key of this ProductInfoProductAttributes.
:param url_key: The url_key of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._url_key = url_key
@property
def slug(self):
"""Gets the slug of this ProductInfoProductAttributes. # noqa: E501
:return: The slug of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._slug
@slug.setter
def slug(self, slug):
"""Sets the slug of this ProductInfoProductAttributes.
:param slug: The slug of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._slug = slug
@property
def brand(self):
"""Gets the brand of this ProductInfoProductAttributes. # noqa: E501
:return: The brand of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._brand
@brand.setter
def brand(self, brand):
"""Sets the brand of this ProductInfoProductAttributes.
:param brand: The brand of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._brand = brand
@property
def ticker(self):
"""Gets the ticker of this ProductInfoProductAttributes. # noqa: E501
:return: The ticker of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._ticker
@ticker.setter
def ticker(self, ticker):
"""Sets the ticker of this ProductInfoProductAttributes.
:param ticker: The ticker of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._ticker = ticker
@property
def style_id(self):
"""Gets the style_id of this ProductInfoProductAttributes. # noqa: E501
:return: The style_id of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._style_id
@style_id.setter
def style_id(self, style_id):
"""Sets the style_id of this ProductInfoProductAttributes.
:param style_id: The style_id of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._style_id = style_id
@property
def model(self):
"""Gets the model of this ProductInfoProductAttributes. # noqa: E501
:return: The model of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._model
@model.setter
def model(self, model):
"""Sets the model of this ProductInfoProductAttributes.
:param model: The model of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._model = model
@property
def name(self):
"""Gets the name of this ProductInfoProductAttributes. # noqa: E501
:return: The name of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProductInfoProductAttributes.
:param name: The name of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._name = name
@property
def title(self):
"""Gets the title of this ProductInfoProductAttributes. # noqa: E501
:return: The title of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ProductInfoProductAttributes.
:param title: The title of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._title = title
@property
def size_locale(self):
"""Gets the size_locale of this ProductInfoProductAttributes. # noqa: E501
:return: The size_locale of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._size_locale
@size_locale.setter
def size_locale(self, size_locale):
"""Sets the size_locale of this ProductInfoProductAttributes.
:param size_locale: The size_locale of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._size_locale = size_locale
@property
def size_title(self):
"""Gets the size_title of this ProductInfoProductAttributes. # noqa: E501
:return: The size_title of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._size_title
@size_title.setter
def size_title(self, size_title):
"""Sets the size_title of this ProductInfoProductAttributes.
:param size_title: The size_title of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._size_title = size_title
@property
def size_descriptor(self):
"""Gets the size_descriptor of this ProductInfoProductAttributes. # noqa: E501
:return: The size_descriptor of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._size_descriptor
@size_descriptor.setter
def size_descriptor(self, size_descriptor):
"""Sets the size_descriptor of this ProductInfoProductAttributes.
:param size_descriptor: The size_descriptor of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._size_descriptor = size_descriptor
@property
def size_all_descriptor(self):
"""Gets the size_all_descriptor of this ProductInfoProductAttributes. # noqa: E501
:return: The size_all_descriptor of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._size_all_descriptor
@size_all_descriptor.setter
def size_all_descriptor(self, size_all_descriptor):
"""Sets the size_all_descriptor of this ProductInfoProductAttributes.
:param size_all_descriptor: The size_all_descriptor of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._size_all_descriptor = size_all_descriptor
@property
def gender(self):
"""Gets the gender of this ProductInfoProductAttributes. # noqa: E501
:return: The gender of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._gender
@gender.setter
def gender(self, gender):
"""Sets the gender of this ProductInfoProductAttributes.
:param gender: The gender of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._gender = gender
@property
def condition(self):
"""Gets the condition of this ProductInfoProductAttributes. # noqa: E501
:return: The condition of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this ProductInfoProductAttributes.
:param condition: The condition of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._condition = condition
@property
def minimum_bid(self):
"""Gets the minimum_bid of this ProductInfoProductAttributes. # noqa: E501
:return: The minimum_bid of this ProductInfoProductAttributes. # noqa: E501
:rtype: int
"""
return self._minimum_bid
@minimum_bid.setter
def minimum_bid(self, minimum_bid):
"""Sets the minimum_bid of this ProductInfoProductAttributes.
:param minimum_bid: The minimum_bid of this ProductInfoProductAttributes. # noqa: E501
:type: int
"""
self._minimum_bid = minimum_bid
@property
def uniq_bids(self):
"""Gets the uniq_bids of this ProductInfoProductAttributes. # noqa: E501
:return: The uniq_bids of this ProductInfoProductAttributes. # noqa: E501
:rtype: bool
"""
return self._uniq_bids
@uniq_bids.setter
def uniq_bids(self, uniq_bids):
"""Sets the uniq_bids of this ProductInfoProductAttributes.
:param uniq_bids: The uniq_bids of this ProductInfoProductAttributes. # noqa: E501
:type: bool
"""
self._uniq_bids = uniq_bids
@property
def primary_category(self):
"""Gets the primary_category of this ProductInfoProductAttributes. # noqa: E501
:return: The primary_category of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._primary_category
@primary_category.setter
def primary_category(self, primary_category):
"""Sets the primary_category of this ProductInfoProductAttributes.
:param primary_category: The primary_category of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._primary_category = primary_category
@property
def secondary_category(self):
"""Gets the secondary_category of this ProductInfoProductAttributes. # noqa: E501
:return: The secondary_category of this ProductInfoProductAttributes. # noqa: E501
:rtype: str
"""
return self._secondary_category
@secondary_category.setter
def secondary_category(self, secondary_category):
"""Sets the secondary_category of this ProductInfoProductAttributes.
:param secondary_category: The secondary_category of this ProductInfoProductAttributes. # noqa: E501
:type: str
"""
self._secondary_category = secondary_category
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProductInfoProductAttributes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.product_info_attributes_traits import ProductInfoAttributesTraits # noqa: F401,E501
class ProductInfoAttributes(object):
    """Attribute payload of a StockX product-info response.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Attribute name -> Swagger type name.
    swagger_types = {
        'product_uuid': 'str',
        'sku': 'str',
        'traits': 'ProductInfoAttributesTraits'
    }

    # Attribute name -> JSON key in the serialized payload.
    attribute_map = {
        'product_uuid': 'product_uuid',
        'sku': 'sku',
        'traits': 'traits'
    }

    def __init__(self, product_uuid=None, sku=None, traits=None):  # noqa: E501
        """ProductInfoAttributes - a model defined in Swagger"""  # noqa: E501
        self._product_uuid = None
        self._sku = None
        self._traits = None
        self.discriminator = None
        # Route every supplied value through its property setter.
        if product_uuid is not None:
            self.product_uuid = product_uuid
        if sku is not None:
            self.sku = sku
        if traits is not None:
            self.traits = traits

    @property
    def product_uuid(self):
        """The product_uuid of this ProductInfoAttributes (str)."""
        return self._product_uuid

    @product_uuid.setter
    def product_uuid(self, product_uuid):
        """Assign *product_uuid* to the backing ``_product_uuid`` slot."""
        self._product_uuid = product_uuid

    @property
    def sku(self):
        """The sku of this ProductInfoAttributes (str)."""
        return self._sku

    @sku.setter
    def sku(self, sku):
        """Assign *sku* to the backing ``_sku`` slot."""
        self._sku = sku

    @property
    def traits(self):
        """The traits of this ProductInfoAttributes (ProductInfoAttributesTraits)."""
        return self._traits

    @traits.setter
    def traits(self, traits):
        """Assign *traits* to the backing ``_traits`` slot."""
        self._traits = traits

    @staticmethod
    def _serialize(value):
        """Recursively collapse nested swagger models into plain Python data."""
        if isinstance(value, list):
            return [v.to_dict() if hasattr(v, "to_dict") else v
                    for v in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()}
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        return {attr: self._serialize(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with identical state."""
        return (isinstance(other, ProductInfoAttributes)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not (self == other)
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MarketDataMarket(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'product_id': 'int',
'sku_uuid': 'str',
'product_uuid': 'str',
'lowest_ask': 'int',
'lowest_ask_size': 'str',
'parent_lowest_ask': 'int',
'number_of_asks': 'int',
'sales_this_period': 'int',
'sales_last_period': 'int',
'highest_bid': 'int',
'highest_bid_size': 'str',
'number_of_bids': 'int',
'annual_high': 'int',
'annual_low': 'int',
'deadstock_range_low': 'int',
'deadstock_range_high': 'int',
'volatility': 'float',
'deadstock_sold': 'int',
'price_premium': 'float',
'average_deadstock_price': 'int',
'last_sale': 'int',
'last_sale_size': 'str',
'sales_last72_hours': 'int',
'change_value': 'int',
'change_percentage': 'float',
'abs_change_percentage': 'float',
'total_dollars': 'int',
'updated_at': 'int',
'last_lowest_ask_time': 'int',
'last_highest_bid_time': 'int',
'last_sale_date': 'str',
'created_at': 'str',
'deadstock_sold_rank': 'int',
'price_premium_rank': 'int',
'average_deadstock_price_rank': 'int',
'featured': 'str'
}
attribute_map = {
'product_id': 'productId',
'sku_uuid': 'skuUuid',
'product_uuid': 'productUuid',
'lowest_ask': 'lowestAsk',
'lowest_ask_size': 'lowestAskSize',
'parent_lowest_ask': 'parentLowestAsk',
'number_of_asks': 'numberOfAsks',
'sales_this_period': 'salesThisPeriod',
'sales_last_period': 'salesLastPeriod',
'highest_bid': 'highestBid',
'highest_bid_size': 'highestBidSize',
'number_of_bids': 'numberOfBids',
'annual_high': 'annualHigh',
'annual_low': 'annualLow',
'deadstock_range_low': 'deadstockRangeLow',
'deadstock_range_high': 'deadstockRangeHigh',
'volatility': 'volatility',
'deadstock_sold': 'deadstockSold',
'price_premium': 'pricePremium',
'average_deadstock_price': 'averageDeadstockPrice',
'last_sale': 'lastSale',
'last_sale_size': 'lastSaleSize',
'sales_last72_hours': 'salesLast72Hours',
'change_value': 'changeValue',
'change_percentage': 'changePercentage',
'abs_change_percentage': 'absChangePercentage',
'total_dollars': 'totalDollars',
'updated_at': 'updatedAt',
'last_lowest_ask_time': 'lastLowestAskTime',
'last_highest_bid_time': 'lastHighestBidTime',
'last_sale_date': 'lastSaleDate',
'created_at': 'createdAt',
'deadstock_sold_rank': 'deadstockSoldRank',
'price_premium_rank': 'pricePremiumRank',
'average_deadstock_price_rank': 'averageDeadstockPriceRank',
'featured': 'featured'
}
def __init__(self, product_id=None, sku_uuid=None, product_uuid=None, lowest_ask=None, lowest_ask_size=None, parent_lowest_ask=None, number_of_asks=None, sales_this_period=None, sales_last_period=None, highest_bid=None, highest_bid_size=None, number_of_bids=None, annual_high=None, annual_low=None, deadstock_range_low=None, deadstock_range_high=None, volatility=None, deadstock_sold=None, price_premium=None, average_deadstock_price=None, last_sale=None, last_sale_size=None, sales_last72_hours=None, change_value=None, change_percentage=None, abs_change_percentage=None, total_dollars=None, updated_at=None, last_lowest_ask_time=None, last_highest_bid_time=None, last_sale_date=None, created_at=None, deadstock_sold_rank=None, price_premium_rank=None, average_deadstock_price_rank=None, featured=None): # noqa: E501
"""MarketDataMarket - a model defined in Swagger""" # noqa: E501
self._product_id = None
self._sku_uuid = None
self._product_uuid = None
self._lowest_ask = None
self._lowest_ask_size = None
self._parent_lowest_ask = None
self._number_of_asks = None
self._sales_this_period = None
self._sales_last_period = None
self._highest_bid = None
self._highest_bid_size = None
self._number_of_bids = None
self._annual_high = None
self._annual_low = None
self._deadstock_range_low = None
self._deadstock_range_high = None
self._volatility = None
self._deadstock_sold = None
self._price_premium = None
self._average_deadstock_price = None
self._last_sale = None
self._last_sale_size = None
self._sales_last72_hours = None
self._change_value = None
self._change_percentage = None
self._abs_change_percentage = None
self._total_dollars = None
self._updated_at = None
self._last_lowest_ask_time = None
self._last_highest_bid_time = None
self._last_sale_date = None
self._created_at = None
self._deadstock_sold_rank = None
self._price_premium_rank = None
self._average_deadstock_price_rank = None
self._featured = None
self.discriminator = None
if product_id is not None:
self.product_id = product_id
if sku_uuid is not None:
self.sku_uuid = sku_uuid
if product_uuid is not None:
self.product_uuid = product_uuid
if lowest_ask is not None:
self.lowest_ask = lowest_ask
if lowest_ask_size is not None:
self.lowest_ask_size = lowest_ask_size
if parent_lowest_ask is not None:
self.parent_lowest_ask = parent_lowest_ask
if number_of_asks is not None:
self.number_of_asks = number_of_asks
if sales_this_period is not None:
self.sales_this_period = sales_this_period
if sales_last_period is not None:
self.sales_last_period = sales_last_period
if highest_bid is not None:
self.highest_bid = highest_bid
if highest_bid_size is not None:
self.highest_bid_size = highest_bid_size
if number_of_bids is not None:
self.number_of_bids = number_of_bids
if annual_high is not None:
self.annual_high = annual_high
if annual_low is not None:
self.annual_low = annual_low
if deadstock_range_low is not None:
self.deadstock_range_low = deadstock_range_low
if deadstock_range_high is not None:
self.deadstock_range_high = deadstock_range_high
if volatility is not None:
self.volatility = volatility
if deadstock_sold is not None:
self.deadstock_sold = deadstock_sold
if price_premium is not None:
self.price_premium = price_premium
if average_deadstock_price is not None:
self.average_deadstock_price = average_deadstock_price
if last_sale is not None:
self.last_sale = last_sale
if last_sale_size is not None:
self.last_sale_size = last_sale_size
if sales_last72_hours is not None:
self.sales_last72_hours = sales_last72_hours
if change_value is not None:
self.change_value = change_value
if change_percentage is not None:
self.change_percentage = change_percentage
if abs_change_percentage is not None:
self.abs_change_percentage = abs_change_percentage
if total_dollars is not None:
self.total_dollars = total_dollars
if updated_at is not None:
self.updated_at = updated_at
if last_lowest_ask_time is not None:
self.last_lowest_ask_time = last_lowest_ask_time
if last_highest_bid_time is not None:
self.last_highest_bid_time = last_highest_bid_time
if last_sale_date is not None:
self.last_sale_date = last_sale_date
if created_at is not None:
self.created_at = created_at
if deadstock_sold_rank is not None:
self.deadstock_sold_rank = deadstock_sold_rank
if price_premium_rank is not None:
self.price_premium_rank = price_premium_rank
if average_deadstock_price_rank is not None:
self.average_deadstock_price_rank = average_deadstock_price_rank
if featured is not None:
self.featured = featured
@property
def product_id(self):
"""Gets the product_id of this MarketDataMarket. # noqa: E501
:return: The product_id of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._product_id
@product_id.setter
def product_id(self, product_id):
"""Sets the product_id of this MarketDataMarket.
:param product_id: The product_id of this MarketDataMarket. # noqa: E501
:type: int
"""
self._product_id = product_id
@property
def sku_uuid(self):
"""Gets the sku_uuid of this MarketDataMarket. # noqa: E501
:return: The sku_uuid of this MarketDataMarket. # noqa: E501
:rtype: str
"""
return self._sku_uuid
@sku_uuid.setter
def sku_uuid(self, sku_uuid):
"""Sets the sku_uuid of this MarketDataMarket.
:param sku_uuid: The sku_uuid of this MarketDataMarket. # noqa: E501
:type: str
"""
self._sku_uuid = sku_uuid
@property
def product_uuid(self):
"""Gets the product_uuid of this MarketDataMarket. # noqa: E501
:return: The product_uuid of this MarketDataMarket. # noqa: E501
:rtype: str
"""
return self._product_uuid
@product_uuid.setter
def product_uuid(self, product_uuid):
"""Sets the product_uuid of this MarketDataMarket.
:param product_uuid: The product_uuid of this MarketDataMarket. # noqa: E501
:type: str
"""
self._product_uuid = product_uuid
@property
def lowest_ask(self):
"""Gets the lowest_ask of this MarketDataMarket. # noqa: E501
:return: The lowest_ask of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._lowest_ask
@lowest_ask.setter
def lowest_ask(self, lowest_ask):
"""Sets the lowest_ask of this MarketDataMarket.
:param lowest_ask: The lowest_ask of this MarketDataMarket. # noqa: E501
:type: int
"""
self._lowest_ask = lowest_ask
@property
def lowest_ask_size(self):
"""Gets the lowest_ask_size of this MarketDataMarket. # noqa: E501
:return: The lowest_ask_size of this MarketDataMarket. # noqa: E501
:rtype: str
"""
return self._lowest_ask_size
@lowest_ask_size.setter
def lowest_ask_size(self, lowest_ask_size):
"""Sets the lowest_ask_size of this MarketDataMarket.
:param lowest_ask_size: The lowest_ask_size of this MarketDataMarket. # noqa: E501
:type: str
"""
self._lowest_ask_size = lowest_ask_size
@property
def parent_lowest_ask(self):
"""Gets the parent_lowest_ask of this MarketDataMarket. # noqa: E501
:return: The parent_lowest_ask of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._parent_lowest_ask
@parent_lowest_ask.setter
def parent_lowest_ask(self, parent_lowest_ask):
"""Sets the parent_lowest_ask of this MarketDataMarket.
:param parent_lowest_ask: The parent_lowest_ask of this MarketDataMarket. # noqa: E501
:type: int
"""
self._parent_lowest_ask = parent_lowest_ask
@property
def number_of_asks(self):
"""Gets the number_of_asks of this MarketDataMarket. # noqa: E501
:return: The number_of_asks of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._number_of_asks
@number_of_asks.setter
def number_of_asks(self, number_of_asks):
"""Sets the number_of_asks of this MarketDataMarket.
:param number_of_asks: The number_of_asks of this MarketDataMarket. # noqa: E501
:type: int
"""
self._number_of_asks = number_of_asks
@property
def sales_this_period(self):
"""Gets the sales_this_period of this MarketDataMarket. # noqa: E501
:return: The sales_this_period of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._sales_this_period
@sales_this_period.setter
def sales_this_period(self, sales_this_period):
"""Sets the sales_this_period of this MarketDataMarket.
:param sales_this_period: The sales_this_period of this MarketDataMarket. # noqa: E501
:type: int
"""
self._sales_this_period = sales_this_period
@property
def sales_last_period(self):
"""Gets the sales_last_period of this MarketDataMarket. # noqa: E501
:return: The sales_last_period of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._sales_last_period
@sales_last_period.setter
def sales_last_period(self, sales_last_period):
"""Sets the sales_last_period of this MarketDataMarket.
:param sales_last_period: The sales_last_period of this MarketDataMarket. # noqa: E501
:type: int
"""
self._sales_last_period = sales_last_period
@property
def highest_bid(self):
"""Gets the highest_bid of this MarketDataMarket. # noqa: E501
:return: The highest_bid of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._highest_bid
@highest_bid.setter
def highest_bid(self, highest_bid):
"""Sets the highest_bid of this MarketDataMarket.
:param highest_bid: The highest_bid of this MarketDataMarket. # noqa: E501
:type: int
"""
self._highest_bid = highest_bid
@property
def highest_bid_size(self):
"""Gets the highest_bid_size of this MarketDataMarket. # noqa: E501
:return: The highest_bid_size of this MarketDataMarket. # noqa: E501
:rtype: str
"""
return self._highest_bid_size
@highest_bid_size.setter
def highest_bid_size(self, highest_bid_size):
"""Sets the highest_bid_size of this MarketDataMarket.
:param highest_bid_size: The highest_bid_size of this MarketDataMarket. # noqa: E501
:type: str
"""
self._highest_bid_size = highest_bid_size
@property
def number_of_bids(self):
"""Gets the number_of_bids of this MarketDataMarket. # noqa: E501
:return: The number_of_bids of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._number_of_bids
@number_of_bids.setter
def number_of_bids(self, number_of_bids):
"""Sets the number_of_bids of this MarketDataMarket.
:param number_of_bids: The number_of_bids of this MarketDataMarket. # noqa: E501
:type: int
"""
self._number_of_bids = number_of_bids
@property
def annual_high(self):
"""Gets the annual_high of this MarketDataMarket. # noqa: E501
:return: The annual_high of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._annual_high
@annual_high.setter
def annual_high(self, annual_high):
"""Sets the annual_high of this MarketDataMarket.
:param annual_high: The annual_high of this MarketDataMarket. # noqa: E501
:type: int
"""
self._annual_high = annual_high
@property
def annual_low(self):
"""Gets the annual_low of this MarketDataMarket. # noqa: E501
:return: The annual_low of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._annual_low
@annual_low.setter
def annual_low(self, annual_low):
"""Sets the annual_low of this MarketDataMarket.
:param annual_low: The annual_low of this MarketDataMarket. # noqa: E501
:type: int
"""
self._annual_low = annual_low
@property
def deadstock_range_low(self):
"""Gets the deadstock_range_low of this MarketDataMarket. # noqa: E501
:return: The deadstock_range_low of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._deadstock_range_low
@deadstock_range_low.setter
def deadstock_range_low(self, deadstock_range_low):
"""Sets the deadstock_range_low of this MarketDataMarket.
:param deadstock_range_low: The deadstock_range_low of this MarketDataMarket. # noqa: E501
:type: int
"""
self._deadstock_range_low = deadstock_range_low
@property
def deadstock_range_high(self):
"""Gets the deadstock_range_high of this MarketDataMarket. # noqa: E501
:return: The deadstock_range_high of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._deadstock_range_high
@deadstock_range_high.setter
def deadstock_range_high(self, deadstock_range_high):
"""Sets the deadstock_range_high of this MarketDataMarket.
:param deadstock_range_high: The deadstock_range_high of this MarketDataMarket. # noqa: E501
:type: int
"""
self._deadstock_range_high = deadstock_range_high
@property
def volatility(self):
"""Gets the volatility of this MarketDataMarket. # noqa: E501
:return: The volatility of this MarketDataMarket. # noqa: E501
:rtype: float
"""
return self._volatility
@volatility.setter
def volatility(self, volatility):
"""Sets the volatility of this MarketDataMarket.
:param volatility: The volatility of this MarketDataMarket. # noqa: E501
:type: float
"""
self._volatility = volatility
@property
def deadstock_sold(self):
"""Gets the deadstock_sold of this MarketDataMarket. # noqa: E501
:return: The deadstock_sold of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._deadstock_sold
@deadstock_sold.setter
def deadstock_sold(self, deadstock_sold):
"""Sets the deadstock_sold of this MarketDataMarket.
:param deadstock_sold: The deadstock_sold of this MarketDataMarket. # noqa: E501
:type: int
"""
self._deadstock_sold = deadstock_sold
@property
def price_premium(self):
"""Gets the price_premium of this MarketDataMarket. # noqa: E501
:return: The price_premium of this MarketDataMarket. # noqa: E501
:rtype: float
"""
return self._price_premium
@price_premium.setter
def price_premium(self, price_premium):
"""Sets the price_premium of this MarketDataMarket.
:param price_premium: The price_premium of this MarketDataMarket. # noqa: E501
:type: float
"""
self._price_premium = price_premium
@property
def average_deadstock_price(self):
"""Gets the average_deadstock_price of this MarketDataMarket. # noqa: E501
:return: The average_deadstock_price of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._average_deadstock_price
@average_deadstock_price.setter
def average_deadstock_price(self, average_deadstock_price):
"""Sets the average_deadstock_price of this MarketDataMarket.
:param average_deadstock_price: The average_deadstock_price of this MarketDataMarket. # noqa: E501
:type: int
"""
self._average_deadstock_price = average_deadstock_price
@property
def last_sale(self):
"""Gets the last_sale of this MarketDataMarket. # noqa: E501
:return: The last_sale of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._last_sale
@last_sale.setter
def last_sale(self, last_sale):
"""Sets the last_sale of this MarketDataMarket.
:param last_sale: The last_sale of this MarketDataMarket. # noqa: E501
:type: int
"""
self._last_sale = last_sale
@property
def last_sale_size(self):
"""Gets the last_sale_size of this MarketDataMarket. # noqa: E501
:return: The last_sale_size of this MarketDataMarket. # noqa: E501
:rtype: str
"""
return self._last_sale_size
@last_sale_size.setter
def last_sale_size(self, last_sale_size):
"""Sets the last_sale_size of this MarketDataMarket.
:param last_sale_size: The last_sale_size of this MarketDataMarket. # noqa: E501
:type: str
"""
self._last_sale_size = last_sale_size
@property
def sales_last72_hours(self):
"""Gets the sales_last72_hours of this MarketDataMarket. # noqa: E501
:return: The sales_last72_hours of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._sales_last72_hours
@sales_last72_hours.setter
def sales_last72_hours(self, sales_last72_hours):
"""Sets the sales_last72_hours of this MarketDataMarket.
:param sales_last72_hours: The sales_last72_hours of this MarketDataMarket. # noqa: E501
:type: int
"""
self._sales_last72_hours = sales_last72_hours
@property
def change_value(self):
"""Gets the change_value of this MarketDataMarket. # noqa: E501
:return: The change_value of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._change_value
@change_value.setter
def change_value(self, change_value):
"""Sets the change_value of this MarketDataMarket.
:param change_value: The change_value of this MarketDataMarket. # noqa: E501
:type: int
"""
self._change_value = change_value
@property
def change_percentage(self):
"""Gets the change_percentage of this MarketDataMarket. # noqa: E501
:return: The change_percentage of this MarketDataMarket. # noqa: E501
:rtype: float
"""
return self._change_percentage
@change_percentage.setter
def change_percentage(self, change_percentage):
"""Sets the change_percentage of this MarketDataMarket.
:param change_percentage: The change_percentage of this MarketDataMarket. # noqa: E501
:type: float
"""
self._change_percentage = change_percentage
@property
def abs_change_percentage(self):
"""Gets the abs_change_percentage of this MarketDataMarket. # noqa: E501
:return: The abs_change_percentage of this MarketDataMarket. # noqa: E501
:rtype: float
"""
return self._abs_change_percentage
@abs_change_percentage.setter
def abs_change_percentage(self, abs_change_percentage):
"""Sets the abs_change_percentage of this MarketDataMarket.
:param abs_change_percentage: The abs_change_percentage of this MarketDataMarket. # noqa: E501
:type: float
"""
self._abs_change_percentage = abs_change_percentage
@property
def total_dollars(self):
"""Gets the total_dollars of this MarketDataMarket. # noqa: E501
:return: The total_dollars of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._total_dollars
@total_dollars.setter
def total_dollars(self, total_dollars):
"""Sets the total_dollars of this MarketDataMarket.
:param total_dollars: The total_dollars of this MarketDataMarket. # noqa: E501
:type: int
"""
self._total_dollars = total_dollars
@property
def updated_at(self):
"""Gets the updated_at of this MarketDataMarket. # noqa: E501
:return: The updated_at of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this MarketDataMarket.
:param updated_at: The updated_at of this MarketDataMarket. # noqa: E501
:type: int
"""
self._updated_at = updated_at
@property
def last_lowest_ask_time(self):
"""Gets the last_lowest_ask_time of this MarketDataMarket. # noqa: E501
:return: The last_lowest_ask_time of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._last_lowest_ask_time
@last_lowest_ask_time.setter
def last_lowest_ask_time(self, last_lowest_ask_time):
"""Sets the last_lowest_ask_time of this MarketDataMarket.
:param last_lowest_ask_time: The last_lowest_ask_time of this MarketDataMarket. # noqa: E501
:type: int
"""
self._last_lowest_ask_time = last_lowest_ask_time
@property
def last_highest_bid_time(self):
"""Gets the last_highest_bid_time of this MarketDataMarket. # noqa: E501
:return: The last_highest_bid_time of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._last_highest_bid_time
@last_highest_bid_time.setter
def last_highest_bid_time(self, last_highest_bid_time):
"""Sets the last_highest_bid_time of this MarketDataMarket.
:param last_highest_bid_time: The last_highest_bid_time of this MarketDataMarket. # noqa: E501
:type: int
"""
self._last_highest_bid_time = last_highest_bid_time
@property
def last_sale_date(self):
"""Gets the last_sale_date of this MarketDataMarket. # noqa: E501
:return: The last_sale_date of this MarketDataMarket. # noqa: E501
:rtype: str
"""
return self._last_sale_date
@last_sale_date.setter
def last_sale_date(self, last_sale_date):
"""Sets the last_sale_date of this MarketDataMarket.
:param last_sale_date: The last_sale_date of this MarketDataMarket. # noqa: E501
:type: str
"""
self._last_sale_date = last_sale_date
@property
def created_at(self):
"""Gets the created_at of this MarketDataMarket. # noqa: E501
:return: The created_at of this MarketDataMarket. # noqa: E501
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this MarketDataMarket.
:param created_at: The created_at of this MarketDataMarket. # noqa: E501
:type: str
"""
self._created_at = created_at
@property
def deadstock_sold_rank(self):
"""Gets the deadstock_sold_rank of this MarketDataMarket. # noqa: E501
:return: The deadstock_sold_rank of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._deadstock_sold_rank
@deadstock_sold_rank.setter
def deadstock_sold_rank(self, deadstock_sold_rank):
"""Sets the deadstock_sold_rank of this MarketDataMarket.
:param deadstock_sold_rank: The deadstock_sold_rank of this MarketDataMarket. # noqa: E501
:type: int
"""
self._deadstock_sold_rank = deadstock_sold_rank
@property
def price_premium_rank(self):
"""Gets the price_premium_rank of this MarketDataMarket. # noqa: E501
:return: The price_premium_rank of this MarketDataMarket. # noqa: E501
:rtype: int
"""
return self._price_premium_rank
@price_premium_rank.setter
def price_premium_rank(self, price_premium_rank):
"""Sets the price_premium_rank of this MarketDataMarket.
:param price_premium_rank: The price_premium_rank of this MarketDataMarket. # noqa: E501
:type: int
"""
self._price_premium_rank = price_premium_rank
@property
def average_deadstock_price_rank(self) -> int:
    """Gets the average_deadstock_price_rank of this MarketDataMarket.  # noqa: E501

    :return: The average_deadstock_price_rank of this MarketDataMarket.  # noqa: E501
    :rtype: int
    """
    return self._average_deadstock_price_rank

@average_deadstock_price_rank.setter
def average_deadstock_price_rank(self, average_deadstock_price_rank: int) -> None:
    """Sets the average_deadstock_price_rank of this MarketDataMarket.

    :param average_deadstock_price_rank: The average_deadstock_price_rank of this MarketDataMarket.  # noqa: E501
    :type: int
    """
    # No validation: generated model accepts any value, including None.
    self._average_deadstock_price_rank = average_deadstock_price_rank
@property
def featured(self) -> str:
    """Gets the featured of this MarketDataMarket.  # noqa: E501

    :return: The featured of this MarketDataMarket.  # noqa: E501
    :rtype: str
    """
    return self._featured

@featured.setter
def featured(self, featured: str) -> None:
    """Sets the featured of this MarketDataMarket.

    :param featured: The featured of this MarketDataMarket.  # noqa: E501
    :type: str
    """
    # No validation: generated model accepts any value, including None.
    self._featured = featured
def to_dict(self):
    """Return the model's declared attributes as a plain dict.

    Iterates the keys of ``swagger_types`` (the generated attribute map)
    and recursively serializes nested swagger models: any value exposing
    a ``to_dict`` method is converted, and lists/dicts are walked
    element-wise so nested models inside containers are converted too.
    """
    def _serialize(value):
        # Containers first, mirroring the generated precedence:
        # list -> model -> dict -> scalar passthrough.
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    return {attr: _serialize(getattr(self, attr))
            for attr in self.swagger_types}
def to_str(self):
    """Return a human-readable string rendering of the model."""
    model_as_dict = self.to_dict()
    return pprint.pformat(model_as_dict)
def __repr__(self):
    """Delegate to :meth:`to_str` so `print` and `pprint` show the model."""
    return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MarketDataMarket):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_meta import PortfolioIdDelResponsePortfolioItemProductMeta # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_shipping import PortfolioIdDelResponsePortfolioItemProductShipping # noqa: F401,E501
from io_stockx.models.portfolioitems_id_get_response_portfolio_item_product_market import PortfolioitemsIdGetResponsePortfolioItemProductMarket # noqa: F401,E501
class PortfolioitemsIdGetResponsePortfolioItemProduct(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'uuid': 'str',
'brand': 'str',
'category': 'str',
'charity_condition': 'int',
'colorway': 'str',
'condition': 'str',
'country_of_manufacture': 'str',
'gender': 'str',
'content_group': 'str',
'minimum_bid': 'int',
'media': 'PortfolioIdDelResponsePortfolioItemProductMedia',
'name': 'str',
'primary_category': 'str',
'secondary_category': 'str',
'product_category': 'str',
'release_date': 'str',
'retail_price': 'int',
'shoe': 'str',
'short_description': 'str',
'style_id': 'str',
'ticker_symbol': 'str',
'title': 'str',
'data_type': 'str',
'type': 'int',
'size_title': 'str',
'size_descriptor': 'str',
'size_all_descriptor': 'str',
'url_key': 'str',
'year': 'str',
'shipping_group': 'str',
'a_lim': 'int',
'meta': 'PortfolioIdDelResponsePortfolioItemProductMeta',
'shipping': 'PortfolioIdDelResponsePortfolioItemProductShipping',
'children': 'object',
'parent_id': 'str',
'parent_uuid': 'str',
'size_sort_order': 'int',
'shoe_size': 'str',
'market': 'PortfolioitemsIdGetResponsePortfolioItemProductMarket',
'upc': 'str'
}
attribute_map = {
'id': 'id',
'uuid': 'uuid',
'brand': 'brand',
'category': 'category',
'charity_condition': 'charityCondition',
'colorway': 'colorway',
'condition': 'condition',
'country_of_manufacture': 'countryOfManufacture',
'gender': 'gender',
'content_group': 'contentGroup',
'minimum_bid': 'minimumBid',
'media': 'media',
'name': 'name',
'primary_category': 'primaryCategory',
'secondary_category': 'secondaryCategory',
'product_category': 'productCategory',
'release_date': 'releaseDate',
'retail_price': 'retailPrice',
'shoe': 'shoe',
'short_description': 'shortDescription',
'style_id': 'styleId',
'ticker_symbol': 'tickerSymbol',
'title': 'title',
'data_type': 'dataType',
'type': 'type',
'size_title': 'sizeTitle',
'size_descriptor': 'sizeDescriptor',
'size_all_descriptor': 'sizeAllDescriptor',
'url_key': 'urlKey',
'year': 'year',
'shipping_group': 'shippingGroup',
'a_lim': 'aLim',
'meta': 'meta',
'shipping': 'shipping',
'children': 'children',
'parent_id': 'parentId',
'parent_uuid': 'parentUuid',
'size_sort_order': 'sizeSortOrder',
'shoe_size': 'shoeSize',
'market': 'market',
'upc': 'upc'
}
def __init__(self, id=None, uuid=None, brand=None, category=None, charity_condition=None, colorway=None, condition=None, country_of_manufacture=None, gender=None, content_group=None, minimum_bid=None, media=None, name=None, primary_category=None, secondary_category=None, product_category=None, release_date=None, retail_price=None, shoe=None, short_description=None, style_id=None, ticker_symbol=None, title=None, data_type=None, type=None, size_title=None, size_descriptor=None, size_all_descriptor=None, url_key=None, year=None, shipping_group=None, a_lim=None, meta=None, shipping=None, children=None, parent_id=None, parent_uuid=None, size_sort_order=None, shoe_size=None, market=None, upc=None): # noqa: E501
"""PortfolioitemsIdGetResponsePortfolioItemProduct - a model defined in Swagger""" # noqa: E501
self._id = None
self._uuid = None
self._brand = None
self._category = None
self._charity_condition = None
self._colorway = None
self._condition = None
self._country_of_manufacture = None
self._gender = None
self._content_group = None
self._minimum_bid = None
self._media = None
self._name = None
self._primary_category = None
self._secondary_category = None
self._product_category = None
self._release_date = None
self._retail_price = None
self._shoe = None
self._short_description = None
self._style_id = None
self._ticker_symbol = None
self._title = None
self._data_type = None
self._type = None
self._size_title = None
self._size_descriptor = None
self._size_all_descriptor = None
self._url_key = None
self._year = None
self._shipping_group = None
self._a_lim = None
self._meta = None
self._shipping = None
self._children = None
self._parent_id = None
self._parent_uuid = None
self._size_sort_order = None
self._shoe_size = None
self._market = None
self._upc = None
self.discriminator = None
self.id = id
self.uuid = uuid
self.brand = brand
self.category = category
self.charity_condition = charity_condition
self.colorway = colorway
self.condition = condition
self.country_of_manufacture = country_of_manufacture
self.gender = gender
self.content_group = content_group
self.minimum_bid = minimum_bid
self.media = media
self.name = name
self.primary_category = primary_category
self.secondary_category = secondary_category
self.product_category = product_category
self.release_date = release_date
self.retail_price = retail_price
self.shoe = shoe
self.short_description = short_description
self.style_id = style_id
self.ticker_symbol = ticker_symbol
self.title = title
self.data_type = data_type
self.type = type
self.size_title = size_title
self.size_descriptor = size_descriptor
self.size_all_descriptor = size_all_descriptor
self.url_key = url_key
self.year = year
self.shipping_group = shipping_group
self.a_lim = a_lim
self.meta = meta
self.shipping = shipping
self.children = children
self.parent_id = parent_id
self.parent_uuid = parent_uuid
self.size_sort_order = size_sort_order
self.shoe_size = shoe_size
self.market = market
self.upc = upc
@property
def id(self):
"""Gets the id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param id: The id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def uuid(self):
"""Gets the uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param uuid: The uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if uuid is None:
raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501
self._uuid = uuid
@property
def brand(self):
"""Gets the brand of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The brand of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._brand
@brand.setter
def brand(self, brand):
"""Sets the brand of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param brand: The brand of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if brand is None:
raise ValueError("Invalid value for `brand`, must not be `None`") # noqa: E501
self._brand = brand
@property
def category(self):
"""Gets the category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._category
@category.setter
def category(self, category):
"""Sets the category of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param category: The category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if category is None:
raise ValueError("Invalid value for `category`, must not be `None`") # noqa: E501
self._category = category
@property
def charity_condition(self):
"""Gets the charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: int
"""
return self._charity_condition
@charity_condition.setter
def charity_condition(self, charity_condition):
"""Sets the charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param charity_condition: The charity_condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: int
"""
if charity_condition is None:
raise ValueError("Invalid value for `charity_condition`, must not be `None`") # noqa: E501
self._charity_condition = charity_condition
@property
def colorway(self):
"""Gets the colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._colorway
@colorway.setter
def colorway(self, colorway):
"""Sets the colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param colorway: The colorway of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if colorway is None:
raise ValueError("Invalid value for `colorway`, must not be `None`") # noqa: E501
self._colorway = colorway
@property
def condition(self):
"""Gets the condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param condition: The condition of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if condition is None:
raise ValueError("Invalid value for `condition`, must not be `None`") # noqa: E501
self._condition = condition
@property
def country_of_manufacture(self):
"""Gets the country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._country_of_manufacture
@country_of_manufacture.setter
def country_of_manufacture(self, country_of_manufacture):
"""Sets the country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param country_of_manufacture: The country_of_manufacture of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if country_of_manufacture is None:
raise ValueError("Invalid value for `country_of_manufacture`, must not be `None`") # noqa: E501
self._country_of_manufacture = country_of_manufacture
@property
def gender(self):
"""Gets the gender of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The gender of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._gender
@gender.setter
def gender(self, gender):
"""Sets the gender of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param gender: The gender of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if gender is None:
raise ValueError("Invalid value for `gender`, must not be `None`") # noqa: E501
self._gender = gender
@property
def content_group(self):
"""Gets the content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._content_group
@content_group.setter
def content_group(self, content_group):
"""Sets the content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param content_group: The content_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if content_group is None:
raise ValueError("Invalid value for `content_group`, must not be `None`") # noqa: E501
self._content_group = content_group
@property
def minimum_bid(self):
"""Gets the minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: int
"""
return self._minimum_bid
@minimum_bid.setter
def minimum_bid(self, minimum_bid):
"""Sets the minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param minimum_bid: The minimum_bid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: int
"""
if minimum_bid is None:
raise ValueError("Invalid value for `minimum_bid`, must not be `None`") # noqa: E501
self._minimum_bid = minimum_bid
@property
def media(self):
"""Gets the media of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The media of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: PortfolioIdDelResponsePortfolioItemProductMedia
"""
return self._media
@media.setter
def media(self, media):
"""Sets the media of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param media: The media of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: PortfolioIdDelResponsePortfolioItemProductMedia
"""
if media is None:
raise ValueError("Invalid value for `media`, must not be `None`") # noqa: E501
self._media = media
@property
def name(self):
"""Gets the name of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The name of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param name: The name of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def primary_category(self):
"""Gets the primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._primary_category
@primary_category.setter
def primary_category(self, primary_category):
"""Sets the primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param primary_category: The primary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if primary_category is None:
raise ValueError("Invalid value for `primary_category`, must not be `None`") # noqa: E501
self._primary_category = primary_category
@property
def secondary_category(self):
"""Gets the secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._secondary_category
@secondary_category.setter
def secondary_category(self, secondary_category):
"""Sets the secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param secondary_category: The secondary_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if secondary_category is None:
raise ValueError("Invalid value for `secondary_category`, must not be `None`") # noqa: E501
self._secondary_category = secondary_category
@property
def product_category(self):
"""Gets the product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._product_category
@product_category.setter
def product_category(self, product_category):
"""Sets the product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param product_category: The product_category of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if product_category is None:
raise ValueError("Invalid value for `product_category`, must not be `None`") # noqa: E501
self._product_category = product_category
@property
def release_date(self):
"""Gets the release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._release_date
@release_date.setter
def release_date(self, release_date):
"""Sets the release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param release_date: The release_date of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if release_date is None:
raise ValueError("Invalid value for `release_date`, must not be `None`") # noqa: E501
self._release_date = release_date
@property
def retail_price(self):
"""Gets the retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: int
"""
return self._retail_price
@retail_price.setter
def retail_price(self, retail_price):
"""Sets the retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param retail_price: The retail_price of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: int
"""
if retail_price is None:
raise ValueError("Invalid value for `retail_price`, must not be `None`") # noqa: E501
self._retail_price = retail_price
@property
def shoe(self):
"""Gets the shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._shoe
@shoe.setter
def shoe(self, shoe):
"""Sets the shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param shoe: The shoe of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if shoe is None:
raise ValueError("Invalid value for `shoe`, must not be `None`") # noqa: E501
self._shoe = shoe
@property
def short_description(self):
"""Gets the short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._short_description
@short_description.setter
def short_description(self, short_description):
"""Sets the short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param short_description: The short_description of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if short_description is None:
raise ValueError("Invalid value for `short_description`, must not be `None`") # noqa: E501
self._short_description = short_description
@property
def style_id(self):
"""Gets the style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._style_id
@style_id.setter
def style_id(self, style_id):
"""Sets the style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param style_id: The style_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if style_id is None:
raise ValueError("Invalid value for `style_id`, must not be `None`") # noqa: E501
self._style_id = style_id
@property
def ticker_symbol(self):
"""Gets the ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._ticker_symbol
@ticker_symbol.setter
def ticker_symbol(self, ticker_symbol):
"""Sets the ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param ticker_symbol: The ticker_symbol of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if ticker_symbol is None:
raise ValueError("Invalid value for `ticker_symbol`, must not be `None`") # noqa: E501
self._ticker_symbol = ticker_symbol
@property
def title(self):
"""Gets the title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param title: The title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def data_type(self):
"""Gets the data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""Sets the data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param data_type: The data_type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if data_type is None:
raise ValueError("Invalid value for `data_type`, must not be `None`") # noqa: E501
self._data_type = data_type
@property
def type(self):
"""Gets the type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: int
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param type: The type of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: int
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def size_title(self):
"""Gets the size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._size_title
@size_title.setter
def size_title(self, size_title):
"""Sets the size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param size_title: The size_title of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if size_title is None:
raise ValueError("Invalid value for `size_title`, must not be `None`") # noqa: E501
self._size_title = size_title
@property
def size_descriptor(self):
"""Gets the size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._size_descriptor
@size_descriptor.setter
def size_descriptor(self, size_descriptor):
"""Sets the size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param size_descriptor: The size_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if size_descriptor is None:
raise ValueError("Invalid value for `size_descriptor`, must not be `None`") # noqa: E501
self._size_descriptor = size_descriptor
@property
def size_all_descriptor(self):
"""Gets the size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._size_all_descriptor
@size_all_descriptor.setter
def size_all_descriptor(self, size_all_descriptor):
"""Sets the size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param size_all_descriptor: The size_all_descriptor of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if size_all_descriptor is None:
raise ValueError("Invalid value for `size_all_descriptor`, must not be `None`") # noqa: E501
self._size_all_descriptor = size_all_descriptor
@property
def url_key(self):
"""Gets the url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._url_key
@url_key.setter
def url_key(self, url_key):
"""Sets the url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param url_key: The url_key of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if url_key is None:
raise ValueError("Invalid value for `url_key`, must not be `None`") # noqa: E501
self._url_key = url_key
@property
def year(self):
"""Gets the year of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The year of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._year
@year.setter
def year(self, year):
"""Sets the year of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param year: The year of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if year is None:
raise ValueError("Invalid value for `year`, must not be `None`") # noqa: E501
self._year = year
@property
def shipping_group(self):
"""Gets the shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._shipping_group
@shipping_group.setter
def shipping_group(self, shipping_group):
"""Sets the shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param shipping_group: The shipping_group of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if shipping_group is None:
raise ValueError("Invalid value for `shipping_group`, must not be `None`") # noqa: E501
self._shipping_group = shipping_group
@property
def a_lim(self):
"""Gets the a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: int
"""
return self._a_lim
@a_lim.setter
def a_lim(self, a_lim):
"""Sets the a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param a_lim: The a_lim of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: int
"""
if a_lim is None:
raise ValueError("Invalid value for `a_lim`, must not be `None`") # noqa: E501
self._a_lim = a_lim
@property
def meta(self):
"""Gets the meta of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The meta of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: PortfolioIdDelResponsePortfolioItemProductMeta
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param meta: The meta of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: PortfolioIdDelResponsePortfolioItemProductMeta
"""
if meta is None:
raise ValueError("Invalid value for `meta`, must not be `None`") # noqa: E501
self._meta = meta
@property
def shipping(self):
"""Gets the shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: PortfolioIdDelResponsePortfolioItemProductShipping
"""
return self._shipping
@shipping.setter
def shipping(self, shipping):
"""Sets the shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param shipping: The shipping of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: PortfolioIdDelResponsePortfolioItemProductShipping
"""
if shipping is None:
raise ValueError("Invalid value for `shipping`, must not be `None`") # noqa: E501
self._shipping = shipping
@property
def children(self):
"""Gets the children of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The children of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: object
"""
return self._children
@children.setter
def children(self, children):
"""Sets the children of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param children: The children of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: object
"""
if children is None:
raise ValueError("Invalid value for `children`, must not be `None`") # noqa: E501
self._children = children
@property
def parent_id(self):
"""Gets the parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._parent_id
@parent_id.setter
def parent_id(self, parent_id):
"""Sets the parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param parent_id: The parent_id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if parent_id is None:
raise ValueError("Invalid value for `parent_id`, must not be `None`") # noqa: E501
self._parent_id = parent_id
    @property
    def parent_uuid(self):
        """Get ``parent_uuid``.

        :return: The parent_uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct.
        :rtype: str
        """
        return self._parent_uuid
    @parent_uuid.setter
    def parent_uuid(self, parent_uuid):
        """Set ``parent_uuid`` (required).

        :type: str
        :raises ValueError: if ``parent_uuid`` is ``None``
        """
        if parent_uuid is None:
            raise ValueError("Invalid value for `parent_uuid`, must not be `None`") # noqa: E501
        self._parent_uuid = parent_uuid
    @property
    def size_sort_order(self):
        """Get ``size_sort_order``.

        :return: The size_sort_order of this PortfolioitemsIdGetResponsePortfolioItemProduct.
        :rtype: int
        """
        return self._size_sort_order
    @size_sort_order.setter
    def size_sort_order(self, size_sort_order):
        """Set ``size_sort_order`` (required).

        :type: int
        :raises ValueError: if ``size_sort_order`` is ``None``
        """
        if size_sort_order is None:
            raise ValueError("Invalid value for `size_sort_order`, must not be `None`") # noqa: E501
        self._size_sort_order = size_sort_order
    @property
    def shoe_size(self):
        """Get ``shoe_size``.

        :return: The shoe_size of this PortfolioitemsIdGetResponsePortfolioItemProduct.
        :rtype: str
        """
        return self._shoe_size
    @shoe_size.setter
    def shoe_size(self, shoe_size):
        """Set ``shoe_size`` (required).

        :type: str
        :raises ValueError: if ``shoe_size`` is ``None``
        """
        if shoe_size is None:
            raise ValueError("Invalid value for `shoe_size`, must not be `None`") # noqa: E501
        self._shoe_size = shoe_size
    @property
    def market(self):
        """Get ``market``.

        :return: The market of this PortfolioitemsIdGetResponsePortfolioItemProduct.
        :rtype: PortfolioitemsIdGetResponsePortfolioItemProductMarket
        """
        return self._market
    @market.setter
    def market(self, market):
        """Set ``market`` (required).

        :type: PortfolioitemsIdGetResponsePortfolioItemProductMarket
        :raises ValueError: if ``market`` is ``None``
        """
        if market is None:
            raise ValueError("Invalid value for `market`, must not be `None`") # noqa: E501
        self._market = market
    @property
    def upc(self):
        """Get ``upc``.

        :return: The upc of this PortfolioitemsIdGetResponsePortfolioItemProduct.
        :rtype: str
        """
        return self._upc
    @upc.setter
    def upc(self, upc):
        """Set ``upc`` (required).

        :type: str
        :raises ValueError: if ``upc`` is ``None``
        """
        if upc is None:
            raise ValueError("Invalid value for `upc`, must not be `None`") # noqa: E501
        self._upc = upc
    def to_dict(self):
        """Return the model's properties as a plain ``dict``.

        Iterates every attribute declared in ``swagger_types``; nested model
        instances (anything exposing ``to_dict``) are serialized recursively,
        including elements of lists and values of dicts.

        :return: mapping of attribute name to (recursively serialized) value
        :rtype: dict
        """
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return a pretty-printed string of the serialized model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint` — delegates to :meth:`to_str`."""
        return self.to_str()
    def __eq__(self, other):
        """Return True if *other* is the same model type with identical state.

        Compares instance ``__dict__``s; non-model operands compare unequal
        (returns False rather than NotImplemented, per generated convention).
        """
        if not isinstance(other, PortfolioitemsIdGetResponsePortfolioItemProduct):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Return True if both objects are not equal (inverse of ``__eq__``)."""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.billing_object import BillingObject # noqa: F401,E501
from io_stockx.models.customer_object_merchant import CustomerObjectMerchant # noqa: F401,E501
from io_stockx.models.customer_object_security import CustomerObjectSecurity # noqa: F401,E501
from io_stockx.models.customer_object_shipping import CustomerObjectShipping # noqa: F401,E501
class CustomerObject(object):
    """Swagger model describing a StockX customer record.

    Originally emitted by swagger-codegen as 35 hand-spelled, identical
    get/set property pairs; this rewrite installs the same plain read/write
    descriptors in a loop (see ``_add_customer_properties`` below) while
    keeping the generated public interface — constructor signature,
    ``swagger_types``/``attribute_map``, properties, ``to_dict``/``to_str``,
    ``__repr__``/``__eq__``/``__ne__`` — byte-for-byte compatible.
    """

    # Attribute name -> declared swagger type (drives to_dict iteration).
    swagger_types = {
        'id': 'str',
        'uuid': 'str',
        'first_name': 'str',
        'last_name': 'str',
        'full_name': 'str',
        'email': 'str',
        'username': 'str',
        'email_verified': 'bool',
        'default_size': 'str',
        'categories': 'list[str]',
        'default_category': 'str',
        'vacation_date': 'str',
        'is_active': 'bool',
        'flagged': 'bool',
        'hide_portfolio_banner': 'bool',
        'refer_url': 'str',
        'created_at': 'str',
        'created_at_time': 'float',
        'is_trader': 'bool',
        'ship_by_date': 'bool',
        'is_buying': 'bool',
        'is_selling': 'bool',
        'billing': 'BillingObject',
        'shipping': 'CustomerObjectShipping',
        'cc_only': 'BillingObject',
        'merchant': 'CustomerObjectMerchant',
        'promotion_code': 'str',
        'paypal_emails': 'str',
        'authorization_method': 'str',
        'security_override': 'bool',
        'team_member': 'bool',
        'password_locked': 'bool',
        'address_normalize_override': 'bool',
        'early_payout_enabled': 'bool',
        'early_payout_eligible': 'bool',
        'security': 'CustomerObjectSecurity'
    }

    # Attribute name -> JSON key used in API payloads.
    attribute_map = {
        'id': 'id',
        'uuid': 'uuid',
        'first_name': 'firstName',
        'last_name': 'lastName',
        'full_name': 'fullName',
        'email': 'email',
        'username': 'username',
        'email_verified': 'emailVerified',
        'default_size': 'defaultSize',
        'categories': 'categories',
        'default_category': 'defaultCategory',
        'vacation_date': 'vacationDate',
        'is_active': 'isActive',
        'flagged': 'flagged',
        'hide_portfolio_banner': 'hidePortfolioBanner',
        'refer_url': 'referUrl',
        'created_at': 'createdAt',
        'created_at_time': 'createdAtTime',
        'is_trader': 'isTrader',
        'ship_by_date': 'shipByDate',
        'is_buying': 'isBuying',
        'is_selling': 'isSelling',
        'billing': 'Billing',
        'shipping': 'Shipping',
        'cc_only': 'CCOnly',
        'merchant': 'Merchant',
        'promotion_code': 'promotionCode',
        'paypal_emails': 'paypalEmails',
        'authorization_method': 'authorizationMethod',
        'security_override': 'securityOverride',
        'team_member': 'teamMember',
        'password_locked': 'passwordLocked',
        'address_normalize_override': 'addressNormalizeOverride',
        'early_payout_enabled': 'earlyPayoutEnabled',
        'early_payout_eligible': 'earlyPayoutEligible',
        'security': 'security'
    }

    def __init__(self, id=None, uuid=None, first_name=None, last_name=None, full_name=None, email=None, username=None, email_verified=None, default_size=None, categories=None, default_category=None, vacation_date=None, is_active=None, flagged=None, hide_portfolio_banner=None, refer_url=None, created_at=None, created_at_time=None, is_trader=None, ship_by_date=None, is_buying=None, is_selling=None, billing=None, shipping=None, cc_only=None, merchant=None, promotion_code=None, paypal_emails=None, authorization_method=None, security_override=None, team_member=None, password_locked=None, address_normalize_override=None, early_payout_enabled=None, early_payout_eligible=None, security=None):  # noqa: E501
        """Initialize a CustomerObject.

        Every keyword is optional; any argument left as ``None`` simply
        leaves its backing field ``None`` (the generated original behaved
        the same way).  Values are assigned through the class properties,
        in ``swagger_types`` declaration order, matching the original.
        """
        # Snapshot the keyword arguments before any locals are created.
        supplied = dict(locals())
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        for attr in self.swagger_types:
            if supplied[attr] is not None:
                setattr(self, attr, supplied[attr])

    def to_dict(self):
        """Return the model's properties as a plain ``dict``.

        Nested model instances (anything exposing ``to_dict``) are
        serialized recursively, including list elements and dict values.
        """
        def _serialize(value):
            # One shared conversion path replaces the original's
            # list/object/dict branch ladder; semantics are identical.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return a pretty-printed string of the serialized model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint` — delegates to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is a CustomerObject with identical state."""
        return (isinstance(other, CustomerObject)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other


def _add_customer_properties(cls):
    """Attach a plain read/write property for every swagger attribute.

    None of the generated CustomerObject setters performed validation, so a
    simple getattr/setattr pair over the ``_<name>`` backing field is an
    exact behavioral match for all 35 accessors.
    """
    for attr in cls.swagger_types:
        storage = '_' + attr

        def fget(self, _storage=storage):
            return getattr(self, _storage)

        def fset(self, value, _storage=storage):
            setattr(self, _storage, value)

        setattr(cls, attr, property(
            fget, fset, doc="The %s of this CustomerObject." % attr))


_add_customer_properties(CustomerObject)
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PortfolioIdDelResponsePortfolioItemMerchant(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'customer_id': 'int',
'is_robot': 'int',
'name': 'str',
'paypal_email': 'str',
'take': 'float',
'created_at': 'str',
'created_at_time': 'int',
'updated_at': 'str',
'updated_at_time': 'int'
}
attribute_map = {
'id': 'id',
'customer_id': 'customerId',
'is_robot': 'isRobot',
'name': 'name',
'paypal_email': 'paypalEmail',
'take': 'take',
'created_at': 'createdAt',
'created_at_time': 'createdAtTime',
'updated_at': 'updatedAt',
'updated_at_time': 'updatedAtTime'
}
    def __init__(self, id=None, customer_id=None, is_robot=None, name=None, paypal_email=None, take=None, created_at=None, created_at_time=None, updated_at=None, updated_at_time=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItemMerchant - a model defined in Swagger.

        NOTE(review): although every parameter defaults to ``None``, each
        value below is assigned through its property setter, and every
        setter visible in this file raises ``ValueError`` on ``None`` — so
        in practice these fields all appear to be required.  (The
        ``updated_at``/``updated_at_time`` setters are outside this view;
        confirm they follow the same pattern.)
        """
        # Backing fields for the generated properties.
        self._id = None
        self._customer_id = None
        self._is_robot = None
        self._name = None
        self._paypal_email = None
        self._take = None
        self._created_at = None
        self._created_at_time = None
        self._updated_at = None
        self._updated_at_time = None
        self.discriminator = None
        # Unconditional assignment through the validating setters.
        self.id = id
        self.customer_id = customer_id
        self.is_robot = is_robot
        self.name = name
        self.paypal_email = paypal_email
        self.take = take
        self.created_at = created_at
        self.created_at_time = created_at_time
        self.updated_at = updated_at
        self.updated_at_time = updated_at_time
    @property
    def id(self):
        """Get ``id``.

        :return: The id of this PortfolioIdDelResponsePortfolioItemMerchant.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Set ``id`` (required).

        :type: int
        :raises ValueError: if ``id`` is ``None``
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
        self._id = id
    @property
    def customer_id(self):
        """Get ``customer_id``.

        :return: The customer_id of this PortfolioIdDelResponsePortfolioItemMerchant.
        :rtype: int
        """
        return self._customer_id
    @customer_id.setter
    def customer_id(self, customer_id):
        """Set ``customer_id`` (required).

        :type: int
        :raises ValueError: if ``customer_id`` is ``None``
        """
        if customer_id is None:
            raise ValueError("Invalid value for `customer_id`, must not be `None`") # noqa: E501
        self._customer_id = customer_id
    @property
    def is_robot(self):
        """Get ``is_robot`` (declared as ``int``, not ``bool``, in the API).

        :return: The is_robot of this PortfolioIdDelResponsePortfolioItemMerchant.
        :rtype: int
        """
        return self._is_robot
    @is_robot.setter
    def is_robot(self, is_robot):
        """Set ``is_robot`` (required).

        :type: int
        :raises ValueError: if ``is_robot`` is ``None``
        """
        if is_robot is None:
            raise ValueError("Invalid value for `is_robot`, must not be `None`") # noqa: E501
        self._is_robot = is_robot
    @property
    def name(self):
        """Get ``name``.

        :return: The name of this PortfolioIdDelResponsePortfolioItemMerchant.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Set ``name`` (required).

        :type: str
        :raises ValueError: if ``name`` is ``None``
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
        self._name = name
    @property
    def paypal_email(self):
        """Get ``paypal_email``.

        :return: The paypal_email of this PortfolioIdDelResponsePortfolioItemMerchant.
        :rtype: str
        """
        return self._paypal_email
    @paypal_email.setter
    def paypal_email(self, paypal_email):
        """Set ``paypal_email`` (required).

        :type: str
        :raises ValueError: if ``paypal_email`` is ``None``
        """
        if paypal_email is None:
            raise ValueError("Invalid value for `paypal_email`, must not be `None`") # noqa: E501
        self._paypal_email = paypal_email
@property
def take(self):
"""Gets the take of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:return: The take of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:rtype: float
"""
return self._take
@take.setter
def take(self, take):
"""Sets the take of this PortfolioIdDelResponsePortfolioItemMerchant.
:param take: The take of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:type: float
"""
if take is None:
raise ValueError("Invalid value for `take`, must not be `None`") # noqa: E501
self._take = take
@property
def created_at(self):
"""Gets the created_at of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:return: The created_at of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this PortfolioIdDelResponsePortfolioItemMerchant.
:param created_at: The created_at of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:type: str
"""
if created_at is None:
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def created_at_time(self):
"""Gets the created_at_time of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:return: The created_at_time of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:rtype: int
"""
return self._created_at_time
@created_at_time.setter
def created_at_time(self, created_at_time):
"""Sets the created_at_time of this PortfolioIdDelResponsePortfolioItemMerchant.
:param created_at_time: The created_at_time of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:type: int
"""
if created_at_time is None:
raise ValueError("Invalid value for `created_at_time`, must not be `None`") # noqa: E501
self._created_at_time = created_at_time
@property
def updated_at(self):
"""Gets the updated_at of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:return: The updated_at of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:rtype: str
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this PortfolioIdDelResponsePortfolioItemMerchant.
:param updated_at: The updated_at of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:type: str
"""
if updated_at is None:
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
@property
def updated_at_time(self):
"""Gets the updated_at_time of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:return: The updated_at_time of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:rtype: int
"""
return self._updated_at_time
@updated_at_time.setter
def updated_at_time(self, updated_at_time):
"""Sets the updated_at_time of this PortfolioIdDelResponsePortfolioItemMerchant.
:param updated_at_time: The updated_at_time of this PortfolioIdDelResponsePortfolioItemMerchant. # noqa: E501
:type: int
"""
if updated_at_time is None:
raise ValueError("Invalid value for `updated_at_time`, must not be `None`") # noqa: E501
self._updated_at_time = updated_at_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PortfolioIdDelResponsePortfolioItemMerchant):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SearchHitSearchableTraits(object):
    """Searchable trait fields attached to a search hit.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Python attribute name -> swagger type name.
    swagger_types = {
        'style': 'str',
        'colorway': 'str',
        'retail_price': 'int',
        'release_date': 'str'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'style': 'Style',
        'colorway': 'Colorway',
        'retail_price': 'Retail Price',
        'release_date': 'Release Date'
    }

    def __init__(self, style=None, colorway=None, retail_price=None, release_date=None):  # noqa: E501
        """SearchHitSearchableTraits - a model defined in Swagger.

        All arguments are optional; omitted fields stay None.
        """
        self._style = None
        self._colorway = None
        self._retail_price = None
        self._release_date = None
        self.discriminator = None
        # Only route supplied values through the setters.
        if style is not None:
            self.style = style
        if colorway is not None:
            self.colorway = colorway
        if retail_price is not None:
            self.retail_price = retail_price
        if release_date is not None:
            self.release_date = release_date

    @property
    def style(self):
        """str: the style of this SearchHitSearchableTraits (JSON key ``Style``)."""
        return self._style

    @style.setter
    def style(self, value):
        self._style = value

    @property
    def colorway(self):
        """str: the colorway of this SearchHitSearchableTraits (JSON key ``Colorway``)."""
        return self._colorway

    @colorway.setter
    def colorway(self, value):
        self._colorway = value

    @property
    def retail_price(self):
        """int: the retail_price of this SearchHitSearchableTraits (JSON key ``Retail Price``)."""
        return self._retail_price

    @retail_price.setter
    def retail_price(self, value):
        self._retail_price = value

    @property
    def release_date(self):
        """str: the release_date of this SearchHitSearchableTraits (JSON key ``Release Date``)."""
        return self._release_date

    @release_date.setter
    def release_date(self, value):
        self._release_date = value

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a SearchHitSearchableTraits with equal fields."""
        if not isinstance(other, SearchHitSearchableTraits):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__`` (kept for Python 2 compatibility)."""
        return not self == other
|
{
"imported_by": [],
"imports": [
"/sdk/python/lib/io_stockx/models/customer_object_merchant.py",
"/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_request.py",
"/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_product_shipping.py",
"/sdk/python/lib/io_stockx/models/address_object.py",
"/sdk/python/lib/io_stockx/models/search_hit.py",
"/sdk/python/lib/io_stockx/models/portfolio_request_portfolio_item.py",
"/sdk/python/lib/build/lib/io_stockx/models/billing_object.py",
"/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item.py",
"/sdk/python/lib/build/lib/io_stockx/models/search_results.py",
"/sdk/python/lib/io_stockx/models/product_info_product_attributes.py",
"/sdk/python/lib/build/lib/io_stockx/models/product_info_attributes.py",
"/sdk/python/lib/io_stockx/models/market_data_market.py",
"/sdk/python/lib/build/lib/io_stockx/models/portfolioitems_id_get_response_portfolio_item_product.py",
"/sdk/python/lib/io_stockx/models/customer_object.py",
"/sdk/python/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_merchant.py",
"/sdk/python/lib/build/lib/io_stockx/models/search_hit_searchable_traits.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/build/lib/io_stockx/models/billing_object.py
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.address_object import AddressObject # noqa: F401,E501
class BillingObject(object):
    """Billing/payment details, including an optional nested AddressObject.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Python attribute name -> swagger type name.
    swagger_types = {
        'card_type': 'str',
        'token': 'str',
        'last4': 'str',
        'account_email': 'str',
        'expiration_date': 'str',
        'cardholder_name': 'str',
        'address': 'AddressObject'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'card_type': 'cardType',
        'token': 'token',
        'last4': 'last4',
        'account_email': 'accountEmail',
        'expiration_date': 'expirationDate',
        'cardholder_name': 'cardholderName',
        'address': 'Address'
    }

    def __init__(self, card_type=None, token=None, last4=None, account_email=None, expiration_date=None, cardholder_name=None, address=None):  # noqa: E501
        """BillingObject - a model defined in Swagger.

        All arguments are optional; omitted fields stay None.
        """
        self._card_type = None
        self._token = None
        self._last4 = None
        self._account_email = None
        self._expiration_date = None
        self._cardholder_name = None
        self._address = None
        self.discriminator = None
        # Only route supplied values through the setters.
        if card_type is not None:
            self.card_type = card_type
        if token is not None:
            self.token = token
        if last4 is not None:
            self.last4 = last4
        if account_email is not None:
            self.account_email = account_email
        if expiration_date is not None:
            self.expiration_date = expiration_date
        if cardholder_name is not None:
            self.cardholder_name = cardholder_name
        if address is not None:
            self.address = address

    @property
    def card_type(self):
        """str: the card_type of this BillingObject (JSON key ``cardType``)."""
        return self._card_type

    @card_type.setter
    def card_type(self, value):
        self._card_type = value

    @property
    def token(self):
        """str: the token of this BillingObject."""
        return self._token

    @token.setter
    def token(self, value):
        self._token = value

    @property
    def last4(self):
        """str: the last4 of this BillingObject."""
        return self._last4

    @last4.setter
    def last4(self, value):
        self._last4 = value

    @property
    def account_email(self):
        """str: the account_email of this BillingObject (JSON key ``accountEmail``)."""
        return self._account_email

    @account_email.setter
    def account_email(self, value):
        self._account_email = value

    @property
    def expiration_date(self):
        """str: the expiration_date of this BillingObject (JSON key ``expirationDate``)."""
        return self._expiration_date

    @expiration_date.setter
    def expiration_date(self, value):
        self._expiration_date = value

    @property
    def cardholder_name(self):
        """str: the cardholder_name of this BillingObject (JSON key ``cardholderName``)."""
        return self._cardholder_name

    @cardholder_name.setter
    def cardholder_name(self, value):
        self._cardholder_name = value

    @property
    def address(self):
        """AddressObject: the address of this BillingObject (JSON key ``Address``)."""
        return self._address

    @address.setter
    def address(self, value):
        self._address = value

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is a BillingObject with equal fields."""
        if not isinstance(other, BillingObject):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__`` (kept for Python 2 compatibility)."""
        return not self == other
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AddressObject(object):
    """Postal address and contact fields used by billing and shipping.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Python attribute name -> swagger type name.
    swagger_types = {
        'first_name': 'str',
        'last_name': 'str',
        'telephone': 'str',
        'street_address': 'str',
        'extended_address': 'str',
        'locality': 'str',
        'region': 'str',
        'postal_code': 'str',
        'country_code_alpha2': 'str'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'first_name': 'firstName',
        'last_name': 'lastName',
        'telephone': 'telephone',
        'street_address': 'streetAddress',
        'extended_address': 'extendedAddress',
        'locality': 'locality',
        'region': 'region',
        'postal_code': 'postalCode',
        'country_code_alpha2': 'countryCodeAlpha2'
    }

    def __init__(self, first_name=None, last_name=None, telephone=None, street_address=None, extended_address=None, locality=None, region=None, postal_code=None, country_code_alpha2=None):  # noqa: E501
        """AddressObject - a model defined in Swagger.

        All arguments are optional; omitted fields stay None.
        """
        self._first_name = None
        self._last_name = None
        self._telephone = None
        self._street_address = None
        self._extended_address = None
        self._locality = None
        self._region = None
        self._postal_code = None
        self._country_code_alpha2 = None
        self.discriminator = None
        # Only route supplied values through the setters.
        if first_name is not None:
            self.first_name = first_name
        if last_name is not None:
            self.last_name = last_name
        if telephone is not None:
            self.telephone = telephone
        if street_address is not None:
            self.street_address = street_address
        if extended_address is not None:
            self.extended_address = extended_address
        if locality is not None:
            self.locality = locality
        if region is not None:
            self.region = region
        if postal_code is not None:
            self.postal_code = postal_code
        if country_code_alpha2 is not None:
            self.country_code_alpha2 = country_code_alpha2

    @property
    def first_name(self):
        """str: the first_name of this AddressObject (JSON key ``firstName``)."""
        return self._first_name

    @first_name.setter
    def first_name(self, value):
        self._first_name = value

    @property
    def last_name(self):
        """str: the last_name of this AddressObject (JSON key ``lastName``)."""
        return self._last_name

    @last_name.setter
    def last_name(self, value):
        self._last_name = value

    @property
    def telephone(self):
        """str: the telephone of this AddressObject."""
        return self._telephone

    @telephone.setter
    def telephone(self, value):
        self._telephone = value

    @property
    def street_address(self):
        """str: the street_address of this AddressObject (JSON key ``streetAddress``)."""
        return self._street_address

    @street_address.setter
    def street_address(self, value):
        self._street_address = value

    @property
    def extended_address(self):
        """str: the extended_address of this AddressObject (JSON key ``extendedAddress``)."""
        return self._extended_address

    @extended_address.setter
    def extended_address(self, value):
        self._extended_address = value

    @property
    def locality(self):
        """str: the locality of this AddressObject."""
        return self._locality

    @locality.setter
    def locality(self, value):
        self._locality = value

    @property
    def region(self):
        """str: the region of this AddressObject."""
        return self._region

    @region.setter
    def region(self, value):
        self._region = value

    @property
    def postal_code(self):
        """str: the postal_code of this AddressObject (JSON key ``postalCode``)."""
        return self._postal_code

    @postal_code.setter
    def postal_code(self, value):
        self._postal_code = value

    @property
    def country_code_alpha2(self):
        """str: the country_code_alpha2 of this AddressObject (JSON key ``countryCodeAlpha2``)."""
        return self._country_code_alpha2

    @country_code_alpha2.setter
    def country_code_alpha2(self, value):
        self._country_code_alpha2 = value

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is an AddressObject with equal fields."""
        if not isinstance(other, AddressObject):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__`` (kept for Python 2 compatibility)."""
        return not self == other
|
{
"imported_by": [
"/sdk/python/lib/build/lib/io_stockx/models/__init__.py",
"/sdk/python/lib/io_stockx/models/customer_object.py"
],
"imports": [
"/sdk/python/lib/io_stockx/models/address_object.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item.py
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.portfolio_id_del_response_portfolio_item_merchant import PortfolioIdDelResponsePortfolioItemMerchant # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_product import PortfolioIdDelResponsePortfolioItemProduct # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_tracking import PortfolioIdDelResponsePortfolioItemTracking # noqa: F401,E501
class PortfolioIdDelResponsePortfolioItem(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger type name (values naming other
    # generated model classes refer to siblings in this package).
    swagger_types = {
        'chain_id': 'str',
        'customer_id': 'int',
        'inventory_id': 'str',
        'product_id': 'str',
        'sku_uuid': 'str',
        'merchant_id': 'int',
        'condition': 'int',
        'action': 'int',
        'action_by': 'int',
        'amount': 'int',
        'expires_at': 'str',
        'expires_at_time': 'int',
        'gain_loss_dollars': 'int',
        'gain_loss_percentage': 'int',
        'market_value': 'str',
        'matched_state': 'int',
        'purchase_date': 'str',
        'purchase_date_time': 'int',
        'state': 'int',
        'text': 'str',
        'notes': 'str',
        'created_at_time': 'int',
        'can_edit': 'bool',
        'can_delete': 'bool',
        'tracking': 'PortfolioIdDelResponsePortfolioItemTracking',
        'meta': 'object',
        'product': 'PortfolioIdDelResponsePortfolioItemProduct',
        'merchant': 'PortfolioIdDelResponsePortfolioItemMerchant'
    }
    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'chain_id': 'chainId',
        'customer_id': 'customerId',
        'inventory_id': 'inventoryId',
        'product_id': 'productId',
        'sku_uuid': 'skuUuid',
        'merchant_id': 'merchantId',
        'condition': 'condition',
        'action': 'action',
        'action_by': 'actionBy',
        'amount': 'amount',
        'expires_at': 'expiresAt',
        'expires_at_time': 'expiresAtTime',
        'gain_loss_dollars': 'gainLossDollars',
        'gain_loss_percentage': 'gainLossPercentage',
        'market_value': 'marketValue',
        'matched_state': 'matchedState',
        'purchase_date': 'purchaseDate',
        'purchase_date_time': 'purchaseDateTime',
        'state': 'state',
        'text': 'text',
        'notes': 'notes',
        'created_at_time': 'createdAtTime',
        'can_edit': 'canEdit',
        'can_delete': 'canDelete',
        'tracking': 'Tracking',
        'meta': 'meta',
        'product': 'product',
        'merchant': 'Merchant'
    }
    def __init__(self, chain_id=None, customer_id=None, inventory_id=None, product_id=None, sku_uuid=None, merchant_id=None, condition=None, action=None, action_by=None, amount=None, expires_at=None, expires_at_time=None, gain_loss_dollars=None, gain_loss_percentage=None, market_value=None, matched_state=None, purchase_date=None, purchase_date_time=None, state=None, text=None, notes=None, created_at_time=None, can_edit=None, can_delete=None, tracking=None, meta=None, product=None, merchant=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItem - a model defined in Swagger

        Every field below is assigned unconditionally through its property
        setter, and those setters raise ValueError for None, so calling
        this constructor without supplying every argument raises.
        """  # noqa: E501
        # Backing attributes for the property accessors.
        self._chain_id = None
        self._customer_id = None
        self._inventory_id = None
        self._product_id = None
        self._sku_uuid = None
        self._merchant_id = None
        self._condition = None
        self._action = None
        self._action_by = None
        self._amount = None
        self._expires_at = None
        self._expires_at_time = None
        self._gain_loss_dollars = None
        self._gain_loss_percentage = None
        self._market_value = None
        self._matched_state = None
        self._purchase_date = None
        self._purchase_date_time = None
        self._state = None
        self._text = None
        self._notes = None
        self._created_at_time = None
        self._can_edit = None
        self._can_delete = None
        self._tracking = None
        self._meta = None
        self._product = None
        self._merchant = None
        self.discriminator = None
        # Assignments go through the validating setters (all required).
        self.chain_id = chain_id
        self.customer_id = customer_id
        self.inventory_id = inventory_id
        self.product_id = product_id
        self.sku_uuid = sku_uuid
        self.merchant_id = merchant_id
        self.condition = condition
        self.action = action
        self.action_by = action_by
        self.amount = amount
        self.expires_at = expires_at
        self.expires_at_time = expires_at_time
        self.gain_loss_dollars = gain_loss_dollars
        self.gain_loss_percentage = gain_loss_percentage
        self.market_value = market_value
        self.matched_state = matched_state
        self.purchase_date = purchase_date
        self.purchase_date_time = purchase_date_time
        self.state = state
        self.text = text
        self.notes = notes
        self.created_at_time = created_at_time
        self.can_edit = can_edit
        self.can_delete = can_delete
        self.tracking = tracking
        self.meta = meta
        self.product = product
        self.merchant = merchant
@property
def chain_id(self):
"""Gets the chain_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The chain_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._chain_id
@chain_id.setter
def chain_id(self, chain_id):
"""Sets the chain_id of this PortfolioIdDelResponsePortfolioItem.
:param chain_id: The chain_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if chain_id is None:
raise ValueError("Invalid value for `chain_id`, must not be `None`") # noqa: E501
self._chain_id = chain_id
@property
def customer_id(self):
"""Gets the customer_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The customer_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""Sets the customer_id of this PortfolioIdDelResponsePortfolioItem.
:param customer_id: The customer_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if customer_id is None:
raise ValueError("Invalid value for `customer_id`, must not be `None`") # noqa: E501
self._customer_id = customer_id
@property
def inventory_id(self):
"""Gets the inventory_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The inventory_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._inventory_id
@inventory_id.setter
def inventory_id(self, inventory_id):
"""Sets the inventory_id of this PortfolioIdDelResponsePortfolioItem.
:param inventory_id: The inventory_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if inventory_id is None:
raise ValueError("Invalid value for `inventory_id`, must not be `None`") # noqa: E501
self._inventory_id = inventory_id
@property
def product_id(self):
"""Gets the product_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The product_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._product_id
@product_id.setter
def product_id(self, product_id):
"""Sets the product_id of this PortfolioIdDelResponsePortfolioItem.
:param product_id: The product_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if product_id is None:
raise ValueError("Invalid value for `product_id`, must not be `None`") # noqa: E501
self._product_id = product_id
@property
def sku_uuid(self):
"""Gets the sku_uuid of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The sku_uuid of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._sku_uuid
@sku_uuid.setter
def sku_uuid(self, sku_uuid):
"""Sets the sku_uuid of this PortfolioIdDelResponsePortfolioItem.
:param sku_uuid: The sku_uuid of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if sku_uuid is None:
raise ValueError("Invalid value for `sku_uuid`, must not be `None`") # noqa: E501
self._sku_uuid = sku_uuid
@property
def merchant_id(self):
"""Gets the merchant_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The merchant_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._merchant_id
@merchant_id.setter
def merchant_id(self, merchant_id):
"""Sets the merchant_id of this PortfolioIdDelResponsePortfolioItem.
:param merchant_id: The merchant_id of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if merchant_id is None:
raise ValueError("Invalid value for `merchant_id`, must not be `None`") # noqa: E501
self._merchant_id = merchant_id
@property
def condition(self):
"""Gets the condition of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The condition of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._condition
@condition.setter
def condition(self, condition):
"""Sets the condition of this PortfolioIdDelResponsePortfolioItem.
:param condition: The condition of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if condition is None:
raise ValueError("Invalid value for `condition`, must not be `None`") # noqa: E501
self._condition = condition
@property
def action(self):
"""Gets the action of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The action of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._action
@action.setter
def action(self, action):
"""Sets the action of this PortfolioIdDelResponsePortfolioItem.
:param action: The action of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if action is None:
raise ValueError("Invalid value for `action`, must not be `None`") # noqa: E501
self._action = action
@property
def action_by(self):
"""Gets the action_by of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The action_by of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._action_by
@action_by.setter
def action_by(self, action_by):
"""Sets the action_by of this PortfolioIdDelResponsePortfolioItem.
:param action_by: The action_by of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if action_by is None:
raise ValueError("Invalid value for `action_by`, must not be `None`") # noqa: E501
self._action_by = action_by
@property
def amount(self):
"""Gets the amount of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The amount of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this PortfolioIdDelResponsePortfolioItem.
:param amount: The amount of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if amount is None:
raise ValueError("Invalid value for `amount`, must not be `None`") # noqa: E501
self._amount = amount
@property
def expires_at(self):
"""Gets the expires_at of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The expires_at of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._expires_at
@expires_at.setter
def expires_at(self, expires_at):
"""Sets the expires_at of this PortfolioIdDelResponsePortfolioItem.
:param expires_at: The expires_at of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if expires_at is None:
raise ValueError("Invalid value for `expires_at`, must not be `None`") # noqa: E501
self._expires_at = expires_at
@property
def expires_at_time(self):
"""Gets the expires_at_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The expires_at_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._expires_at_time
@expires_at_time.setter
def expires_at_time(self, expires_at_time):
"""Sets the expires_at_time of this PortfolioIdDelResponsePortfolioItem.
:param expires_at_time: The expires_at_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if expires_at_time is None:
raise ValueError("Invalid value for `expires_at_time`, must not be `None`") # noqa: E501
self._expires_at_time = expires_at_time
@property
def gain_loss_dollars(self):
"""Gets the gain_loss_dollars of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The gain_loss_dollars of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._gain_loss_dollars
@gain_loss_dollars.setter
def gain_loss_dollars(self, gain_loss_dollars):
"""Sets the gain_loss_dollars of this PortfolioIdDelResponsePortfolioItem.
:param gain_loss_dollars: The gain_loss_dollars of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if gain_loss_dollars is None:
raise ValueError("Invalid value for `gain_loss_dollars`, must not be `None`") # noqa: E501
self._gain_loss_dollars = gain_loss_dollars
@property
def gain_loss_percentage(self):
"""Gets the gain_loss_percentage of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The gain_loss_percentage of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._gain_loss_percentage
@gain_loss_percentage.setter
def gain_loss_percentage(self, gain_loss_percentage):
"""Sets the gain_loss_percentage of this PortfolioIdDelResponsePortfolioItem.
:param gain_loss_percentage: The gain_loss_percentage of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if gain_loss_percentage is None:
raise ValueError("Invalid value for `gain_loss_percentage`, must not be `None`") # noqa: E501
self._gain_loss_percentage = gain_loss_percentage
@property
def market_value(self):
"""Gets the market_value of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The market_value of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._market_value
@market_value.setter
def market_value(self, market_value):
"""Sets the market_value of this PortfolioIdDelResponsePortfolioItem.
:param market_value: The market_value of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if market_value is None:
raise ValueError("Invalid value for `market_value`, must not be `None`") # noqa: E501
self._market_value = market_value
@property
def matched_state(self):
"""Gets the matched_state of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The matched_state of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._matched_state
@matched_state.setter
def matched_state(self, matched_state):
"""Sets the matched_state of this PortfolioIdDelResponsePortfolioItem.
:param matched_state: The matched_state of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if matched_state is None:
raise ValueError("Invalid value for `matched_state`, must not be `None`") # noqa: E501
self._matched_state = matched_state
@property
def purchase_date(self):
"""Gets the purchase_date of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The purchase_date of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._purchase_date
@purchase_date.setter
def purchase_date(self, purchase_date):
"""Sets the purchase_date of this PortfolioIdDelResponsePortfolioItem.
:param purchase_date: The purchase_date of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if purchase_date is None:
raise ValueError("Invalid value for `purchase_date`, must not be `None`") # noqa: E501
self._purchase_date = purchase_date
@property
def purchase_date_time(self):
"""Gets the purchase_date_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The purchase_date_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._purchase_date_time
@purchase_date_time.setter
def purchase_date_time(self, purchase_date_time):
"""Sets the purchase_date_time of this PortfolioIdDelResponsePortfolioItem.
:param purchase_date_time: The purchase_date_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if purchase_date_time is None:
raise ValueError("Invalid value for `purchase_date_time`, must not be `None`") # noqa: E501
self._purchase_date_time = purchase_date_time
@property
def state(self):
"""Gets the state of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The state of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this PortfolioIdDelResponsePortfolioItem.
:param state: The state of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if state is None:
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
self._state = state
@property
def text(self):
"""Gets the text of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The text of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""Sets the text of this PortfolioIdDelResponsePortfolioItem.
:param text: The text of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if text is None:
raise ValueError("Invalid value for `text`, must not be `None`") # noqa: E501
self._text = text
@property
def notes(self):
"""Gets the notes of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The notes of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: str
"""
return self._notes
@notes.setter
def notes(self, notes):
"""Sets the notes of this PortfolioIdDelResponsePortfolioItem.
:param notes: The notes of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: str
"""
if notes is None:
raise ValueError("Invalid value for `notes`, must not be `None`") # noqa: E501
self._notes = notes
@property
def created_at_time(self):
"""Gets the created_at_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The created_at_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: int
"""
return self._created_at_time
@created_at_time.setter
def created_at_time(self, created_at_time):
"""Sets the created_at_time of this PortfolioIdDelResponsePortfolioItem.
:param created_at_time: The created_at_time of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: int
"""
if created_at_time is None:
raise ValueError("Invalid value for `created_at_time`, must not be `None`") # noqa: E501
self._created_at_time = created_at_time
@property
def can_edit(self):
"""Gets the can_edit of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The can_edit of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: bool
"""
return self._can_edit
@can_edit.setter
def can_edit(self, can_edit):
"""Sets the can_edit of this PortfolioIdDelResponsePortfolioItem.
:param can_edit: The can_edit of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: bool
"""
if can_edit is None:
raise ValueError("Invalid value for `can_edit`, must not be `None`") # noqa: E501
self._can_edit = can_edit
@property
def can_delete(self):
"""Gets the can_delete of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The can_delete of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: bool
"""
return self._can_delete
@can_delete.setter
def can_delete(self, can_delete):
"""Sets the can_delete of this PortfolioIdDelResponsePortfolioItem.
:param can_delete: The can_delete of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: bool
"""
if can_delete is None:
raise ValueError("Invalid value for `can_delete`, must not be `None`") # noqa: E501
self._can_delete = can_delete
@property
def tracking(self):
"""Gets the tracking of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The tracking of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: PortfolioIdDelResponsePortfolioItemTracking
"""
return self._tracking
@tracking.setter
def tracking(self, tracking):
"""Sets the tracking of this PortfolioIdDelResponsePortfolioItem.
:param tracking: The tracking of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: PortfolioIdDelResponsePortfolioItemTracking
"""
if tracking is None:
raise ValueError("Invalid value for `tracking`, must not be `None`") # noqa: E501
self._tracking = tracking
@property
def meta(self):
"""Gets the meta of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The meta of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: object
"""
return self._meta
@meta.setter
def meta(self, meta):
"""Sets the meta of this PortfolioIdDelResponsePortfolioItem.
:param meta: The meta of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: object
"""
if meta is None:
raise ValueError("Invalid value for `meta`, must not be `None`") # noqa: E501
self._meta = meta
@property
def product(self):
"""Gets the product of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The product of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: PortfolioIdDelResponsePortfolioItemProduct
"""
return self._product
@product.setter
def product(self, product):
"""Sets the product of this PortfolioIdDelResponsePortfolioItem.
:param product: The product of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: PortfolioIdDelResponsePortfolioItemProduct
"""
if product is None:
raise ValueError("Invalid value for `product`, must not be `None`") # noqa: E501
self._product = product
@property
def merchant(self):
"""Gets the merchant of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:return: The merchant of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:rtype: PortfolioIdDelResponsePortfolioItemMerchant
"""
return self._merchant
@merchant.setter
def merchant(self, merchant):
"""Sets the merchant of this PortfolioIdDelResponsePortfolioItem.
:param merchant: The merchant of this PortfolioIdDelResponsePortfolioItem. # noqa: E501
:type: PortfolioIdDelResponsePortfolioItemMerchant
"""
if merchant is None:
raise ValueError("Invalid value for `merchant`, must not be `None`") # noqa: E501
self._merchant = merchant
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PortfolioIdDelResponsePortfolioItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PortfolioIdDelResponsePortfolioItemMerchant(object):
    """Swagger model for the merchant attached to a deleted portfolio item.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Python attribute name -> declared swagger type.
    swagger_types = {
        'id': 'int',
        'customer_id': 'int',
        'is_robot': 'int',
        'name': 'str',
        'paypal_email': 'str',
        'take': 'float',
        'created_at': 'str',
        'created_at_time': 'int',
        'updated_at': 'str',
        'updated_at_time': 'int'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'id': 'id',
        'customer_id': 'customerId',
        'is_robot': 'isRobot',
        'name': 'name',
        'paypal_email': 'paypalEmail',
        'take': 'take',
        'created_at': 'createdAt',
        'created_at_time': 'createdAtTime',
        'updated_at': 'updatedAt',
        'updated_at_time': 'updatedAtTime'
    }

    def __init__(self, id=None, customer_id=None, is_robot=None, name=None, paypal_email=None, take=None, created_at=None, created_at_time=None, updated_at=None, updated_at_time=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItemMerchant - a model defined in Swagger"""  # noqa: E501
        # Every declared attribute starts with an empty backing field.
        for field in self.swagger_types:
            setattr(self, '_' + field, None)
        self.discriminator = None
        # Route each argument through its validating property setter.
        self.id = id
        self.customer_id = customer_id
        self.is_robot = is_robot
        self.name = name
        self.paypal_email = paypal_email
        self.take = take
        self.created_at = created_at
        self.created_at_time = created_at_time
        self.updated_at = updated_at
        self.updated_at_time = updated_at_time

    @property
    def id(self):
        """id of this PortfolioIdDelResponsePortfolioItemMerchant (int). # noqa: E501"""
        return self._id

    @id.setter
    def id(self, id):
        """Assign id; None is rejected. # noqa: E501"""
        if id is not None:
            self._id = id
        else:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501

    @property
    def customer_id(self):
        """customer_id of this PortfolioIdDelResponsePortfolioItemMerchant (int). # noqa: E501"""
        return self._customer_id

    @customer_id.setter
    def customer_id(self, customer_id):
        """Assign customer_id; None is rejected. # noqa: E501"""
        if customer_id is not None:
            self._customer_id = customer_id
        else:
            raise ValueError("Invalid value for `customer_id`, must not be `None`")  # noqa: E501

    @property
    def is_robot(self):
        """is_robot of this PortfolioIdDelResponsePortfolioItemMerchant (int). # noqa: E501"""
        return self._is_robot

    @is_robot.setter
    def is_robot(self, is_robot):
        """Assign is_robot; None is rejected. # noqa: E501"""
        if is_robot is not None:
            self._is_robot = is_robot
        else:
            raise ValueError("Invalid value for `is_robot`, must not be `None`")  # noqa: E501

    @property
    def name(self):
        """name of this PortfolioIdDelResponsePortfolioItemMerchant (str). # noqa: E501"""
        return self._name

    @name.setter
    def name(self, name):
        """Assign name; None is rejected. # noqa: E501"""
        if name is not None:
            self._name = name
        else:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

    @property
    def paypal_email(self):
        """paypal_email of this PortfolioIdDelResponsePortfolioItemMerchant (str). # noqa: E501"""
        return self._paypal_email

    @paypal_email.setter
    def paypal_email(self, paypal_email):
        """Assign paypal_email; None is rejected. # noqa: E501"""
        if paypal_email is not None:
            self._paypal_email = paypal_email
        else:
            raise ValueError("Invalid value for `paypal_email`, must not be `None`")  # noqa: E501

    @property
    def take(self):
        """take of this PortfolioIdDelResponsePortfolioItemMerchant (float). # noqa: E501"""
        return self._take

    @take.setter
    def take(self, take):
        """Assign take; None is rejected. # noqa: E501"""
        if take is not None:
            self._take = take
        else:
            raise ValueError("Invalid value for `take`, must not be `None`")  # noqa: E501

    @property
    def created_at(self):
        """created_at of this PortfolioIdDelResponsePortfolioItemMerchant (str). # noqa: E501"""
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Assign created_at; None is rejected. # noqa: E501"""
        if created_at is not None:
            self._created_at = created_at
        else:
            raise ValueError("Invalid value for `created_at`, must not be `None`")  # noqa: E501

    @property
    def created_at_time(self):
        """created_at_time of this PortfolioIdDelResponsePortfolioItemMerchant (int). # noqa: E501"""
        return self._created_at_time

    @created_at_time.setter
    def created_at_time(self, created_at_time):
        """Assign created_at_time; None is rejected. # noqa: E501"""
        if created_at_time is not None:
            self._created_at_time = created_at_time
        else:
            raise ValueError("Invalid value for `created_at_time`, must not be `None`")  # noqa: E501

    @property
    def updated_at(self):
        """updated_at of this PortfolioIdDelResponsePortfolioItemMerchant (str). # noqa: E501"""
        return self._updated_at

    @updated_at.setter
    def updated_at(self, updated_at):
        """Assign updated_at; None is rejected. # noqa: E501"""
        if updated_at is not None:
            self._updated_at = updated_at
        else:
            raise ValueError("Invalid value for `updated_at`, must not be `None`")  # noqa: E501

    @property
    def updated_at_time(self):
        """updated_at_time of this PortfolioIdDelResponsePortfolioItemMerchant (int). # noqa: E501"""
        return self._updated_at_time

    @updated_at_time.setter
    def updated_at_time(self, updated_at_time):
        """Assign updated_at_time; None is rejected. # noqa: E501"""
        if updated_at_time is not None:
            self._updated_at_time = updated_at_time
        else:
            raise ValueError("Invalid value for `updated_at_time`, must not be `None`")  # noqa: E501

    def to_dict(self):
        """Serialize the model's declared swagger attributes into a plain dict."""
        def _convert(v):
            # Recursively serialize nested models; pass plain values through.
            return v.to_dict() if hasattr(v, "to_dict") else v

        out = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                out[attr] = [_convert(item) for item in value]
            elif hasattr(value, "to_dict"):
                out[attr] = value.to_dict()
            elif isinstance(value, dict):
                out[attr] = {k: _convert(v) for k, v in value.items()}
            else:
                out[attr] = value
        return out

    def to_str(self):
        """Pretty-printed string form of the serialized model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both type and attribute dicts match."""
        if isinstance(other, PortfolioIdDelResponsePortfolioItemMerchant):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
{
"imported_by": [
"/sdk/python/lib/build/lib/io_stockx/models/__init__.py"
],
"imports": [
"/sdk/python/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_merchant.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/build/lib/io_stockx/models/portfolioitems_id_get_response_portfolio_item_product.py
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_meta import PortfolioIdDelResponsePortfolioItemProductMeta # noqa: F401,E501
from io_stockx.models.portfolio_id_del_response_portfolio_item_product_shipping import PortfolioIdDelResponsePortfolioItemProductShipping # noqa: F401,E501
from io_stockx.models.portfolioitems_id_get_response_portfolio_item_product_market import PortfolioitemsIdGetResponsePortfolioItemProductMarket # noqa: F401,E501
class PortfolioitemsIdGetResponsePortfolioItemProduct(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'uuid': 'str',
'brand': 'str',
'category': 'str',
'charity_condition': 'int',
'colorway': 'str',
'condition': 'str',
'country_of_manufacture': 'str',
'gender': 'str',
'content_group': 'str',
'minimum_bid': 'int',
'media': 'PortfolioIdDelResponsePortfolioItemProductMedia',
'name': 'str',
'primary_category': 'str',
'secondary_category': 'str',
'product_category': 'str',
'release_date': 'str',
'retail_price': 'int',
'shoe': 'str',
'short_description': 'str',
'style_id': 'str',
'ticker_symbol': 'str',
'title': 'str',
'data_type': 'str',
'type': 'int',
'size_title': 'str',
'size_descriptor': 'str',
'size_all_descriptor': 'str',
'url_key': 'str',
'year': 'str',
'shipping_group': 'str',
'a_lim': 'int',
'meta': 'PortfolioIdDelResponsePortfolioItemProductMeta',
'shipping': 'PortfolioIdDelResponsePortfolioItemProductShipping',
'children': 'object',
'parent_id': 'str',
'parent_uuid': 'str',
'size_sort_order': 'int',
'shoe_size': 'str',
'market': 'PortfolioitemsIdGetResponsePortfolioItemProductMarket',
'upc': 'str'
}
attribute_map = {
'id': 'id',
'uuid': 'uuid',
'brand': 'brand',
'category': 'category',
'charity_condition': 'charityCondition',
'colorway': 'colorway',
'condition': 'condition',
'country_of_manufacture': 'countryOfManufacture',
'gender': 'gender',
'content_group': 'contentGroup',
'minimum_bid': 'minimumBid',
'media': 'media',
'name': 'name',
'primary_category': 'primaryCategory',
'secondary_category': 'secondaryCategory',
'product_category': 'productCategory',
'release_date': 'releaseDate',
'retail_price': 'retailPrice',
'shoe': 'shoe',
'short_description': 'shortDescription',
'style_id': 'styleId',
'ticker_symbol': 'tickerSymbol',
'title': 'title',
'data_type': 'dataType',
'type': 'type',
'size_title': 'sizeTitle',
'size_descriptor': 'sizeDescriptor',
'size_all_descriptor': 'sizeAllDescriptor',
'url_key': 'urlKey',
'year': 'year',
'shipping_group': 'shippingGroup',
'a_lim': 'aLim',
'meta': 'meta',
'shipping': 'shipping',
'children': 'children',
'parent_id': 'parentId',
'parent_uuid': 'parentUuid',
'size_sort_order': 'sizeSortOrder',
'shoe_size': 'shoeSize',
'market': 'market',
'upc': 'upc'
}
def __init__(self, id=None, uuid=None, brand=None, category=None, charity_condition=None, colorway=None, condition=None, country_of_manufacture=None, gender=None, content_group=None, minimum_bid=None, media=None, name=None, primary_category=None, secondary_category=None, product_category=None, release_date=None, retail_price=None, shoe=None, short_description=None, style_id=None, ticker_symbol=None, title=None, data_type=None, type=None, size_title=None, size_descriptor=None, size_all_descriptor=None, url_key=None, year=None, shipping_group=None, a_lim=None, meta=None, shipping=None, children=None, parent_id=None, parent_uuid=None, size_sort_order=None, shoe_size=None, market=None, upc=None): # noqa: E501
"""PortfolioitemsIdGetResponsePortfolioItemProduct - a model defined in Swagger""" # noqa: E501
self._id = None
self._uuid = None
self._brand = None
self._category = None
self._charity_condition = None
self._colorway = None
self._condition = None
self._country_of_manufacture = None
self._gender = None
self._content_group = None
self._minimum_bid = None
self._media = None
self._name = None
self._primary_category = None
self._secondary_category = None
self._product_category = None
self._release_date = None
self._retail_price = None
self._shoe = None
self._short_description = None
self._style_id = None
self._ticker_symbol = None
self._title = None
self._data_type = None
self._type = None
self._size_title = None
self._size_descriptor = None
self._size_all_descriptor = None
self._url_key = None
self._year = None
self._shipping_group = None
self._a_lim = None
self._meta = None
self._shipping = None
self._children = None
self._parent_id = None
self._parent_uuid = None
self._size_sort_order = None
self._shoe_size = None
self._market = None
self._upc = None
self.discriminator = None
self.id = id
self.uuid = uuid
self.brand = brand
self.category = category
self.charity_condition = charity_condition
self.colorway = colorway
self.condition = condition
self.country_of_manufacture = country_of_manufacture
self.gender = gender
self.content_group = content_group
self.minimum_bid = minimum_bid
self.media = media
self.name = name
self.primary_category = primary_category
self.secondary_category = secondary_category
self.product_category = product_category
self.release_date = release_date
self.retail_price = retail_price
self.shoe = shoe
self.short_description = short_description
self.style_id = style_id
self.ticker_symbol = ticker_symbol
self.title = title
self.data_type = data_type
self.type = type
self.size_title = size_title
self.size_descriptor = size_descriptor
self.size_all_descriptor = size_all_descriptor
self.url_key = url_key
self.year = year
self.shipping_group = shipping_group
self.a_lim = a_lim
self.meta = meta
self.shipping = shipping
self.children = children
self.parent_id = parent_id
self.parent_uuid = parent_uuid
self.size_sort_order = size_sort_order
self.shoe_size = shoe_size
self.market = market
self.upc = upc
@property
def id(self):
"""Gets the id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param id: The id of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def uuid(self):
"""Gets the uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:return: The uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct.
:param uuid: The uuid of this PortfolioitemsIdGetResponsePortfolioItemProduct. # noqa: E501
:type: str
"""
if uuid is None:
raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501
self._uuid = uuid
@property
def brand(self):
    """str: the brand of this portfolio item product (required)."""
    return self._brand

@brand.setter
def brand(self, brand):
    """Set ``brand``; raises ValueError when ``brand`` is None (required field)."""
    if brand is None:
        raise ValueError("Invalid value for `brand`, must not be `None`")  # noqa: E501
    self._brand = brand
@property
def category(self):
    """str: the category of this portfolio item product (required)."""
    return self._category

@category.setter
def category(self, category):
    """Set ``category``; raises ValueError when ``category`` is None (required field)."""
    if category is None:
        raise ValueError("Invalid value for `category`, must not be `None`")  # noqa: E501
    self._category = category
@property
def charity_condition(self):
    """int: the charity_condition of this portfolio item product (required)."""
    return self._charity_condition

@charity_condition.setter
def charity_condition(self, charity_condition):
    """Set ``charity_condition``; raises ValueError when it is None (required field)."""
    if charity_condition is None:
        raise ValueError("Invalid value for `charity_condition`, must not be `None`")  # noqa: E501
    self._charity_condition = charity_condition
@property
def colorway(self):
    """str: the colorway of this portfolio item product (required)."""
    return self._colorway

@colorway.setter
def colorway(self, colorway):
    """Set ``colorway``; raises ValueError when ``colorway`` is None (required field)."""
    if colorway is None:
        raise ValueError("Invalid value for `colorway`, must not be `None`")  # noqa: E501
    self._colorway = colorway
@property
def condition(self):
    """str: the condition of this portfolio item product (required)."""
    return self._condition

@condition.setter
def condition(self, condition):
    """Set ``condition``; raises ValueError when ``condition`` is None (required field)."""
    if condition is None:
        raise ValueError("Invalid value for `condition`, must not be `None`")  # noqa: E501
    self._condition = condition
@property
def country_of_manufacture(self):
    """str: the country_of_manufacture of this portfolio item product (required)."""
    return self._country_of_manufacture

@country_of_manufacture.setter
def country_of_manufacture(self, country_of_manufacture):
    """Set ``country_of_manufacture``; raises ValueError when it is None (required field)."""
    if country_of_manufacture is None:
        raise ValueError("Invalid value for `country_of_manufacture`, must not be `None`")  # noqa: E501
    self._country_of_manufacture = country_of_manufacture
@property
def gender(self):
    """str: the gender of this portfolio item product (required)."""
    return self._gender

@gender.setter
def gender(self, gender):
    """Set ``gender``; raises ValueError when ``gender`` is None (required field)."""
    if gender is None:
        raise ValueError("Invalid value for `gender`, must not be `None`")  # noqa: E501
    self._gender = gender
@property
def content_group(self):
    """str: the content_group of this portfolio item product (required)."""
    return self._content_group

@content_group.setter
def content_group(self, content_group):
    """Set ``content_group``; raises ValueError when it is None (required field)."""
    if content_group is None:
        raise ValueError("Invalid value for `content_group`, must not be `None`")  # noqa: E501
    self._content_group = content_group
@property
def minimum_bid(self):
    """int: the minimum_bid of this portfolio item product (required)."""
    return self._minimum_bid

@minimum_bid.setter
def minimum_bid(self, minimum_bid):
    """Set ``minimum_bid``; raises ValueError when it is None (required field)."""
    if minimum_bid is None:
        raise ValueError("Invalid value for `minimum_bid`, must not be `None`")  # noqa: E501
    self._minimum_bid = minimum_bid
@property
def media(self):
    """PortfolioIdDelResponsePortfolioItemProductMedia: media of this product (required)."""
    return self._media

@media.setter
def media(self, media):
    """Set ``media``; raises ValueError when ``media`` is None (required field)."""
    if media is None:
        raise ValueError("Invalid value for `media`, must not be `None`")  # noqa: E501
    self._media = media
@property
def name(self):
    """str: the name of this portfolio item product (required)."""
    return self._name

@name.setter
def name(self, name):
    """Set ``name``; raises ValueError when ``name`` is None (required field)."""
    if name is None:
        raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
    self._name = name
@property
def primary_category(self):
    """str: the primary_category of this portfolio item product (required)."""
    return self._primary_category

@primary_category.setter
def primary_category(self, primary_category):
    """Set ``primary_category``; raises ValueError when it is None (required field)."""
    if primary_category is None:
        raise ValueError("Invalid value for `primary_category`, must not be `None`")  # noqa: E501
    self._primary_category = primary_category
@property
def secondary_category(self):
    """str: the secondary_category of this portfolio item product (required)."""
    return self._secondary_category

@secondary_category.setter
def secondary_category(self, secondary_category):
    """Set ``secondary_category``; raises ValueError when it is None (required field)."""
    if secondary_category is None:
        raise ValueError("Invalid value for `secondary_category`, must not be `None`")  # noqa: E501
    self._secondary_category = secondary_category
@property
def product_category(self):
    """str: the product_category of this portfolio item product (required)."""
    return self._product_category

@product_category.setter
def product_category(self, product_category):
    """Set ``product_category``; raises ValueError when it is None (required field)."""
    if product_category is None:
        raise ValueError("Invalid value for `product_category`, must not be `None`")  # noqa: E501
    self._product_category = product_category
@property
def release_date(self):
    """str: the release_date of this portfolio item product (required)."""
    return self._release_date

@release_date.setter
def release_date(self, release_date):
    """Set ``release_date``; raises ValueError when it is None (required field)."""
    if release_date is None:
        raise ValueError("Invalid value for `release_date`, must not be `None`")  # noqa: E501
    self._release_date = release_date
@property
def retail_price(self):
    """int: the retail_price of this portfolio item product (required)."""
    return self._retail_price

@retail_price.setter
def retail_price(self, retail_price):
    """Set ``retail_price``; raises ValueError when it is None (required field)."""
    if retail_price is None:
        raise ValueError("Invalid value for `retail_price`, must not be `None`")  # noqa: E501
    self._retail_price = retail_price
@property
def shoe(self):
    """str: the shoe of this portfolio item product (required)."""
    return self._shoe

@shoe.setter
def shoe(self, shoe):
    """Set ``shoe``; raises ValueError when ``shoe`` is None (required field)."""
    if shoe is None:
        raise ValueError("Invalid value for `shoe`, must not be `None`")  # noqa: E501
    self._shoe = shoe
@property
def short_description(self):
    """str: the short_description of this portfolio item product (required)."""
    return self._short_description

@short_description.setter
def short_description(self, short_description):
    """Set ``short_description``; raises ValueError when it is None (required field)."""
    if short_description is None:
        raise ValueError("Invalid value for `short_description`, must not be `None`")  # noqa: E501
    self._short_description = short_description
@property
def style_id(self):
    """str: the style_id of this portfolio item product (required)."""
    return self._style_id

@style_id.setter
def style_id(self, style_id):
    """Set ``style_id``; raises ValueError when it is None (required field)."""
    if style_id is None:
        raise ValueError("Invalid value for `style_id`, must not be `None`")  # noqa: E501
    self._style_id = style_id
@property
def ticker_symbol(self):
    """str: the ticker_symbol of this portfolio item product (required)."""
    return self._ticker_symbol

@ticker_symbol.setter
def ticker_symbol(self, ticker_symbol):
    """Set ``ticker_symbol``; raises ValueError when it is None (required field)."""
    if ticker_symbol is None:
        raise ValueError("Invalid value for `ticker_symbol`, must not be `None`")  # noqa: E501
    self._ticker_symbol = ticker_symbol
@property
def title(self):
    """str: the title of this portfolio item product (required)."""
    return self._title

@title.setter
def title(self, title):
    """Set ``title``; raises ValueError when ``title`` is None (required field)."""
    if title is None:
        raise ValueError("Invalid value for `title`, must not be `None`")  # noqa: E501
    self._title = title
@property
def data_type(self):
    """str: the data_type of this portfolio item product (required)."""
    return self._data_type

@data_type.setter
def data_type(self, data_type):
    """Set ``data_type``; raises ValueError when it is None (required field)."""
    if data_type is None:
        raise ValueError("Invalid value for `data_type`, must not be `None`")  # noqa: E501
    self._data_type = data_type
@property
def type(self):
    """int: the type of this portfolio item product (required)."""
    return self._type

# NOTE: parameter name `type` mirrors the generated API field and shadows
# the builtin within this setter only (generated code; kept as-is).
@type.setter
def type(self, type):
    """Set ``type``; raises ValueError when ``type`` is None (required field)."""
    if type is None:
        raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
    self._type = type
@property
def size_title(self):
    """str: the size_title of this portfolio item product (required)."""
    return self._size_title

@size_title.setter
def size_title(self, size_title):
    """Set ``size_title``; raises ValueError when it is None (required field)."""
    if size_title is None:
        raise ValueError("Invalid value for `size_title`, must not be `None`")  # noqa: E501
    self._size_title = size_title
@property
def size_descriptor(self):
    """str: the size_descriptor of this portfolio item product (required)."""
    return self._size_descriptor

@size_descriptor.setter
def size_descriptor(self, size_descriptor):
    """Set ``size_descriptor``; raises ValueError when it is None (required field)."""
    if size_descriptor is None:
        raise ValueError("Invalid value for `size_descriptor`, must not be `None`")  # noqa: E501
    self._size_descriptor = size_descriptor
@property
def size_all_descriptor(self):
    """str: the size_all_descriptor of this portfolio item product (required)."""
    return self._size_all_descriptor

@size_all_descriptor.setter
def size_all_descriptor(self, size_all_descriptor):
    """Set ``size_all_descriptor``; raises ValueError when it is None (required field)."""
    if size_all_descriptor is None:
        raise ValueError("Invalid value for `size_all_descriptor`, must not be `None`")  # noqa: E501
    self._size_all_descriptor = size_all_descriptor
@property
def url_key(self):
    """str: the url_key of this portfolio item product (required)."""
    return self._url_key

@url_key.setter
def url_key(self, url_key):
    """Set ``url_key``; raises ValueError when it is None (required field)."""
    if url_key is None:
        raise ValueError("Invalid value for `url_key`, must not be `None`")  # noqa: E501
    self._url_key = url_key
@property
def year(self):
    """str: the year of this portfolio item product (required)."""
    return self._year

@year.setter
def year(self, year):
    """Set ``year``; raises ValueError when ``year`` is None (required field)."""
    if year is None:
        raise ValueError("Invalid value for `year`, must not be `None`")  # noqa: E501
    self._year = year
@property
def shipping_group(self):
    """str: the shipping_group of this portfolio item product (required)."""
    return self._shipping_group

@shipping_group.setter
def shipping_group(self, shipping_group):
    """Set ``shipping_group``; raises ValueError when it is None (required field)."""
    if shipping_group is None:
        raise ValueError("Invalid value for `shipping_group`, must not be `None`")  # noqa: E501
    self._shipping_group = shipping_group
@property
def a_lim(self):
    """int: the a_lim of this portfolio item product (required)."""
    return self._a_lim

@a_lim.setter
def a_lim(self, a_lim):
    """Set ``a_lim``; raises ValueError when ``a_lim`` is None (required field)."""
    if a_lim is None:
        raise ValueError("Invalid value for `a_lim`, must not be `None`")  # noqa: E501
    self._a_lim = a_lim
@property
def meta(self):
    """PortfolioIdDelResponsePortfolioItemProductMeta: meta of this product (required)."""
    return self._meta

@meta.setter
def meta(self, meta):
    """Set ``meta``; raises ValueError when ``meta`` is None (required field)."""
    if meta is None:
        raise ValueError("Invalid value for `meta`, must not be `None`")  # noqa: E501
    self._meta = meta
@property
def shipping(self):
    """PortfolioIdDelResponsePortfolioItemProductShipping: shipping info (required)."""
    return self._shipping

@shipping.setter
def shipping(self, shipping):
    """Set ``shipping``; raises ValueError when it is None (required field)."""
    if shipping is None:
        raise ValueError("Invalid value for `shipping`, must not be `None`")  # noqa: E501
    self._shipping = shipping
@property
def children(self):
    """object: the children of this portfolio item product (required)."""
    return self._children

@children.setter
def children(self, children):
    """Set ``children``; raises ValueError when it is None (required field)."""
    if children is None:
        raise ValueError("Invalid value for `children`, must not be `None`")  # noqa: E501
    self._children = children
@property
def parent_id(self):
    """str: the parent_id of this portfolio item product (required)."""
    return self._parent_id

@parent_id.setter
def parent_id(self, parent_id):
    """Set ``parent_id``; raises ValueError when it is None (required field)."""
    if parent_id is None:
        raise ValueError("Invalid value for `parent_id`, must not be `None`")  # noqa: E501
    self._parent_id = parent_id
@property
def parent_uuid(self):
    """str: the parent_uuid of this portfolio item product (required)."""
    return self._parent_uuid

@parent_uuid.setter
def parent_uuid(self, parent_uuid):
    """Set ``parent_uuid``; raises ValueError when it is None (required field)."""
    if parent_uuid is None:
        raise ValueError("Invalid value for `parent_uuid`, must not be `None`")  # noqa: E501
    self._parent_uuid = parent_uuid
@property
def size_sort_order(self):
    """int: the size_sort_order of this portfolio item product (required)."""
    return self._size_sort_order

@size_sort_order.setter
def size_sort_order(self, size_sort_order):
    """Set ``size_sort_order``; raises ValueError when it is None (required field)."""
    if size_sort_order is None:
        raise ValueError("Invalid value for `size_sort_order`, must not be `None`")  # noqa: E501
    self._size_sort_order = size_sort_order
@property
def shoe_size(self):
    """str: the shoe_size of this portfolio item product (required)."""
    return self._shoe_size

@shoe_size.setter
def shoe_size(self, shoe_size):
    """Set ``shoe_size``; raises ValueError when it is None (required field)."""
    if shoe_size is None:
        raise ValueError("Invalid value for `shoe_size`, must not be `None`")  # noqa: E501
    self._shoe_size = shoe_size
@property
def market(self):
    """PortfolioitemsIdGetResponsePortfolioItemProductMarket: market data (required)."""
    return self._market

@market.setter
def market(self, market):
    """Set ``market``; raises ValueError when ``market`` is None (required field)."""
    if market is None:
        raise ValueError("Invalid value for `market`, must not be `None`")  # noqa: E501
    self._market = market
@property
def upc(self):
    """str: the upc of this portfolio item product (required)."""
    return self._upc

@upc.setter
def upc(self, upc):
    """Set ``upc``; raises ValueError when ``upc`` is None (required field)."""
    if upc is None:
        raise ValueError("Invalid value for `upc`, must not be `None`")  # noqa: E501
    self._upc = upc
def to_dict(self):
    """Return the model's attributes as a dict, recursively serializing
    any nested swagger model (anything exposing ``to_dict``).

    Iterates only the attributes declared in ``swagger_types``.
    """
    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Convert model elements; primitives pass through unchanged.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Convert model values inside dicts; keys are kept as-is.
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return the pretty-printed string form of ``to_dict()``."""
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint`; delegates to ``to_str``."""
    return self.to_str()
def __eq__(self, other):
    """Equal when *other* is the same model class with an identical __dict__."""
    if not isinstance(other, PortfolioitemsIdGetResponsePortfolioItemProduct):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """Inverse of ``__eq__`` (needed for Python 2 generated targets)."""
    return not self == other
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PortfolioIdDelResponsePortfolioItemProductShipping(object):
    """Shipping estimate attached to a portfolio-item product.

    NOTE: This class is auto generated by the swagger code generator
    program; do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'total_days_to_ship': 'int',
        'has_additional_days_to_ship': 'bool',
        'delivery_days_lower_bound': 'int',
        'delivery_days_upper_bound': 'int'
    }

    attribute_map = {
        'total_days_to_ship': 'totalDaysToShip',
        'has_additional_days_to_ship': 'hasAdditionalDaysToShip',
        'delivery_days_lower_bound': 'deliveryDaysLowerBound',
        'delivery_days_upper_bound': 'deliveryDaysUpperBound'
    }

    @staticmethod
    def _require(field, value):
        """Return *value* unchanged, raising ValueError when it is None.

        Every field of this model is required, so each setter funnels
        through this check (message matches the generated template).
        """
        if value is None:
            raise ValueError("Invalid value for `%s`, must not be `None`" % field)  # noqa: E501
        return value

    def __init__(self, total_days_to_ship=None, has_additional_days_to_ship=None, delivery_days_lower_bound=None, delivery_days_upper_bound=None):  # noqa: E501
        """PortfolioIdDelResponsePortfolioItemProductShipping - a model defined in Swagger"""  # noqa: E501
        self._total_days_to_ship = None
        self._has_additional_days_to_ship = None
        self._delivery_days_lower_bound = None
        self._delivery_days_upper_bound = None
        self.discriminator = None
        # Assign through the property setters so the non-None checks run;
        # constructing with a missing (None) argument therefore raises.
        self.total_days_to_ship = total_days_to_ship
        self.has_additional_days_to_ship = has_additional_days_to_ship
        self.delivery_days_lower_bound = delivery_days_lower_bound
        self.delivery_days_upper_bound = delivery_days_upper_bound

    @property
    def total_days_to_ship(self):
        """int: the totalDaysToShip value (required)."""
        return self._total_days_to_ship

    @total_days_to_ship.setter
    def total_days_to_ship(self, total_days_to_ship):
        """Set ``total_days_to_ship``; None is rejected."""
        self._total_days_to_ship = self._require('total_days_to_ship', total_days_to_ship)

    @property
    def has_additional_days_to_ship(self):
        """bool: the hasAdditionalDaysToShip value (required)."""
        return self._has_additional_days_to_ship

    @has_additional_days_to_ship.setter
    def has_additional_days_to_ship(self, has_additional_days_to_ship):
        """Set ``has_additional_days_to_ship``; None is rejected."""
        self._has_additional_days_to_ship = self._require('has_additional_days_to_ship', has_additional_days_to_ship)

    @property
    def delivery_days_lower_bound(self):
        """int: the deliveryDaysLowerBound value (required)."""
        return self._delivery_days_lower_bound

    @delivery_days_lower_bound.setter
    def delivery_days_lower_bound(self, delivery_days_lower_bound):
        """Set ``delivery_days_lower_bound``; None is rejected."""
        self._delivery_days_lower_bound = self._require('delivery_days_lower_bound', delivery_days_lower_bound)

    @property
    def delivery_days_upper_bound(self):
        """int: the deliveryDaysUpperBound value (required)."""
        return self._delivery_days_upper_bound

    @delivery_days_upper_bound.setter
    def delivery_days_upper_bound(self, delivery_days_upper_bound):
        """Set ``delivery_days_upper_bound``; None is rejected."""
        self._delivery_days_upper_bound = self._require('delivery_days_upper_bound', delivery_days_upper_bound)

    def to_dict(self):
        """Return the model properties as a dict (nested models converted)."""
        def _conv(v):
            # Serialize nested swagger models; pass primitives through.
            return v.to_dict() if hasattr(v, "to_dict") else v

        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_conv(x) for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _conv(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model class with identical state."""
        return (isinstance(other, PortfolioIdDelResponsePortfolioItemProductShipping)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
|
{
"imported_by": [
"/sdk/python/lib/build/lib/io_stockx/models/__init__.py"
],
"imports": [
"/sdk/python/lib/build/lib/io_stockx/models/portfolio_id_del_response_portfolio_item_product_shipping.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/build/lib/io_stockx/models/search_results.py
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.search_hit import SearchHit # noqa: F401,E501
class SearchResults(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # swagger_types maps attribute name -> attribute type;
    # attribute_map maps attribute name -> JSON key in the API definition.
    swagger_types = {
        'hits': 'list[SearchHit]',
        'nb_hits': 'int'
    }

    attribute_map = {
        'hits': 'hits',
        'nb_hits': 'nbHits'
    }

    def __init__(self, hits=None, nb_hits=None):  # noqa: E501
        """SearchResults - a model defined in Swagger"""  # noqa: E501
        self._hits = None
        self._nb_hits = None
        self.discriminator = None
        if hits is not None:
            self.hits = hits
        if nb_hits is not None:
            self.nb_hits = nb_hits

    @property
    def hits(self):
        """Gets the hits of this SearchResults.  # noqa: E501

        :return: The hits of this SearchResults.  # noqa: E501
        :rtype: list[SearchHit]
        """
        return self._hits

    @hits.setter
    def hits(self, hits):
        """Sets the hits of this SearchResults.

        :param hits: The hits of this SearchResults.  # noqa: E501
        :type: list[SearchHit]
        """
        self._hits = hits

    @property
    def nb_hits(self):
        """Gets the nb_hits of this SearchResults.  # noqa: E501

        :return: The nb_hits of this SearchResults.  # noqa: E501
        :rtype: int
        """
        return self._nb_hits

    @nb_hits.setter
    def nb_hits(self, nb_hits):
        """Sets the nb_hits of this SearchResults.

        :param nb_hits: The nb_hits of this SearchResults.  # noqa: E501
        :type: int
        """
        self._nb_hits = nb_hits

    def to_dict(self):
        """Return the model's swagger attributes as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including models held inside lists and dicts.
        """
        def _serialize(item):
            # Recurse into nested swagger models; pass plain values through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _serialize(entry) for key, entry in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Shown by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both type and attribute state match."""
        if isinstance(other, SearchResults):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Logical negation of ``__eq__``."""
        return not self == other
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.search_hit_media import SearchHitMedia # noqa: F401,E501
from io_stockx.models.search_hit_searchable_traits import SearchHitSearchableTraits # noqa: F401,E501
def _search_hit_property(attr):
    """Build a plain pass-through property backed by the private '_<attr>' slot."""
    slot = '_' + attr

    def _get(self):
        return getattr(self, slot)

    def _set(self, value):
        setattr(self, slot, value)

    return property(_get, _set)


class SearchHit(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # swagger_types maps attribute name -> attribute type;
    # attribute_map maps attribute name -> JSON key in the API definition.
    swagger_types = {
        'name': 'str',
        'brand': 'str',
        'thumbnail_url': 'str',
        'media': 'SearchHitMedia',
        'url': 'str',
        'release_date': 'str',
        'categories': 'list[str]',
        'product_category': 'str',
        'ticker_symbol': 'str',
        'style_id': 'str',
        'make': 'str',
        'model': 'str',
        'short_description': 'str',
        'gender': 'str',
        'colorway': 'str',
        'price': 'int',
        'description': 'str',
        'highest_bid': 'str',
        'total_dollars': 'str',
        'lowest_ask': 'str',
        'last_sale': 'str',
        'sales_last_72': 'int',
        'deadstock_sold': 'int',
        'quality_bid': 'int',
        'active': 'int',
        'new_release': 'str',
        'searchable_traits': 'SearchHitSearchableTraits',
        'object_id': 'str',
        'annual_high': 'str',
        'annual_low': 'str',
        'deadstock_range_low': 'str',
        'deadstock_range_high': 'str',
        'average_deadstock_price': 'str',
        'change_value': 'str'
    }

    attribute_map = {
        'name': 'name',
        'brand': 'brand',
        'thumbnail_url': 'thumbnail_url',
        'media': 'media',
        'url': 'url',
        'release_date': 'release_date',
        'categories': 'categories',
        'product_category': 'product_category',
        'ticker_symbol': 'ticker_symbol',
        'style_id': 'style_id',
        'make': 'make',
        'model': 'model',
        'short_description': 'short_description',
        'gender': 'gender',
        'colorway': 'colorway',
        'price': 'price',
        'description': 'description',
        'highest_bid': 'highest_bid',
        'total_dollars': 'total_dollars',
        'lowest_ask': 'lowest_ask',
        'last_sale': 'last_sale',
        'sales_last_72': 'sales_last_72',
        'deadstock_sold': 'deadstock_sold',
        'quality_bid': 'quality_bid',
        'active': 'active',
        'new_release': 'new_release',
        'searchable_traits': 'searchable_traits',
        'object_id': 'objectID',
        'annual_high': 'annual_high',
        'annual_low': 'annual_low',
        'deadstock_range_low': 'deadstock_range_low',
        'deadstock_range_high': 'deadstock_range_high',
        'average_deadstock_price': 'average_deadstock_price',
        'change_value': 'change_value'
    }

    def __init__(self, name=None, brand=None, thumbnail_url=None, media=None, url=None, release_date=None, categories=None, product_category=None, ticker_symbol=None, style_id=None, make=None, model=None, short_description=None, gender=None, colorway=None, price=None, description=None, highest_bid=None, total_dollars=None, lowest_ask=None, last_sale=None, sales_last_72=None, deadstock_sold=None, quality_bid=None, active=None, new_release=None, searchable_traits=None, object_id=None, annual_high=None, annual_low=None, deadstock_range_low=None, deadstock_range_high=None, average_deadstock_price=None, change_value=None):  # noqa: E501
        """SearchHit - a model defined in Swagger"""  # noqa: E501
        # Snapshot the constructor arguments before any locals are added;
        # every parameter name matches a swagger_types key exactly.
        supplied = locals()
        # Initialize every backing slot to None, as the generated code does.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        # Route non-None arguments through the public setters.
        for attr in self.swagger_types:
            if supplied[attr] is not None:
                setattr(self, attr, supplied[attr])

    def to_dict(self):
        """Return the model's swagger attributes as a plain ``dict``.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including models held inside lists and dicts.
        """
        def _serialize(item):
            # Recurse into nested swagger models; pass plain values through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: _serialize(entry) for key, entry in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Shown by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both type and attribute state match."""
        if isinstance(other, SearchHit):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Logical negation of ``__eq__``."""
        return not self == other


# Attach one read/write property per swagger attribute; each behaves exactly
# like the generated explicit @property / @<name>.setter pair (plain get/set
# of the '_<name>' slot with no validation).
for _attr in SearchHit.swagger_types:
    setattr(SearchHit, _attr, _search_hit_property(_attr))
del _attr
|
{
"imported_by": [
"/sdk/python/lib/build/lib/io_stockx/models/__init__.py"
],
"imports": [
"/sdk/python/lib/io_stockx/models/search_hit.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/io_stockx/models/customer_object.py
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.billing_object import BillingObject # noqa: F401,E501
from io_stockx.models.customer_object_merchant import CustomerObjectMerchant # noqa: F401,E501
from io_stockx.models.customer_object_security import CustomerObjectSecurity # noqa: F401,E501
from io_stockx.models.customer_object_shipping import CustomerObjectShipping # noqa: F401,E501
class CustomerObject(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'uuid': 'str',
'first_name': 'str',
'last_name': 'str',
'full_name': 'str',
'email': 'str',
'username': 'str',
'email_verified': 'bool',
'default_size': 'str',
'categories': 'list[str]',
'default_category': 'str',
'vacation_date': 'str',
'is_active': 'bool',
'flagged': 'bool',
'hide_portfolio_banner': 'bool',
'refer_url': 'str',
'created_at': 'str',
'created_at_time': 'float',
'is_trader': 'bool',
'ship_by_date': 'bool',
'is_buying': 'bool',
'is_selling': 'bool',
'billing': 'BillingObject',
'shipping': 'CustomerObjectShipping',
'cc_only': 'BillingObject',
'merchant': 'CustomerObjectMerchant',
'promotion_code': 'str',
'paypal_emails': 'str',
'authorization_method': 'str',
'security_override': 'bool',
'team_member': 'bool',
'password_locked': 'bool',
'address_normalize_override': 'bool',
'early_payout_enabled': 'bool',
'early_payout_eligible': 'bool',
'security': 'CustomerObjectSecurity'
}
attribute_map = {
'id': 'id',
'uuid': 'uuid',
'first_name': 'firstName',
'last_name': 'lastName',
'full_name': 'fullName',
'email': 'email',
'username': 'username',
'email_verified': 'emailVerified',
'default_size': 'defaultSize',
'categories': 'categories',
'default_category': 'defaultCategory',
'vacation_date': 'vacationDate',
'is_active': 'isActive',
'flagged': 'flagged',
'hide_portfolio_banner': 'hidePortfolioBanner',
'refer_url': 'referUrl',
'created_at': 'createdAt',
'created_at_time': 'createdAtTime',
'is_trader': 'isTrader',
'ship_by_date': 'shipByDate',
'is_buying': 'isBuying',
'is_selling': 'isSelling',
'billing': 'Billing',
'shipping': 'Shipping',
'cc_only': 'CCOnly',
'merchant': 'Merchant',
'promotion_code': 'promotionCode',
'paypal_emails': 'paypalEmails',
'authorization_method': 'authorizationMethod',
'security_override': 'securityOverride',
'team_member': 'teamMember',
'password_locked': 'passwordLocked',
'address_normalize_override': 'addressNormalizeOverride',
'early_payout_enabled': 'earlyPayoutEnabled',
'early_payout_eligible': 'earlyPayoutEligible',
'security': 'security'
}
def __init__(self, id=None, uuid=None, first_name=None, last_name=None, full_name=None, email=None, username=None, email_verified=None, default_size=None, categories=None, default_category=None, vacation_date=None, is_active=None, flagged=None, hide_portfolio_banner=None, refer_url=None, created_at=None, created_at_time=None, is_trader=None, ship_by_date=None, is_buying=None, is_selling=None, billing=None, shipping=None, cc_only=None, merchant=None, promotion_code=None, paypal_emails=None, authorization_method=None, security_override=None, team_member=None, password_locked=None, address_normalize_override=None, early_payout_enabled=None, early_payout_eligible=None, security=None): # noqa: E501
"""CustomerObject - a model defined in Swagger""" # noqa: E501
self._id = None
self._uuid = None
self._first_name = None
self._last_name = None
self._full_name = None
self._email = None
self._username = None
self._email_verified = None
self._default_size = None
self._categories = None
self._default_category = None
self._vacation_date = None
self._is_active = None
self._flagged = None
self._hide_portfolio_banner = None
self._refer_url = None
self._created_at = None
self._created_at_time = None
self._is_trader = None
self._ship_by_date = None
self._is_buying = None
self._is_selling = None
self._billing = None
self._shipping = None
self._cc_only = None
self._merchant = None
self._promotion_code = None
self._paypal_emails = None
self._authorization_method = None
self._security_override = None
self._team_member = None
self._password_locked = None
self._address_normalize_override = None
self._early_payout_enabled = None
self._early_payout_eligible = None
self._security = None
self.discriminator = None
if id is not None:
self.id = id
if uuid is not None:
self.uuid = uuid
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if full_name is not None:
self.full_name = full_name
if email is not None:
self.email = email
if username is not None:
self.username = username
if email_verified is not None:
self.email_verified = email_verified
if default_size is not None:
self.default_size = default_size
if categories is not None:
self.categories = categories
if default_category is not None:
self.default_category = default_category
if vacation_date is not None:
self.vacation_date = vacation_date
if is_active is not None:
self.is_active = is_active
if flagged is not None:
self.flagged = flagged
if hide_portfolio_banner is not None:
self.hide_portfolio_banner = hide_portfolio_banner
if refer_url is not None:
self.refer_url = refer_url
if created_at is not None:
self.created_at = created_at
if created_at_time is not None:
self.created_at_time = created_at_time
if is_trader is not None:
self.is_trader = is_trader
if ship_by_date is not None:
self.ship_by_date = ship_by_date
if is_buying is not None:
self.is_buying = is_buying
if is_selling is not None:
self.is_selling = is_selling
if billing is not None:
self.billing = billing
if shipping is not None:
self.shipping = shipping
if cc_only is not None:
self.cc_only = cc_only
if merchant is not None:
self.merchant = merchant
if promotion_code is not None:
self.promotion_code = promotion_code
if paypal_emails is not None:
self.paypal_emails = paypal_emails
if authorization_method is not None:
self.authorization_method = authorization_method
if security_override is not None:
self.security_override = security_override
if team_member is not None:
self.team_member = team_member
if password_locked is not None:
self.password_locked = password_locked
if address_normalize_override is not None:
self.address_normalize_override = address_normalize_override
if early_payout_enabled is not None:
self.early_payout_enabled = early_payout_enabled
if early_payout_eligible is not None:
self.early_payout_eligible = early_payout_eligible
if security is not None:
self.security = security
@property
def id(self):
    """str: the id of this CustomerObject."""
    return self._id

@id.setter
def id(self, value):
    self._id = value

@property
def uuid(self):
    """str: the uuid of this CustomerObject."""
    return self._uuid

@uuid.setter
def uuid(self, value):
    self._uuid = value

@property
def first_name(self):
    """str: the first_name of this CustomerObject."""
    return self._first_name

@first_name.setter
def first_name(self, value):
    self._first_name = value

@property
def last_name(self):
    """str: the last_name of this CustomerObject."""
    return self._last_name

@last_name.setter
def last_name(self, value):
    self._last_name = value

@property
def full_name(self):
    """str: the full_name of this CustomerObject."""
    return self._full_name

@full_name.setter
def full_name(self, value):
    self._full_name = value

@property
def email(self):
    """str: the email of this CustomerObject."""
    return self._email

@email.setter
def email(self, value):
    self._email = value

@property
def username(self):
    """str: the username of this CustomerObject."""
    return self._username

@username.setter
def username(self, value):
    self._username = value

@property
def email_verified(self):
    """bool: the email_verified flag of this CustomerObject."""
    return self._email_verified

@email_verified.setter
def email_verified(self, value):
    self._email_verified = value

@property
def default_size(self):
    """str: the default_size of this CustomerObject."""
    return self._default_size

@default_size.setter
def default_size(self, value):
    self._default_size = value

@property
def categories(self):
    """list[str]: the categories of this CustomerObject."""
    return self._categories

@categories.setter
def categories(self, value):
    self._categories = value
@property
def default_category(self):
    """str: the default_category of this CustomerObject."""
    return self._default_category

@default_category.setter
def default_category(self, value):
    self._default_category = value

@property
def vacation_date(self):
    """str: the vacation_date of this CustomerObject."""
    return self._vacation_date

@vacation_date.setter
def vacation_date(self, value):
    self._vacation_date = value

@property
def is_active(self):
    """bool: the is_active flag of this CustomerObject."""
    return self._is_active

@is_active.setter
def is_active(self, value):
    self._is_active = value

@property
def flagged(self):
    """bool: the flagged flag of this CustomerObject."""
    return self._flagged

@flagged.setter
def flagged(self, value):
    self._flagged = value

@property
def hide_portfolio_banner(self):
    """bool: the hide_portfolio_banner flag of this CustomerObject."""
    return self._hide_portfolio_banner

@hide_portfolio_banner.setter
def hide_portfolio_banner(self, value):
    self._hide_portfolio_banner = value

@property
def refer_url(self):
    """str: the refer_url of this CustomerObject."""
    return self._refer_url

@refer_url.setter
def refer_url(self, value):
    self._refer_url = value

@property
def created_at(self):
    """str: the created_at of this CustomerObject."""
    return self._created_at

@created_at.setter
def created_at(self, value):
    self._created_at = value

@property
def created_at_time(self):
    """float: the created_at_time of this CustomerObject."""
    return self._created_at_time

@created_at_time.setter
def created_at_time(self, value):
    self._created_at_time = value

@property
def is_trader(self):
    """bool: the is_trader flag of this CustomerObject."""
    return self._is_trader

@is_trader.setter
def is_trader(self, value):
    self._is_trader = value

@property
def ship_by_date(self):
    """bool: the ship_by_date flag of this CustomerObject."""
    return self._ship_by_date

@ship_by_date.setter
def ship_by_date(self, value):
    self._ship_by_date = value
@property
def is_buying(self):
    """bool: the is_buying flag of this CustomerObject."""
    return self._is_buying

@is_buying.setter
def is_buying(self, value):
    self._is_buying = value

@property
def is_selling(self):
    """bool: the is_selling flag of this CustomerObject."""
    return self._is_selling

@is_selling.setter
def is_selling(self, value):
    self._is_selling = value

@property
def billing(self):
    """BillingObject: the billing details of this CustomerObject."""
    return self._billing

@billing.setter
def billing(self, value):
    self._billing = value

@property
def shipping(self):
    """CustomerObjectShipping: the shipping details of this CustomerObject."""
    return self._shipping

@shipping.setter
def shipping(self, value):
    self._shipping = value

@property
def cc_only(self):
    """BillingObject: the cc_only billing details of this CustomerObject."""
    return self._cc_only

@cc_only.setter
def cc_only(self, value):
    self._cc_only = value

@property
def merchant(self):
    """CustomerObjectMerchant: the merchant details of this CustomerObject."""
    return self._merchant

@merchant.setter
def merchant(self, value):
    self._merchant = value

@property
def promotion_code(self):
    """str: the promotion_code of this CustomerObject."""
    return self._promotion_code

@promotion_code.setter
def promotion_code(self, value):
    self._promotion_code = value

@property
def paypal_emails(self):
    """str: the paypal_emails of this CustomerObject."""
    return self._paypal_emails

@paypal_emails.setter
def paypal_emails(self, value):
    self._paypal_emails = value

@property
def authorization_method(self):
    """str: the authorization_method of this CustomerObject."""
    return self._authorization_method

@authorization_method.setter
def authorization_method(self, value):
    self._authorization_method = value

@property
def security_override(self):
    """bool: the security_override flag of this CustomerObject."""
    return self._security_override

@security_override.setter
def security_override(self, value):
    self._security_override = value
@property
def team_member(self):
    """bool: the team_member flag of this CustomerObject."""
    return self._team_member

@team_member.setter
def team_member(self, value):
    self._team_member = value

@property
def password_locked(self):
    """bool: the password_locked flag of this CustomerObject."""
    return self._password_locked

@password_locked.setter
def password_locked(self, value):
    self._password_locked = value

@property
def address_normalize_override(self):
    """bool: the address_normalize_override flag of this CustomerObject."""
    return self._address_normalize_override

@address_normalize_override.setter
def address_normalize_override(self, value):
    self._address_normalize_override = value

@property
def early_payout_enabled(self):
    """bool: the early_payout_enabled flag of this CustomerObject."""
    return self._early_payout_enabled

@early_payout_enabled.setter
def early_payout_enabled(self, value):
    self._early_payout_enabled = value

@property
def early_payout_eligible(self):
    """bool: the early_payout_eligible flag of this CustomerObject."""
    return self._early_payout_eligible

@early_payout_eligible.setter
def early_payout_eligible(self, value):
    self._early_payout_eligible = value

@property
def security(self):
    """CustomerObjectSecurity: the security details of this CustomerObject."""
    return self._security

@security.setter
def security(self, value):
    self._security = value
def to_dict(self):
    """Return the model's declared attributes as a plain dict.

    Nested models (anything exposing a ``to_dict`` method) are converted
    recursively, including when they appear inside lists or dict values.

    :return: mapping of attribute name to plain value
    :rtype: dict
    """
    result = {}
    # Iterate the declared attribute names directly; plain dict iteration
    # replaces the legacy six.iteritems() Python-2 compatibility shim.
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                            for v in value]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Convert dict values that are themselves models; keys pass through.
            result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                            for k, v in value.items()}
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return a pretty-printed string form of the model's attributes."""
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
def __repr__(self):
    """Developer-facing representation; delegates to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """Two customers compare equal when all of their attributes match."""
    return isinstance(other, CustomerObject) and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Inverse of __eq__."""
    return not self.__eq__(other)
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CustomerObjectMerchant(object):
    """Merchant-specific settings attached to a CustomerObject.

    NOTE: This class is auto generated by the swagger code generator program —
    prefer regenerating over hand-editing.
    """

    # Maps attribute name -> declared swagger type.
    swagger_types = {
        'merchant_id': 'str',
        'paypal_email': 'str',
        'preferred_payout': 'str',
        'account_name': 'str'
    }

    # Maps attribute name -> JSON key used on the wire.
    attribute_map = {
        'merchant_id': 'merchantId',
        'paypal_email': 'paypalEmail',
        'preferred_payout': 'preferredPayout',
        'account_name': 'accountName'
    }

    def __init__(self, merchant_id=None, paypal_email=None, preferred_payout=None, account_name=None):  # noqa: E501
        """CustomerObjectMerchant - a model defined in Swagger.

        All arguments are optional; attributes left as None are still emitted
        by to_dict().
        """
        self._merchant_id = None
        self._paypal_email = None
        self._preferred_payout = None
        self._account_name = None
        self.discriminator = None
        # Route supplied values through the property setters.
        if merchant_id is not None:
            self.merchant_id = merchant_id
        if paypal_email is not None:
            self.paypal_email = paypal_email
        if preferred_payout is not None:
            self.preferred_payout = preferred_payout
        if account_name is not None:
            self.account_name = account_name

    @property
    def merchant_id(self):
        """str: the merchant_id of this CustomerObjectMerchant."""
        return self._merchant_id

    @merchant_id.setter
    def merchant_id(self, value):
        self._merchant_id = value

    @property
    def paypal_email(self):
        """str: the paypal_email of this CustomerObjectMerchant."""
        return self._paypal_email

    @paypal_email.setter
    def paypal_email(self, value):
        self._paypal_email = value

    @property
    def preferred_payout(self):
        """str: the preferred_payout of this CustomerObjectMerchant."""
        return self._preferred_payout

    @preferred_payout.setter
    def preferred_payout(self, value):
        self._preferred_payout = value

    @property
    def account_name(self):
        """str: the account_name of this CustomerObjectMerchant."""
        return self._account_name

    @account_name.setter
    def account_name(self, value):
        self._account_name = value

    def to_dict(self):
        """Return the model's declared attributes as a plain dict.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, including inside lists and dict values.
        """
        result = {}
        # Plain dict iteration replaces the legacy six.iteritems() py2 shim.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal attributes."""
        return (isinstance(other, CustomerObjectMerchant)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
--- FILE SEPARATOR ---
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.address_object import AddressObject # noqa: F401,E501
class BillingObject(object):
    """Payment-instrument (card / account) details for a customer.

    NOTE: This class is auto generated by the swagger code generator program —
    prefer regenerating over hand-editing.
    """

    # Maps attribute name -> declared swagger type.
    swagger_types = {
        'card_type': 'str',
        'token': 'str',
        'last4': 'str',
        'account_email': 'str',
        'expiration_date': 'str',
        'cardholder_name': 'str',
        'address': 'AddressObject'
    }

    # Maps attribute name -> JSON key used on the wire.
    attribute_map = {
        'card_type': 'cardType',
        'token': 'token',
        'last4': 'last4',
        'account_email': 'accountEmail',
        'expiration_date': 'expirationDate',
        'cardholder_name': 'cardholderName',
        'address': 'Address'
    }

    def __init__(self, card_type=None, token=None, last4=None, account_email=None, expiration_date=None, cardholder_name=None, address=None):  # noqa: E501
        """BillingObject - a model defined in Swagger.

        All arguments are optional; attributes left as None are still emitted
        by to_dict().
        """
        self._card_type = None
        self._token = None
        self._last4 = None
        self._account_email = None
        self._expiration_date = None
        self._cardholder_name = None
        self._address = None
        self.discriminator = None
        # Route supplied values through the property setters.
        if card_type is not None:
            self.card_type = card_type
        if token is not None:
            self.token = token
        if last4 is not None:
            self.last4 = last4
        if account_email is not None:
            self.account_email = account_email
        if expiration_date is not None:
            self.expiration_date = expiration_date
        if cardholder_name is not None:
            self.cardholder_name = cardholder_name
        if address is not None:
            self.address = address

    @property
    def card_type(self):
        """str: the card_type of this BillingObject."""
        return self._card_type

    @card_type.setter
    def card_type(self, value):
        self._card_type = value

    @property
    def token(self):
        """str: the token of this BillingObject."""
        return self._token

    @token.setter
    def token(self, value):
        self._token = value

    @property
    def last4(self):
        """str: the last4 digits of this BillingObject's card."""
        return self._last4

    @last4.setter
    def last4(self, value):
        self._last4 = value

    @property
    def account_email(self):
        """str: the account_email of this BillingObject."""
        return self._account_email

    @account_email.setter
    def account_email(self, value):
        self._account_email = value

    @property
    def expiration_date(self):
        """str: the expiration_date of this BillingObject."""
        return self._expiration_date

    @expiration_date.setter
    def expiration_date(self, value):
        self._expiration_date = value

    @property
    def cardholder_name(self):
        """str: the cardholder_name of this BillingObject."""
        return self._cardholder_name

    @cardholder_name.setter
    def cardholder_name(self, value):
        self._cardholder_name = value

    @property
    def address(self):
        """AddressObject: the billing address of this BillingObject."""
        return self._address

    @address.setter
    def address(self, value):
        self._address = value

    def to_dict(self):
        """Return the model's declared attributes as a plain dict.

        Nested models (anything exposing ``to_dict``) are converted
        recursively, including inside lists and dict values.
        """
        result = {}
        # Plain dict iteration replaces the legacy six.iteritems() py2 shim.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal attributes."""
        return (isinstance(other, BillingObject)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
{
"imported_by": [
"/sdk/python/lib/build/lib/io_stockx/models/__init__.py"
],
"imports": [
"/sdk/python/lib/io_stockx/models/customer_object_merchant.py",
"/sdk/python/lib/build/lib/io_stockx/models/billing_object.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/io_stockx/models/search_hit.py
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from io_stockx.models.search_hit_media import SearchHitMedia # noqa: F401,E501
from io_stockx.models.search_hit_searchable_traits import SearchHitSearchableTraits # noqa: F401,E501
class SearchHit(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'brand': 'str',
'thumbnail_url': 'str',
'media': 'SearchHitMedia',
'url': 'str',
'release_date': 'str',
'categories': 'list[str]',
'product_category': 'str',
'ticker_symbol': 'str',
'style_id': 'str',
'make': 'str',
'model': 'str',
'short_description': 'str',
'gender': 'str',
'colorway': 'str',
'price': 'int',
'description': 'str',
'highest_bid': 'str',
'total_dollars': 'str',
'lowest_ask': 'str',
'last_sale': 'str',
'sales_last_72': 'int',
'deadstock_sold': 'int',
'quality_bid': 'int',
'active': 'int',
'new_release': 'str',
'searchable_traits': 'SearchHitSearchableTraits',
'object_id': 'str',
'annual_high': 'str',
'annual_low': 'str',
'deadstock_range_low': 'str',
'deadstock_range_high': 'str',
'average_deadstock_price': 'str',
'change_value': 'str'
}
attribute_map = {
'name': 'name',
'brand': 'brand',
'thumbnail_url': 'thumbnail_url',
'media': 'media',
'url': 'url',
'release_date': 'release_date',
'categories': 'categories',
'product_category': 'product_category',
'ticker_symbol': 'ticker_symbol',
'style_id': 'style_id',
'make': 'make',
'model': 'model',
'short_description': 'short_description',
'gender': 'gender',
'colorway': 'colorway',
'price': 'price',
'description': 'description',
'highest_bid': 'highest_bid',
'total_dollars': 'total_dollars',
'lowest_ask': 'lowest_ask',
'last_sale': 'last_sale',
'sales_last_72': 'sales_last_72',
'deadstock_sold': 'deadstock_sold',
'quality_bid': 'quality_bid',
'active': 'active',
'new_release': 'new_release',
'searchable_traits': 'searchable_traits',
'object_id': 'objectID',
'annual_high': 'annual_high',
'annual_low': 'annual_low',
'deadstock_range_low': 'deadstock_range_low',
'deadstock_range_high': 'deadstock_range_high',
'average_deadstock_price': 'average_deadstock_price',
'change_value': 'change_value'
}
def __init__(self, name=None, brand=None, thumbnail_url=None, media=None, url=None, release_date=None, categories=None, product_category=None, ticker_symbol=None, style_id=None, make=None, model=None, short_description=None, gender=None, colorway=None, price=None, description=None, highest_bid=None, total_dollars=None, lowest_ask=None, last_sale=None, sales_last_72=None, deadstock_sold=None, quality_bid=None, active=None, new_release=None, searchable_traits=None, object_id=None, annual_high=None, annual_low=None, deadstock_range_low=None, deadstock_range_high=None, average_deadstock_price=None, change_value=None):  # noqa: E501
    """SearchHit - a model defined in Swagger"""  # noqa: E501
    # Snapshot the constructor arguments before any other locals exist;
    # parameter names match swagger_types keys exactly.
    supplied = dict(locals())
    supplied.pop("self")
    # Every declared attribute starts out as None on its private backing name.
    for attr in self.swagger_types:
        setattr(self, "_" + attr, None)
    self.discriminator = None
    # Route non-None arguments through their property setters, mirroring the
    # generated per-argument `if ... is not None` chain.
    for attr, value in supplied.items():
        if value is not None:
            setattr(self, attr, value)
# --- SearchHit accessors: basic product fields (auto-generated model) ---

@property
def name(self):
    """str: the name of this SearchHit."""
    return self._name

@name.setter
def name(self, value):
    self._name = value

@property
def brand(self):
    """str: the brand of this SearchHit."""
    return self._brand

@brand.setter
def brand(self, value):
    self._brand = value

@property
def thumbnail_url(self):
    """str: the thumbnail_url of this SearchHit."""
    return self._thumbnail_url

@thumbnail_url.setter
def thumbnail_url(self, value):
    self._thumbnail_url = value

@property
def media(self):
    """SearchHitMedia: the media of this SearchHit."""
    return self._media

@media.setter
def media(self, value):
    self._media = value

@property
def url(self):
    """str: the url of this SearchHit."""
    return self._url

@url.setter
def url(self, value):
    self._url = value

@property
def release_date(self):
    """str: the release_date of this SearchHit."""
    return self._release_date

@release_date.setter
def release_date(self, value):
    self._release_date = value

@property
def categories(self):
    """list[str]: the categories of this SearchHit."""
    return self._categories

@categories.setter
def categories(self, value):
    self._categories = value

@property
def product_category(self):
    """str: the product_category of this SearchHit."""
    return self._product_category

@product_category.setter
def product_category(self, value):
    self._product_category = value

@property
def ticker_symbol(self):
    """str: the ticker_symbol of this SearchHit."""
    return self._ticker_symbol

@ticker_symbol.setter
def ticker_symbol(self, value):
    self._ticker_symbol = value
# --- SearchHit accessors: style / pricing fields (auto-generated model) ---

@property
def style_id(self):
    """str: the style_id of this SearchHit."""
    return self._style_id

@style_id.setter
def style_id(self, value):
    self._style_id = value

@property
def make(self):
    """str: the make of this SearchHit."""
    return self._make

@make.setter
def make(self, value):
    self._make = value

@property
def model(self):
    """str: the model of this SearchHit."""
    return self._model

@model.setter
def model(self, value):
    self._model = value

@property
def short_description(self):
    """str: the short_description of this SearchHit."""
    return self._short_description

@short_description.setter
def short_description(self, value):
    self._short_description = value

@property
def gender(self):
    """str: the gender of this SearchHit."""
    return self._gender

@gender.setter
def gender(self, value):
    self._gender = value

@property
def colorway(self):
    """str: the colorway of this SearchHit."""
    return self._colorway

@colorway.setter
def colorway(self, value):
    self._colorway = value

@property
def price(self):
    """int: the price of this SearchHit."""
    return self._price

@price.setter
def price(self, value):
    self._price = value

@property
def description(self):
    """str: the description of this SearchHit."""
    return self._description

@description.setter
def description(self, value):
    self._description = value

@property
def highest_bid(self):
    """str: the highest_bid of this SearchHit."""
    return self._highest_bid

@highest_bid.setter
def highest_bid(self, value):
    self._highest_bid = value
# --- SearchHit accessors: market statistics fields (auto-generated model) ---

@property
def total_dollars(self):
    """str: the total_dollars of this SearchHit."""
    return self._total_dollars

@total_dollars.setter
def total_dollars(self, value):
    self._total_dollars = value

@property
def lowest_ask(self):
    """str: the lowest_ask of this SearchHit."""
    return self._lowest_ask

@lowest_ask.setter
def lowest_ask(self, value):
    self._lowest_ask = value

@property
def last_sale(self):
    """str: the last_sale of this SearchHit."""
    return self._last_sale

@last_sale.setter
def last_sale(self, value):
    self._last_sale = value

@property
def sales_last_72(self):
    """int: the sales_last_72 of this SearchHit."""
    return self._sales_last_72

@sales_last_72.setter
def sales_last_72(self, value):
    self._sales_last_72 = value

@property
def deadstock_sold(self):
    """int: the deadstock_sold of this SearchHit."""
    return self._deadstock_sold

@deadstock_sold.setter
def deadstock_sold(self, value):
    self._deadstock_sold = value

@property
def quality_bid(self):
    """int: the quality_bid of this SearchHit."""
    return self._quality_bid

@quality_bid.setter
def quality_bid(self, value):
    self._quality_bid = value

@property
def active(self):
    """int: the active of this SearchHit."""
    return self._active

@active.setter
def active(self, value):
    self._active = value

@property
def new_release(self):
    """str: the new_release of this SearchHit."""
    return self._new_release

@new_release.setter
def new_release(self, value):
    self._new_release = value
# --- SearchHit accessors: traits / annual range fields (auto-generated model) ---

@property
def searchable_traits(self):
    """SearchHitSearchableTraits: the searchable_traits of this SearchHit."""
    return self._searchable_traits

@searchable_traits.setter
def searchable_traits(self, value):
    self._searchable_traits = value

@property
def object_id(self):
    """str: the object_id of this SearchHit."""
    return self._object_id

@object_id.setter
def object_id(self, value):
    self._object_id = value

@property
def annual_high(self):
    """str: the annual_high of this SearchHit."""
    return self._annual_high

@annual_high.setter
def annual_high(self, value):
    self._annual_high = value

@property
def annual_low(self):
    """str: the annual_low of this SearchHit."""
    return self._annual_low

@annual_low.setter
def annual_low(self, value):
    self._annual_low = value

@property
def deadstock_range_low(self):
    """str: the deadstock_range_low of this SearchHit."""
    return self._deadstock_range_low

@deadstock_range_low.setter
def deadstock_range_low(self, value):
    self._deadstock_range_low = value

@property
def deadstock_range_high(self):
    """str: the deadstock_range_high of this SearchHit."""
    return self._deadstock_range_high

@deadstock_range_high.setter
def deadstock_range_high(self, value):
    self._deadstock_range_high = value

@property
def average_deadstock_price(self):
    """str: the average_deadstock_price of this SearchHit."""
    return self._average_deadstock_price

@average_deadstock_price.setter
def average_deadstock_price(self, value):
    self._average_deadstock_price = value

@property
def change_value(self):
    """str: the change_value of this SearchHit."""
    return self._change_value

@change_value.setter
def change_value(self, value):
    self._change_value = value
def to_dict(self):
    """Return the model's attributes as a plain dict.

    Nested models (anything exposing ``to_dict``) are converted
    recursively, including inside lists and dict values.
    """
    result = {}
    # swagger_types enumerates every declared attribute of the model.
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                            for item in value]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                            for k, v in value.items()}
        else:
            result[attr] = value
    return result
def to_str(self):
    """Pretty-printed string form of the model's dict representation."""
    return pprint.pformat(self.to_dict())

def __repr__(self):
    """Reuse the pretty-printed form for both `print` and `pprint`."""
    return self.to_str()

def __eq__(self, other):
    """Equal when the other object is a SearchHit with identical attributes."""
    return isinstance(other, SearchHit) and self.__dict__ == other.__dict__

def __ne__(self, other):
    """Inverse of __eq__."""
    return not self == other
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SearchHitSearchableTraits(object):
    """Searchable trait fields attached to a StockX search hit.

    NOTE: auto-generated by the swagger code generator program; keep
    changes in sync with the API specification.
    """

    # Attribute name -> swagger type name.
    swagger_types = {
        'style': 'str',
        'colorway': 'str',
        'retail_price': 'int',
        'release_date': 'str'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'style': 'Style',
        'colorway': 'Colorway',
        'retail_price': 'Retail Price',
        'release_date': 'Release Date'
    }

    def __init__(self, style=None, colorway=None, retail_price=None, release_date=None):  # noqa: E501
        """Create the model; every field is optional and defaults to None."""
        self._style = None
        self._colorway = None
        self._retail_price = None
        self._release_date = None
        self.discriminator = None
        if style is not None:
            self.style = style
        if colorway is not None:
            self.colorway = colorway
        if retail_price is not None:
            self.retail_price = retail_price
        if release_date is not None:
            self.release_date = release_date

    @property
    def style(self):
        """str: the style of this SearchHitSearchableTraits."""
        return self._style

    @style.setter
    def style(self, value):
        self._style = value

    @property
    def colorway(self):
        """str: the colorway of this SearchHitSearchableTraits."""
        return self._colorway

    @colorway.setter
    def colorway(self, value):
        self._colorway = value

    @property
    def retail_price(self):
        """int: the retail_price of this SearchHitSearchableTraits."""
        return self._retail_price

    @retail_price.setter
    def retail_price(self, value):
        self._retail_price = value

    @property
    def release_date(self):
        """str: the release_date of this SearchHitSearchableTraits."""
        return self._release_date

    @release_date.setter
    def release_date(self, value):
        self._release_date = value

    def to_dict(self):
        """Return the model's properties as a plain dict (recursing into nested models)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Pretty-printed string of the model's dict form."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Reuse the pretty-printed form for both `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when the other object is the same model type with identical attributes."""
        return (isinstance(other, SearchHitSearchableTraits)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
{
"imported_by": [
"/sdk/python/lib/build/lib/io_stockx/models/__init__.py",
"/sdk/python/lib/build/lib/io_stockx/models/search_results.py"
],
"imports": [
"/sdk/python/lib/build/lib/io_stockx/models/search_hit_searchable_traits.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/lib/test/test_stock_x_api.py
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import io_stockx
from io_stockx.api.stock_x_api import StockXApi # noqa: E501
from io_stockx.rest import ApiException
class TestStockXApi(unittest.TestCase):
    """Unit-test stubs for StockXApi — one placeholder case per API operation."""

    def setUp(self):
        # Build a real client instance; the individual cases remain stubs.
        self.api = io_stockx.api.stock_x_api.StockXApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_delete_portfolio(self):
        """Stub: deletes a portfolio item from the market with the specified id."""
        pass

    def test_delete_webhook(self):
        """Stub: delete_webhook."""
        pass

    def test_get_open_orders(self):
        """Stub: get_open_orders."""
        pass

    def test_get_portfolio(self):
        """Stub: returns a market portfolio identified by request parameters."""
        pass

    def test_get_portfolio_item(self):
        """Stub: get_portfolio_item."""
        pass

    def test_get_product_by_id(self):
        """Stub: get_product_by_id."""
        pass

    def test_get_product_market_data(self):
        """Stub: provides historical market data for a given product."""
        pass

    def test_get_subscriptions(self):
        """Stub: get_subscriptions."""
        pass

    def test_get_webhook(self):
        """Stub: get_webhook."""
        pass

    def test_get_webhooks(self):
        """Stub: get_webhooks."""
        pass

    def test_login(self):
        """Stub: attempts to log the user in with a username and password."""
        pass

    def test_lookup_product(self):
        """Stub: lookup_product."""
        pass

    def test_new_portfolio_ask(self):
        """Stub: creates a new seller ask on the market for a given product."""
        pass

    def test_new_portfolio_bid(self):
        """Stub: creates a new buyer bid on the market for a given product."""
        pass

    def test_post_webhooks(self):
        """Stub: post_webhooks."""
        pass

    def test_search(self):
        """Stub: searches for products by keyword."""
        pass
if __name__ == '__main__':
    # Run the stub suite with unittest's CLI runner when executed directly.
    unittest.main()
|
# coding: utf-8
"""
StockX API
PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from io_stockx.api_client import ApiClient
class StockXApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Wrap a configured ApiClient; build a default one when none is given.

    :param api_client: optional preconfigured ApiClient instance.
    """
    self.api_client = ApiClient() if api_client is None else api_client
def delete_portfolio(self, id, portfolio, **kwargs):  # noqa: E501
    """Deletes a portfolio item from the market with the specified id.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the response.

    :param str id: The id of the portfolio item to delete. (required)
    :param PortfolioIdDelRequest portfolio: The request information for the portfolio delete operation. (required)
    :return: PortfolioIdDelResponse, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both branches of the generated sync/async split returned the same
    # call result, so a single return is equivalent.
    return self.delete_portfolio_with_http_info(id, portfolio, **kwargs)  # noqa: E501
def delete_portfolio_with_http_info(self, id, portfolio, **kwargs):  # noqa: E501
    """Deletes a portfolio item from the market with the specified id.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the response.

    :param str id: The id of the portfolio item to delete. (required)
    :param PortfolioIdDelRequest portfolio: The request information for the portfolio delete operation. (required)
    :return: PortfolioIdDelResponse, or the request thread when called asynchronously.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when a required parameter is missing.
    """
    # Accepted keyword arguments, including the generated transport options.
    all_params = ['id', 'portfolio', 'async', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    params = locals()
    # Native dict iteration replaces six.iteritems (equivalent on py2/py3).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_portfolio" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_portfolio`")  # noqa: E501
    # verify the required parameter 'portfolio' is set
    if ('portfolio' not in params or
            params['portfolio'] is None):
        raise ValueError("Missing the required parameter `portfolio` when calling `delete_portfolio`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}

    body_params = None
    if 'portfolio' in params:
        body_params = params['portfolio']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/v1/portfolio/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PortfolioIdDelResponse',  # noqa: E501
        auth_settings=auth_settings,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so it
        # cannot appear as a literal keyword argument; splat it from a dict
        # to keep the `async` parameter name the api_client expects.
        **{'async': params.get('async')},
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_webhook(self, id, **kwargs):  # noqa: E501
    """delete_webhook: remove the webhook with the given id.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the response.

    :param str id: (required)
    :return: None, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # The generated sync/async branches returned the same call result.
    return self.delete_webhook_with_http_info(id, **kwargs)  # noqa: E501
def delete_webhook_with_http_info(self, id, **kwargs):  # noqa: E501
    """delete_webhook: remove the webhook with the given id.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the response.

    :param str id: (required)
    :return: None, or the request thread when called asynchronously.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when ``id`` is missing.
    """
    # Accepted keyword arguments, including the generated transport options.
    all_params = ['id', 'async', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    params = locals()
    # Native dict iteration replaces six.iteritems (equivalent on py2/py3).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_webhook" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `delete_webhook`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['api_key']  # noqa: E501

    return self.api_client.call_api(
        '/webhook/v1/webhooks/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        # BUG FIX: `async` is a reserved keyword since Python 3.7; splat it
        # from a dict to keep the parameter name the api_client expects.
        **{'async': params.get('async')},
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_open_orders(self, id, **kwargs):  # noqa: E501
    """get_open_orders: list a customer's currently open sell orders.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the response.

    :param str id: The customer id to lookup open orders with. (required)
    :return: CustomersIdSellingCurrent, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # The generated sync/async branches returned the same call result.
    return self.get_open_orders_with_http_info(id, **kwargs)  # noqa: E501
def get_open_orders_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_open_orders: list a customer's currently open sell orders.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the response.

    :param str id: The customer id to lookup open orders with. (required)
    :return: CustomersIdSellingCurrent, or the request thread when called asynchronously.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when ``id`` is missing.
    """
    # Accepted keyword arguments, including the generated transport options.
    all_params = ['id', 'async', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    params = locals()
    # Native dict iteration replaces six.iteritems (equivalent on py2/py3).
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_open_orders" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_open_orders`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['JWT', 'api_key']  # noqa: E501

    return self.api_client.call_api(
        '/v1/customers/{id}/selling/current', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='CustomersIdSellingCurrent',  # noqa: E501
        auth_settings=auth_settings,
        # BUG FIX: `async` is a reserved keyword since Python 3.7; splat it
        # from a dict to keep the parameter name the api_client expects.
        **{'async': params.get('async')},
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_portfolio(self, portfolio, **kwargs):  # noqa: E501
    """Returns a market portfolio identified by request parameters.

    Synchronous by default; pass ``async=True`` to receive the request
    thread instead of the response.

    :param PortfolioRequest portfolio: Requests parameters for looking up a market portfolio. (required)
    :return: PortfolioResponse, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # The generated sync/async branches returned the same call result.
    return self.get_portfolio_with_http_info(portfolio, **kwargs)  # noqa: E501
def get_portfolio_with_http_info(self, portfolio, **kwargs): # noqa: E501
"""Returns a market portfolio identified by request parameters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_portfolio_with_http_info(portfolio, async=True)
>>> result = thread.get()
:param async bool
:param PortfolioRequest portfolio: Requests parameters for looking up a market portfolio. (required)
:return: PortfolioResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['portfolio'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_portfolio" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'portfolio' is set
if ('portfolio' not in params or
params['portfolio'] is None):
raise ValueError("Missing the required parameter `portfolio` when calling `get_portfolio`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'portfolio' in params:
body_params = params['portfolio']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/portfolio', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortfolioResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_portfolio_item(self, id, **kwargs): # noqa: E501
"""get_portfolio_item # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_portfolio_item(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The id of the portfolio item to lookup. (required)
:return: PortfolioitemsIdGetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_portfolio_item_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_portfolio_item_with_http_info(id, **kwargs) # noqa: E501
return data
def get_portfolio_item_with_http_info(self, id, **kwargs): # noqa: E501
"""get_portfolio_item # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_portfolio_item_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The id of the portfolio item to lookup. (required)
:return: PortfolioitemsIdGetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_portfolio_item" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_portfolio_item`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['JWT', 'api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/portfolioitems/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortfolioitemsIdGetResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_by_id(self, id, **kwargs): # noqa: E501
"""get_product_by_id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_product_by_id(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The id of the product to return. (required)
:param str include:
:return: ProductResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_product_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_product_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_product_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""get_product_by_id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_product_by_id_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The id of the product to return. (required)
:param str include:
:return: ProductResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'include'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_product_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'include' in params:
query_params.append(('include', params['include'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['JWT', 'api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/products/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProductResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_market_data(self, product_id, **kwargs): # noqa: E501
"""Provides historical market data for a given product. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_product_market_data(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: The product's product UUID (required)
:param str sku: The product's SKU
:return: MarketData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_product_market_data_with_http_info(product_id, **kwargs) # noqa: E501
else:
(data) = self.get_product_market_data_with_http_info(product_id, **kwargs) # noqa: E501
return data
def get_product_market_data_with_http_info(self, product_id, **kwargs): # noqa: E501
"""Provides historical market data for a given product. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_product_market_data_with_http_info(product_id, async=True)
>>> result = thread.get()
:param async bool
:param str product_id: The product's product UUID (required)
:param str sku: The product's SKU
:return: MarketData
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'sku'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_market_data" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params or
params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_market_data`") # noqa: E501
collection_formats = {}
path_params = {}
if 'product_id' in params:
path_params['productId'] = params['product_id'] # noqa: E501
query_params = []
if 'sku' in params:
query_params.append(('sku', params['sku'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['JWT', 'api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/products/{productId}/market', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MarketData', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_subscriptions(self, **kwargs): # noqa: E501
"""get_subscriptions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_subscriptions(async=True)
>>> result = thread.get()
:param async bool
:return: SubscriptionsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_subscriptions_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_subscriptions_with_http_info(**kwargs) # noqa: E501
return data
def get_subscriptions_with_http_info(self, **kwargs): # noqa: E501
"""get_subscriptions # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_subscriptions_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: SubscriptionsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_subscriptions" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/webhook/v1/subscriptions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionsResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_webhook(self, id, **kwargs): # noqa: E501
"""get_webhook # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_webhook(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: (required)
:return: WebhooksIdGetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_webhook_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_webhook_with_http_info(id, **kwargs) # noqa: E501
return data
def get_webhook_with_http_info(self, id, **kwargs): # noqa: E501
"""get_webhook # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_webhook_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: (required)
:return: WebhooksIdGetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_webhook" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_webhook`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/webhook/v1/webhooks/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebhooksIdGetResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_webhooks(self, **kwargs): # noqa: E501
"""get_webhooks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_webhooks(async=True)
>>> result = thread.get()
:param async bool
:return: WebhooksGetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_webhooks_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_webhooks_with_http_info(**kwargs) # noqa: E501
return data
def get_webhooks_with_http_info(self, **kwargs): # noqa: E501
"""get_webhooks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_webhooks_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: WebhooksGetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_webhooks" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/webhook/v1/webhooks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebhooksGetResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def login(self, login, **kwargs): # noqa: E501
"""Attempts to log the user in with a username and password. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.login(login, async=True)
>>> result = thread.get()
:param async bool
:param LoginRequest login: Object that contains the user's authentication credentials.' (required)
:return: LoginResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.login_with_http_info(login, **kwargs) # noqa: E501
else:
(data) = self.login_with_http_info(login, **kwargs) # noqa: E501
return data
def login_with_http_info(self, login, **kwargs): # noqa: E501
"""Attempts to log the user in with a username and password. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.login_with_http_info(login, async=True)
>>> result = thread.get()
:param async bool
:param LoginRequest login: Object that contains the user's authentication credentials.' (required)
:return: LoginResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['login'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method login" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'login' is set
if ('login' not in params or
params['login'] is None):
raise ValueError("Missing the required parameter `login` when calling `login`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'login' in params:
body_params = params['login']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/login', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LoginResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def lookup_product(self, **kwargs): # noqa: E501
"""lookup_product # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.lookup_product(async=True)
>>> result = thread.get()
:param async bool
:param str identifier: The product identifier to lookup, e.g. (air-jordan-1-retro-high-off-white-chicago)
:param str size: The size of the product.
:return: ProductInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.lookup_product_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.lookup_product_with_http_info(**kwargs) # noqa: E501
return data
def lookup_product_with_http_info(self, **kwargs): # noqa: E501
"""lookup_product # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.lookup_product_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str identifier: The product identifier to lookup, e.g. (air-jordan-1-retro-high-off-white-chicago)
:param str size: The size of the product.
:return: ProductInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['identifier', 'size'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method lookup_product" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'identifier' in params:
query_params.append(('identifier', params['identifier'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['JWT', 'api_key'] # noqa: E501
return self.api_client.call_api(
'/product/lookup', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProductInfo', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def new_portfolio_ask(self, portfolio, **kwargs): # noqa: E501
"""Creates a new seller ask on the market for a given product. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.new_portfolio_ask(portfolio, async=True)
>>> result = thread.get()
:param async bool
:param PortfolioRequest portfolio: The portfolio request representing the ask to place on the market. (required)
:return: PortfolioResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.new_portfolio_ask_with_http_info(portfolio, **kwargs) # noqa: E501
else:
(data) = self.new_portfolio_ask_with_http_info(portfolio, **kwargs) # noqa: E501
return data
def new_portfolio_ask_with_http_info(self, portfolio, **kwargs): # noqa: E501
"""Creates a new seller ask on the market for a given product. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.new_portfolio_ask_with_http_info(portfolio, async=True)
>>> result = thread.get()
:param async bool
:param PortfolioRequest portfolio: The portfolio request representing the ask to place on the market. (required)
:return: PortfolioResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['portfolio'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_portfolio_ask" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'portfolio' is set
if ('portfolio' not in params or
params['portfolio'] is None):
raise ValueError("Missing the required parameter `portfolio` when calling `new_portfolio_ask`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'portfolio' in params:
body_params = params['portfolio']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/portfolio/ask', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortfolioResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def new_portfolio_bid(self, portfolio, **kwargs): # noqa: E501
"""Creates a new buyer bid on the market for a given product. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.new_portfolio_bid(portfolio, async=True)
>>> result = thread.get()
:param async bool
:param PortfolioRequest portfolio: The portfolio request representing the bid to place on the market. (required)
:return: PortfolioResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.new_portfolio_bid_with_http_info(portfolio, **kwargs) # noqa: E501
else:
(data) = self.new_portfolio_bid_with_http_info(portfolio, **kwargs) # noqa: E501
return data
def new_portfolio_bid_with_http_info(self, portfolio, **kwargs): # noqa: E501
"""Creates a new buyer bid on the market for a given product. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.new_portfolio_bid_with_http_info(portfolio, async=True)
>>> result = thread.get()
:param async bool
:param PortfolioRequest portfolio: The portfolio request representing the bid to place on the market. (required)
:return: PortfolioResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['portfolio'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method new_portfolio_bid" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'portfolio' is set
if ('portfolio' not in params or
params['portfolio'] is None):
raise ValueError("Missing the required parameter `portfolio` when calling `new_portfolio_bid`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'portfolio' in params:
body_params = params['portfolio']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/v1/portfolio/bid', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortfolioResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_webhooks(self, portfolio, **kwargs): # noqa: E501
"""post_webhooks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_webhooks(portfolio, async=True)
>>> result = thread.get()
:param async bool
:param WebhooksPostRequest portfolio: (required)
:return: WebhooksPostResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.post_webhooks_with_http_info(portfolio, **kwargs) # noqa: E501
else:
(data) = self.post_webhooks_with_http_info(portfolio, **kwargs) # noqa: E501
return data
def post_webhooks_with_http_info(self, portfolio, **kwargs): # noqa: E501
"""post_webhooks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.post_webhooks_with_http_info(portfolio, async=True)
>>> result = thread.get()
:param async bool
:param WebhooksPostRequest portfolio: (required)
:return: WebhooksPostResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['portfolio'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_webhooks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'portfolio' is set
if ('portfolio' not in params or
params['portfolio'] is None):
raise ValueError("Missing the required parameter `portfolio` when calling `post_webhooks`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'portfolio' in params:
body_params = params['portfolio']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/webhook/v1/webhooks', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WebhooksPostResponse', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search(self, query, **kwargs): # noqa: E501
"""Searches for products by keyword. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.search(query, async=True)
>>> result = thread.get()
:param async bool
:param str query: The phrase or keyword to search with. (required)
:return: SearchResults
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.search_with_http_info(query, **kwargs) # noqa: E501
else:
(data) = self.search_with_http_info(query, **kwargs) # noqa: E501
return data
def search_with_http_info(self, query, **kwargs): # noqa: E501
"""Searches for products by keyword. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.search_with_http_info(query, async=True)
>>> result = thread.get()
:param async bool
:param str query: The phrase or keyword to search with. (required)
:return: SearchResults
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['query'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'query' is set
if ('query' not in params or
params['query'] is None):
raise ValueError("Missing the required parameter `query` when calling `search`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'query' in params:
query_params.append(('query', params['query'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['JWT', 'api_key'] # noqa: E501
return self.api_client.call_api(
'/v2/search', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SearchResults', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
{
"imported_by": [],
"imports": [
"/sdk/python/lib/build/lib/io_stockx/api/stock_x_api.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/src/login.py
|
from __future__ import print_function
import time
import io_stockx
from example_constants import ExampleConstants
from io_stockx.rest import ApiException
from pprint import pprint
# Configure API key authorization: api_key
configuration = io_stockx.Configuration()
configuration.host = "https://gateway.stockx.com/stage"
configuration.api_key['x-api-key'] = ExampleConstants.AWS_API_KEY
# Build the API client and a login request from the example credentials.
stockx = io_stockx.StockXApi(io_stockx.ApiClient(configuration))
credentials = io_stockx.LoginRequest(
    email=ExampleConstants.STOCKX_USERNAME,
    password=ExampleConstants.STOCKX_PASSWORD,
)
try:
    # Attempts to log the user in with a username and password.
    pprint(stockx.login(credentials))
except ApiException as e:
    print("Exception when calling StockXApi->login: %s\n" % e)
|
from __future__ import print_function
import time
import io_stockx
from io_stockx.rest import ApiException
from pprint import pprint
class ExampleConstants:
    """Shared placeholder constants for the StockX SDK example scripts.

    Replace the angle-bracket placeholders with real values before running
    any of the examples.
    """
    # AWS API Gateway key, sent as the `x-api-key` header by the examples.
    AWS_API_KEY = "<API Key>"
    # StockX account credentials used by the login examples.
    STOCKX_USERNAME = "<StockX Username>"
    STOCKX_PASSWORD = "<StockX Password>"
    # Demo identifiers — not referenced by the visible example scripts.
    DEMO_PRODUCT_ID = "air-jordan-1-retro-high-off-white-chicago"
    DEMO_CUSTOMER_ID = "1471698"
    # Presumably toggles verbose client debugging — not used in the visible
    # examples; confirm against the SDK before relying on it.
    ENABLE_DEBUG = True
    # Response header carrying the JWT issued at login.
    JWT_HEADER = "Jwt-Authorization"
|
{
"imported_by": [],
"imports": [
"/sdk/python/src/example_constants.py"
]
}
|
stvncrn/stockx_api_ref
|
/sdk/python/src/place_new_lowest_ask_example.py
|
from __future__ import print_function
import time
import io_stockx
from example_constants import ExampleConstants
from io_stockx.rest import ApiException
from pprint import pprint
# Example: log in, search for a product, and place a new ask one dollar
# above the current lowest ask.
# Configure API key authorization: api_key
configuration = io_stockx.Configuration()
configuration.host = "https://gateway.stockx.com/stage"
configuration.api_key['x-api-key'] = ExampleConstants.AWS_API_KEY
# create an instance of the API class
stockx = io_stockx.StockXApi(io_stockx.ApiClient(configuration))
login = io_stockx.LoginRequest(email=ExampleConstants.STOCKX_USERNAME, password=ExampleConstants.STOCKX_PASSWORD)
try:
    # Attempts to log the user in with a username and password.
    api_response = stockx.login_with_http_info(login)
    # Get the customer object after login
    customer = api_response[0]
    # Get the login's assigned jwt token
    jwt_token = api_response[2]['Jwt-Authorization']
    # Use the jwt token to authenticate future requests
    stockx.api_client.set_default_header('jwt-authorization', jwt_token)
    # Search for a type of product
    search_result = stockx.search('Jordan Retro Black Cat')
    first_hit = search_result.hits[0]
    style_id = first_hit.style_id
    # Lookup the first product returned from the search
    product = stockx.lookup_product(identifier=style_id, size='11')
    # Get the current market data for the product (highest bid info, etc.)
    # NOTE(review): `id` and `uuid` shadow the builtin and the stdlib module.
    attributes = product.data[0].attributes
    id = product.data[0].id
    uuid = attributes.product_uuid
    # Get the product market data
    market_data = stockx.get_product_market_data(id, sku=uuid)
    # Get the lowest ask and raise it by one (the code increments, despite
    # the script's name suggesting it undercuts the lowest ask).
    lowest_ask = market_data.market.lowest_ask
    lowest_ask += 1
    # Create a portfolio item request with the new ask amount.
    # NOTE(review): sku_uuid is hard-coded and ignores the `uuid` looked up
    # above — confirm whether that is intentional for the stage environment.
    item = io_stockx.PortfolioRequestPortfolioItem()
    item.amount = lowest_ask
    item.sku_uuid = "bae25b67-a721-4f57-ad5a-79973c7d0a5c"
    item.matched_with_date = "2018-12-12T05:00:00+0000"
    item.expires_at = "2018-12-12T12:39:07+00:00"
    request = io_stockx.PortfolioRequest()
    request.portfolio_item = item
    request.customer = customer
    request.timezone = "America/Detroit"
    # Submit the ask
    ask_resp = stockx.new_portfolio_ask(request)
    pprint(ask_resp)
except ApiException as e:
    print("Exception when calling StockXApi->new_portfolio_ask: %s\n" % e)
|
from __future__ import print_function
import time
import io_stockx
from io_stockx.rest import ApiException
from pprint import pprint
class ExampleConstants:
    """Shared placeholder constants for the StockX SDK example scripts.

    Replace the angle-bracket placeholders with real values before running
    any of the examples.
    """
    # AWS API Gateway key, sent as the `x-api-key` header by the examples.
    AWS_API_KEY = "<API Key>"
    # StockX account credentials used by the login examples.
    STOCKX_USERNAME = "<StockX Username>"
    STOCKX_PASSWORD = "<StockX Password>"
    # Demo identifiers — not referenced by the visible example scripts.
    DEMO_PRODUCT_ID = "air-jordan-1-retro-high-off-white-chicago"
    DEMO_CUSTOMER_ID = "1471698"
    # Presumably toggles verbose client debugging — not used in the visible
    # examples; confirm against the SDK before relying on it.
    ENABLE_DEBUG = True
    # Response header carrying the JWT issued at login.
    JWT_HEADER = "Jwt-Authorization"
|
{
"imported_by": [],
"imports": [
"/sdk/python/src/example_constants.py"
]
}
|
jlamonade/splitteroni
|
/splitter/admin.py
|
from django.contrib import admin
from .models import Bill, Person, Item
# Register your models here.
# Expose every splitter model on the default admin site.
for _model in (Bill, Person, Item):
    admin.site.register(_model)
|
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from .utils import _check_tip_tax_then_add
# Create your models here.
class Bill(models.Model):
    """A restaurant bill; aggregates its items and tip/tax settings."""
    # UUID primary key, so bill URLs are not sequentially guessable.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    # Owner is optional: anonymous bills are tracked via `session` instead.
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, blank=True)
    session = models.CharField(max_length=40, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    # Tip and tax may each be given as a flat amount or a percentage;
    # the percentage, when set, takes precedence in the getters below.
    tip = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tip_percent = models.DecimalField(max_digits=10, decimal_places=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tax_percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='id_index'),
        ]
    def __str__(self):
        # Fall back to the creation timestamp when no title was given.
        if not self.title:
            return self.date_created.strftime("%m/%d/%y %I:%M%p")
        else:
            return self.title.title()
    def get_tax_amount(self):
        """Return the tax as a 2-dp Decimal, or 0 when neither field is set.

        NOTE(review): when `tax_percent` is set, this getter also persists
        the computed amount back onto the stored bill (a DB write inside a
        getter) — callers rely on that ordering.
        """
        subtotal = self.get_order_subtotal()
        if self.tax_percent:
            tax_amount = (subtotal * (Decimal(self.tax_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tax = tax_amount
            bill.save()
            return Decimal(tax_amount).quantize(Decimal('.01'))
        elif self.tax:
            return Decimal(self.tax).quantize(Decimal('.01'))
        else:
            return 0
    def get_tip_amount(self):
        """Return the tip as a 2-dp Decimal, or 0 when neither field is set.

        Percentage tips are computed on subtotal + tax and, like
        get_tax_amount(), are written back to the stored bill.
        """
        subtotal = self.get_order_subtotal() + self.get_tax_amount()
        if self.tip_percent:
            tip_amount = (subtotal * (Decimal(self.tip_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tip = tip_amount
            bill.save()
            return Decimal(tip_amount).quantize(Decimal('.01'))
        elif self.tip:
            return Decimal(self.tip).quantize(Decimal('.01'))
        else:
            return 0
    def get_order_grand_total(self):
        # Returns the sum of all items including tax and tip
        total = _check_tip_tax_then_add(self) + self.get_order_subtotal()
        return Decimal(total)
    def get_order_subtotal(self):
        """Return the sum of all item prices on this bill (tip/tax excluded)."""
        total = 0
        items = Item.objects.filter(bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_shared_items_total(self):
        # Returns sum of shared items only
        total = 0
        items = Item.objects.filter(shared=True, bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.id])
class Person(models.Model):
    """A participant on a bill; owns items plus a share of shared items."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    name = models.CharField(max_length=30)
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='people')
    class Meta:
        verbose_name_plural = 'people'
        indexes = [
            models.Index(fields=['id'], name='person_id_index'),
        ]
    def __str__(self):
        return self.name.title()
    def get_shared_items_split(self):
        """Return this person's even share of shared items plus tip and tax."""
        total = _check_tip_tax_then_add(self.bill)
        # Division is safe: `self` is always one of bill.people, so count >= 1.
        person_count = self.bill.people.all().count()
        items = self.bill.items.filter(shared=True)
        for item in items:
            total += Decimal(item.price)
        split_amount = Decimal(total / person_count)
        return Decimal(split_amount)
    def get_person_total(self):
        """Return this person's own items plus their shared split, to 2 dp."""
        total = 0
        items = Item.objects.filter(person=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total + self.get_shared_items_split()).quantize(Decimal('.01'))
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
class Item(models.Model):
    """A single line item on a bill, optionally shared by all participants."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    price = models.DecimalField(max_digits=15, decimal_places=2)
    # Null for shared items, which belong to the bill rather than one person.
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        related_name='items',
        blank=True,
        null=True
    )
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='items')
    shared = models.BooleanField(default=False)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='item_id_index'),
        ]
    def __str__(self):
        # BUG FIX: `title` is nullable; returning None from __str__ raises
        # TypeError (e.g. in the admin). Fall back to the price string.
        return self.title if self.title else str(self.price)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
|
{
"imported_by": [],
"imports": [
"/splitter/models.py"
]
}
|
jlamonade/splitteroni
|
/splitter/forms.py
|
from django.forms import forms, ModelForm
from django.utils.translation import gettext_lazy as _
from .models import Bill
class BillCreateForm(ModelForm):
    """Creation form for a Bill: name plus optional tax/tip percentages."""
    class Meta:
        model = Bill
        fields = ('title', 'tax_percent', 'tip_percent',)
        labels = {
            'title': _('Name'),
        }
        help_texts = {
            'title': _('The current date and time will be used if name field is empty.'),
            'tax_percent': _('Please enter a percentage value. You can leave this blank and change it later.'),
            'tip_percent': _('Please enter a percentage value. You can leave this blank and change it later.'),
        }
        error_messages = {
            'title': {
                'max_length': _("Name is too long."),
            },
            'tax_percent': {
                'max_digits': _("Too many digits.")
            },
            'tip_percent': {
                'max_digits': _("Too many digits.")
            }
        }
class BillUpdateForm(ModelForm):
    """Rename an existing bill (title only)."""
    class Meta:
        model = Bill
        fields = ('title',)
        labels = {
            'title': _('Name'),
        }
class BillUpdateTaxPercentForm(ModelForm):
    """Update only the bill's tax percentage."""
    class Meta:
        model = Bill
        fields = ('tax_percent',)
        help_texts = {
            'tax_percent': _('Please enter a percent(%) amount.')
        }
class BillUpdateTaxAmountForm(ModelForm):
    """Update only the bill's flat tax amount (currency value)."""
    class Meta:
        model = Bill
        fields = ('tax',)
        help_texts = {
            'tax': _('Please enter a currency amount.')
        }
class BillUpdateTipForm(ModelForm):
    """Update only the bill's flat tip amount (currency value)."""
    class Meta:
        model = Bill
        fields = ('tip',)
        labels = {
            'tip': _('Tip/Service Charge'),
        }
        help_texts = {
            'tip': _('Please enter currency amount.')
        }
class BillUpdateTipPercentForm(ModelForm):
    """Update only the bill's tip percentage."""
    class Meta:
        model = Bill
        fields = ('tip_percent',)
        labels = {
            'tip_percent': _('Tip/Service Charge Percent'),
        }
        help_texts = {
            # BUG FIX: this help text was keyed on 'tip', which is not in
            # `fields`, so Django silently ignored it. Key it on the actual
            # form field so the hint is rendered.
            'tip_percent': _('Please enter a percent(%) amount.'),
        }
|
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from .utils import _check_tip_tax_then_add
# Create your models here.
class Bill(models.Model):
    """A restaurant bill; aggregates its items and tip/tax settings."""
    # UUID primary key, so bill URLs are not sequentially guessable.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    # Owner is optional: anonymous bills are tracked via `session` instead.
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, blank=True)
    session = models.CharField(max_length=40, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    # Tip and tax may each be given as a flat amount or a percentage;
    # the percentage, when set, takes precedence in the getters below.
    tip = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tip_percent = models.DecimalField(max_digits=10, decimal_places=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tax_percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='id_index'),
        ]
    def __str__(self):
        # Fall back to the creation timestamp when no title was given.
        if not self.title:
            return self.date_created.strftime("%m/%d/%y %I:%M%p")
        else:
            return self.title.title()
    def get_tax_amount(self):
        """Return the tax as a 2-dp Decimal, or 0 when neither field is set.

        NOTE(review): when `tax_percent` is set, this getter also persists
        the computed amount back onto the stored bill (a DB write inside a
        getter) — callers rely on that ordering.
        """
        subtotal = self.get_order_subtotal()
        if self.tax_percent:
            tax_amount = (subtotal * (Decimal(self.tax_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tax = tax_amount
            bill.save()
            return Decimal(tax_amount).quantize(Decimal('.01'))
        elif self.tax:
            return Decimal(self.tax).quantize(Decimal('.01'))
        else:
            return 0
    def get_tip_amount(self):
        """Return the tip as a 2-dp Decimal, or 0 when neither field is set.

        Percentage tips are computed on subtotal + tax and, like
        get_tax_amount(), are written back to the stored bill.
        """
        subtotal = self.get_order_subtotal() + self.get_tax_amount()
        if self.tip_percent:
            tip_amount = (subtotal * (Decimal(self.tip_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tip = tip_amount
            bill.save()
            return Decimal(tip_amount).quantize(Decimal('.01'))
        elif self.tip:
            return Decimal(self.tip).quantize(Decimal('.01'))
        else:
            return 0
    def get_order_grand_total(self):
        # Returns the sum of all items including tax and tip
        total = _check_tip_tax_then_add(self) + self.get_order_subtotal()
        return Decimal(total)
    def get_order_subtotal(self):
        """Return the sum of all item prices on this bill (tip/tax excluded)."""
        total = 0
        items = Item.objects.filter(bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_shared_items_total(self):
        # Returns sum of shared items only
        total = 0
        items = Item.objects.filter(shared=True, bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.id])
class Person(models.Model):
    """A participant on a bill; owns items plus a share of shared items."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    name = models.CharField(max_length=30)
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='people')
    class Meta:
        verbose_name_plural = 'people'
        indexes = [
            models.Index(fields=['id'], name='person_id_index'),
        ]
    def __str__(self):
        return self.name.title()
    def get_shared_items_split(self):
        """Return this person's even share of shared items plus tip and tax."""
        total = _check_tip_tax_then_add(self.bill)
        # Division is safe: `self` is always one of bill.people, so count >= 1.
        person_count = self.bill.people.all().count()
        items = self.bill.items.filter(shared=True)
        for item in items:
            total += Decimal(item.price)
        split_amount = Decimal(total / person_count)
        return Decimal(split_amount)
    def get_person_total(self):
        """Return this person's own items plus their shared split, to 2 dp."""
        total = 0
        items = Item.objects.filter(person=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total + self.get_shared_items_split()).quantize(Decimal('.01'))
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
class Item(models.Model):
    """A single line item on a bill, optionally shared by all participants."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    price = models.DecimalField(max_digits=15, decimal_places=2)
    # Null for shared items, which belong to the bill rather than one person.
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        related_name='items',
        blank=True,
        null=True
    )
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='items')
    shared = models.BooleanField(default=False)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='item_id_index'),
        ]
    def __str__(self):
        # BUG FIX: `title` is nullable; returning None from __str__ raises
        # TypeError (e.g. in the admin). Fall back to the price string.
        return self.title if self.title else str(self.price)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
|
{
"imported_by": [
"/splitter/views.py"
],
"imports": [
"/splitter/models.py"
]
}
|
jlamonade/splitteroni
|
/splitter/models.py
|
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from .utils import _check_tip_tax_then_add
# Create your models here.
class Bill(models.Model):
    """A restaurant bill; aggregates its items and tip/tax settings."""
    # UUID primary key, so bill URLs are not sequentially guessable.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    # Owner is optional: anonymous bills are tracked via `session` instead.
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, blank=True)
    session = models.CharField(max_length=40, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    # Tip and tax may each be given as a flat amount or a percentage;
    # the percentage, when set, takes precedence in the getters below.
    tip = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tip_percent = models.DecimalField(max_digits=10, decimal_places=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tax_percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='id_index'),
        ]
    def __str__(self):
        # Fall back to the creation timestamp when no title was given.
        if not self.title:
            return self.date_created.strftime("%m/%d/%y %I:%M%p")
        else:
            return self.title.title()
    def get_tax_amount(self):
        """Return the tax as a 2-dp Decimal, or 0 when neither field is set.

        NOTE(review): when `tax_percent` is set, this getter also persists
        the computed amount back onto the stored bill (a DB write inside a
        getter) — callers rely on that ordering.
        """
        subtotal = self.get_order_subtotal()
        if self.tax_percent:
            tax_amount = (subtotal * (Decimal(self.tax_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tax = tax_amount
            bill.save()
            return Decimal(tax_amount).quantize(Decimal('.01'))
        elif self.tax:
            return Decimal(self.tax).quantize(Decimal('.01'))
        else:
            return 0
    def get_tip_amount(self):
        """Return the tip as a 2-dp Decimal, or 0 when neither field is set.

        Percentage tips are computed on subtotal + tax and, like
        get_tax_amount(), are written back to the stored bill.
        """
        subtotal = self.get_order_subtotal() + self.get_tax_amount()
        if self.tip_percent:
            tip_amount = (subtotal * (Decimal(self.tip_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tip = tip_amount
            bill.save()
            return Decimal(tip_amount).quantize(Decimal('.01'))
        elif self.tip:
            return Decimal(self.tip).quantize(Decimal('.01'))
        else:
            return 0
    def get_order_grand_total(self):
        # Returns the sum of all items including tax and tip
        total = _check_tip_tax_then_add(self) + self.get_order_subtotal()
        return Decimal(total)
    def get_order_subtotal(self):
        """Return the sum of all item prices on this bill (tip/tax excluded)."""
        total = 0
        items = Item.objects.filter(bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_shared_items_total(self):
        # Returns sum of shared items only
        total = 0
        items = Item.objects.filter(shared=True, bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.id])
class Person(models.Model):
    """A participant on a bill; owns items plus a share of shared items."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    name = models.CharField(max_length=30)
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='people')
    class Meta:
        verbose_name_plural = 'people'
        indexes = [
            models.Index(fields=['id'], name='person_id_index'),
        ]
    def __str__(self):
        return self.name.title()
    def get_shared_items_split(self):
        """Return this person's even share of shared items plus tip and tax."""
        total = _check_tip_tax_then_add(self.bill)
        # Division is safe: `self` is always one of bill.people, so count >= 1.
        person_count = self.bill.people.all().count()
        items = self.bill.items.filter(shared=True)
        for item in items:
            total += Decimal(item.price)
        split_amount = Decimal(total / person_count)
        return Decimal(split_amount)
    def get_person_total(self):
        """Return this person's own items plus their shared split, to 2 dp."""
        total = 0
        items = Item.objects.filter(person=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total + self.get_shared_items_split()).quantize(Decimal('.01'))
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
class Item(models.Model):
    """A single line item on a bill, optionally shared by all participants."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    price = models.DecimalField(max_digits=15, decimal_places=2)
    # Null for shared items, which belong to the bill rather than one person.
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        related_name='items',
        blank=True,
        null=True
    )
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='items')
    shared = models.BooleanField(default=False)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='item_id_index'),
        ]
    def __str__(self):
        # BUG FIX: `title` is nullable; returning None from __str__ raises
        # TypeError (e.g. in the admin). Fall back to the price string.
        return self.title if self.title else str(self.price)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
|
from decimal import Decimal
def _check_tip_tax_then_add(self):
# Checks to see if tip or tax is null before adding them to total else it returns 0
total = 0
tip = self.get_tip_amount()
tax = self.get_tax_amount()
if tip:
total += tip
if tax:
total += tax
return Decimal(total)
|
{
"imported_by": [
"/splitter/admin.py",
"/splitter/forms.py",
"/splitter/tests.py",
"/splitter/views.py"
],
"imports": [
"/splitter/utils.py"
]
}
|
jlamonade/splitteroni
|
/splitter/tests.py
|
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.contrib.auth import get_user_model
from decimal import Decimal
from .models import Bill, Person, Item
# Create your tests here.
class SplitterTests(TestCase):
    """End-to-end tests for the splitter models and bill views.

    setUp builds two bills: `bill` with flat tip/tax amounts, and
    `bill_two` with percentage-based tip/tax (to exercise the computed
    getters), plus a person, a personal item, and shared items.
    """
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            username='testuser',
            email='testuser@email.com',
            password='testpass',
        )
        self.bill = Bill.objects.create(
            title='testbill',
            tip=12.00,
            tax=13.00,
            owner=self.user,
        )
        self.person = Person.objects.create(
            name='testperson',
            bill=self.bill
        )
        self.item = Item.objects.create(
            title='testitem',
            price=14.00,
            person=self.person,
            bill=self.bill,
        )
        self.shared_item = Item.objects.create(
            title='testshareditem',
            price=15.00,
            bill=self.bill,
            shared=True,
        )
        # Testing tax percent/amount
        self.bill_two = Bill.objects.create(
            title='testbill2',
            tip_percent=15,
            tax_percent=8.875,
            owner=self.user,
        )
        self.item_two = Item.objects.create(
            title='testitem2',
            price=14.00,
            bill=self.bill_two,
            shared=True,
        )
        # Expected totals reused by the assertions below.
        self.bill_total = self.item.price + self.shared_item.price + self.bill.tax + self.bill.tip
        self.shared_item_total = self.bill.tip + self.bill.tax + self.shared_item.price
        self.bill_detail_response = self.client.get(self.bill.get_absolute_url())
        self.bill_two_response = self.client.get(self.bill_two.get_absolute_url())
    def test_bill_object(self):
        self.assertEqual(self.bill.title, 'testbill')
        self.assertEqual(self.bill.tip, 12.00)
        self.assertEqual(self.bill.tax, 13.00)
        self.assertEqual(self.bill.owner, self.user)
    def test_bill_list_view_for_logged_in_user(self):
        self.client.login(email='testuser@email.com', password='testpass')
        response = self.client.get(reverse('bill-list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'testbill'.title())
        self.assertTemplateUsed(response, 'splitter/bill_list.html')
    def test_bill_list_view_for_logged_out_users(self):
        response = self.client.get(reverse('bill-list'))
        self.assertEqual(response.status_code, 200)
    def test_bill_detail_view(self):
        no_response = self.client.get('/bill/12345/')
        self.assertEqual(self.bill_detail_response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(self.bill_detail_response, 'testbill'.title())
        self.assertContains(self.bill_detail_response, '12.00')
        self.assertContains(self.bill_detail_response, '13.00')
        self.assertContains(self.bill_detail_response, self.item.price)
        self.assertContains(self.bill_detail_response, self.shared_item.price)
        self.assertContains(self.bill_detail_response, self.bill_total)
        self.assertTemplateUsed(self.bill_detail_response, 'splitter/bill_detail.html')
    def test_person_object(self):
        self.assertEqual(self.person.name, 'testperson')
        self.assertEqual(self.person.bill, self.bill)
    def test_person_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testperson'.title())
    def test_item_object(self):
        self.assertEqual(self.item.title, 'testitem')
        self.assertEqual(self.item.price, 14.00)
        self.assertEqual(self.item.bill, self.bill)
        self.assertEqual(self.item.person, self.person)
    def test_item_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testitem')
        self.assertContains(self.bill_detail_response, 14.00)
    def test_shared_item_object(self):
        self.assertEqual(self.shared_item.title, 'testshareditem')
        self.assertEqual(self.shared_item.price, 15.00)
        self.assertEqual(self.shared_item.bill, self.bill)
    def test_shared_item_object_in_bill_detail_view(self):
        self.assertContains(self.bill_detail_response, 'testshareditem')
        self.assertContains(self.bill_detail_response, 15.00)
    def test_bill_model_methods(self):
        """Tests for Bill model methods."""
        # Bill.get_order_total()
        self.assertEqual(self.bill.get_order_grand_total(), self.bill_total)
        # Bill.get_shared_items_total()
        self.assertEqual(self.bill.get_shared_items_total(), self.shared_item.price)
    def test_person_model_methods(self):
        """Tests for Person model methods."""
        # Person.get_shared_items_split()
        self.assertEqual(self.person.get_shared_items_split(), self.shared_item_total)
        # Person.get_person_total()
        self.assertEqual(self.person.get_person_total(), self.bill.get_order_grand_total())
    def test_bill_calculate_tax(self):
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.get_tax_amount()))
        self.assertContains(self.bill_two_response, self.bill_two.tax_percent)
        self.bill_two.tax = 12.00
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.tax))
    def test_bill_calculate_tip(self):
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.get_tip_amount()))
        self.assertContains(self.bill_two_response, self.bill_two.tip_percent)
        self.bill_two.tip = 12.00
        self.assertContains(self.bill_two_response, Decimal(self.bill_two.tip))
    def test_bill_saves_session(self):
        self.client.session.create()
        self.bill_three = Bill.objects.create(
            title='testbill3',
            session=self.client.session.session_key,
        )
        self.assertEqual(self.bill_three.session, self.client.session.session_key)
|
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from .utils import _check_tip_tax_then_add
# Create your models here.
class Bill(models.Model):
    """A restaurant bill; aggregates its items and tip/tax settings."""
    # UUID primary key, so bill URLs are not sequentially guessable.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    # Owner is optional: anonymous bills are tracked via `session` instead.
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, blank=True)
    session = models.CharField(max_length=40, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    # Tip and tax may each be given as a flat amount or a percentage;
    # the percentage, when set, takes precedence in the getters below.
    tip = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tip_percent = models.DecimalField(max_digits=10, decimal_places=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tax_percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='id_index'),
        ]
    def __str__(self):
        # Fall back to the creation timestamp when no title was given.
        if not self.title:
            return self.date_created.strftime("%m/%d/%y %I:%M%p")
        else:
            return self.title.title()
    def get_tax_amount(self):
        """Return the tax as a 2-dp Decimal, or 0 when neither field is set.

        NOTE(review): when `tax_percent` is set, this getter also persists
        the computed amount back onto the stored bill (a DB write inside a
        getter) — callers rely on that ordering.
        """
        subtotal = self.get_order_subtotal()
        if self.tax_percent:
            tax_amount = (subtotal * (Decimal(self.tax_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tax = tax_amount
            bill.save()
            return Decimal(tax_amount).quantize(Decimal('.01'))
        elif self.tax:
            return Decimal(self.tax).quantize(Decimal('.01'))
        else:
            return 0
    def get_tip_amount(self):
        """Return the tip as a 2-dp Decimal, or 0 when neither field is set.

        Percentage tips are computed on subtotal + tax and, like
        get_tax_amount(), are written back to the stored bill.
        """
        subtotal = self.get_order_subtotal() + self.get_tax_amount()
        if self.tip_percent:
            tip_amount = (subtotal * (Decimal(self.tip_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tip = tip_amount
            bill.save()
            return Decimal(tip_amount).quantize(Decimal('.01'))
        elif self.tip:
            return Decimal(self.tip).quantize(Decimal('.01'))
        else:
            return 0
    def get_order_grand_total(self):
        # Returns the sum of all items including tax and tip
        total = _check_tip_tax_then_add(self) + self.get_order_subtotal()
        return Decimal(total)
    def get_order_subtotal(self):
        """Return the sum of all item prices on this bill (tip/tax excluded)."""
        total = 0
        items = Item.objects.filter(bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_shared_items_total(self):
        # Returns sum of shared items only
        total = 0
        items = Item.objects.filter(shared=True, bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.id])
class Person(models.Model):
    """A participant on a bill; owns items plus a share of shared items."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    name = models.CharField(max_length=30)
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='people')
    class Meta:
        verbose_name_plural = 'people'
        indexes = [
            models.Index(fields=['id'], name='person_id_index'),
        ]
    def __str__(self):
        return self.name.title()
    def get_shared_items_split(self):
        """Return this person's even share of shared items plus tip and tax."""
        total = _check_tip_tax_then_add(self.bill)
        # Division is safe: `self` is always one of bill.people, so count >= 1.
        person_count = self.bill.people.all().count()
        items = self.bill.items.filter(shared=True)
        for item in items:
            total += Decimal(item.price)
        split_amount = Decimal(total / person_count)
        return Decimal(split_amount)
    def get_person_total(self):
        """Return this person's own items plus their shared split, to 2 dp."""
        total = 0
        items = Item.objects.filter(person=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total + self.get_shared_items_split()).quantize(Decimal('.01'))
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
class Item(models.Model):
    """A single line item on a bill, optionally shared by all participants."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    price = models.DecimalField(max_digits=15, decimal_places=2)
    # Null for shared items, which belong to the bill rather than one person.
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        related_name='items',
        blank=True,
        null=True
    )
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='items')
    shared = models.BooleanField(default=False)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='item_id_index'),
        ]
    def __str__(self):
        # BUG FIX: `title` is nullable; returning None from __str__ raises
        # TypeError (e.g. in the admin). Fall back to the price string.
        return self.title if self.title else str(self.price)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
|
{
"imported_by": [],
"imports": [
"/splitter/models.py"
]
}
|
jlamonade/splitteroni
|
/splitter/urls.py
|
from django.urls import path
from .views import (
BillCreateView,
BillDetailView,
PersonCreateView,
PersonDeleteView,
BillListView,
ItemCreateView,
ItemDeleteView,
SharedItemCreateView,
BillUpdateView,
BillUpdateTaxPercentView,
BillUpdateTaxAmountView,
BillUpdateTipAmountView,
BillUpdateTipPercentView,
BillDeleteView,
)
urlpatterns = [
    # Bill links
    path('new/', BillCreateView.as_view(), name='bill-create'),
    path('<uuid:pk>/', BillDetailView.as_view(), name='bill-detail'),
    path('archive/', BillListView.as_view(), name='bill-list'),
    path('<uuid:pk>/update/', BillUpdateView.as_view(), name='bill-update'),
    # Tax and tip can each be entered as either a percentage or a flat
    # amount; each mode has its own dedicated update view.
    path('<uuid:pk>/update-tax-percent/',
         BillUpdateTaxPercentView.as_view(),
         name='bill-update-tax-percent'),
    path('<uuid:pk>/update-tax-amount/',
         BillUpdateTaxAmountView.as_view(),
         name='bill-update-tax-amount'),
    path('<uuid:pk>/update-tip-amount/', BillUpdateTipAmountView.as_view(), name='bill-update-tip'),
    path('<uuid:pk>/update-tip-percent/',
         BillUpdateTipPercentView.as_view(),
         name='bill-update-tip-percent'),
    path('<uuid:pk>/delete/', BillDeleteView.as_view(), name='bill-delete'),
    # Person links
    path('<uuid:pk>/add-person/', PersonCreateView.as_view(), name='person-create'),
    path('person/<uuid:pk>/delete/', PersonDeleteView.as_view(), name='person-delete'),
    # Item links
    # Note the differing URL kwargs: per-person items need both bill_id and
    # person_id; shared items only need bill_id.
    path('<uuid:bill_id>/<uuid:person_id>/add-item/',
         ItemCreateView.as_view(),
         name='item-create'
         ),
    path('<uuid:bill_id>/add-shared-item/',
         SharedItemCreateView.as_view(),
         name='shared-item-create'
         ),
    path('item/<uuid:pk>/item-delete/', ItemDeleteView.as_view(), name='item-delete'),
]
|
from django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.http import Http404
from decimal import Decimal
from .models import Bill, Person, Item
from .forms import (BillCreateForm,
BillUpdateForm,
BillUpdateTaxPercentForm,
BillUpdateTaxAmountForm,
BillUpdateTipForm,
BillUpdateTipPercentForm)
# from .mixins import BillUpdateViewMixin
# Create your views here.
class BillCreateView(CreateView):
    """Create a Bill owned by the logged-in user, or tied to a fresh anonymous session."""

    template_name = 'splitter/bill_create.html'
    form_class = BillCreateForm

    def form_valid(self, form):
        # Attach ownership before saving: authenticated users own the bill
        # directly; anonymous visitors are keyed by a new session instead.
        if self.request.user.is_authenticated:
            form.instance.owner = self.request.user
        else:
            self.request.session.create()
            form.instance.session = self.request.session.session_key
        return super().form_valid(form)
class BillDetailView(DetailView):
    """Show one bill with its people and shared items.

    Access is restricted to the bill's owner (authenticated) or to the
    anonymous session that created it.
    """

    model = Bill
    template_name = 'splitter/bill_detail.html'
    context_object_name = 'bill'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['people'] = Person.objects.filter(
            bill=self.object.id)
        context['shared_items'] = Item.objects.filter(bill=self.object.id, shared=True)
        if self.object.tax_percent:
            context['tax_percentage'] = Decimal(self.object.tax_percent).quantize(Decimal('0.001'))
        if self.object.tip_percent:
            # Consistency fix: mirror the tax branch — convert to Decimal
            # first, then quantize. The original quantized inside the
            # Decimal() call (same value, but the intent was obscured).
            context['tip_percentage'] = Decimal(self.object.tip_percent).quantize(Decimal('0'))
        return context

    def get_object(self, queryset=None):
        # Raise 404 (not 403) for outsiders so bill existence is not leaked.
        pk = self.kwargs.get('pk')
        obj = get_object_or_404(Bill, id=pk)
        if self.request.user.is_authenticated and self.request.user == obj.owner:
            return obj
        elif self.request.session.session_key == obj.session:
            return obj
        else:
            raise Http404
class PersonCreateView(CreateView):
    """Add a Person to the Bill identified by the URL's pk."""
    model = Person
    template_name = 'splitter/person_create.html'
    fields = ('name',)
    def form_valid(self, form):
        # Attach the parent bill from the URL before saving.
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        return super().form_valid(form)
class BillDeleteView(DeleteView):
    """Delete a Bill, then return to the archive list."""
    model = Bill
    template_name = 'splitter/bill_delete.html'
    def get_success_url(self):
        return reverse_lazy('bill-list')
class BillListView(ListView):
    """List the bills belonging to the current user or anonymous session."""

    template_name = 'splitter/bill_list.html'
    context_object_name = 'bills'

    def get_queryset(self):
        if self.request.user.is_authenticated:
            qs = Bill.objects.filter(owner=self.request.user).order_by('-date_created')
        elif self.request.session.session_key:
            qs = Bill.objects.filter(session=self.request.session.session_key).order_by('-date_created')
        else:
            # Robustness fix: return an empty queryset instead of None so the
            # template can always iterate 'bills' safely.
            qs = Bill.objects.none()
        return qs
class PersonDeleteView(DeleteView):
    """Remove a Person and redirect back to their bill's detail page."""
    model = Person
    template_name = 'splitter/person_delete.html'
    def get_success_url(self):
        # self.object is still populated after deletion for URL building.
        return reverse_lazy('bill-detail', args=[self.object.bill.id])
class ItemCreateView(CreateView):
    """Create an Item for a specific Person on a specific Bill (both from URL kwargs)."""
    model = Item
    template_name = 'splitter/item_create.html'
    fields = ('title', 'price',)
    def form_valid(self, form):
        # Resolve both parents from the URL; 404 if either is missing.
        bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        person = get_object_or_404(Person, id=self.kwargs['person_id'])
        form.instance.bill = bill
        form.instance.person = person
        return super().form_valid(form)
class SharedItemCreateView(CreateView):
    """Create an Item marked shared=True so its cost is split across all people."""
    model = Item
    template_name = "splitter/item_create.html"
    fields = ('title', 'price',)
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        form.instance.bill = bill
        # Shared items have no single owner; the split logic lives on Person.
        form.instance.shared = True
        return super().form_valid(form)
class ItemDeleteView(DeleteView):
    """Delete an Item and redirect back to its bill's detail page."""
    model = Item
    template_name = 'splitter/item_delete.html'
    def get_success_url(self):
        return reverse_lazy('bill-detail', args=[self.object.bill.id])
class BillUpdateView(UpdateView):
    """Edit a Bill's basic fields (title)."""
    model = Bill
    template_name = 'splitter/bill_update.html'
    form_class = BillUpdateForm
    def form_valid(self, form):
        # NOTE(review): in an UpdateView, form.instance already IS this bill;
        # re-fetching it and assigning form.instance.bill looks redundant —
        # confirm before removing.
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        return super().form_valid(form)
class BillUpdateTaxPercentView(UpdateView):
    """Set the bill's tax as a percentage; clears any previously set flat amount."""
    model = Bill
    form_class = BillUpdateTaxPercentForm
    template_name = 'splitter/bill_update_tax_percent.html'
    def form_valid(self, form):
        # NOTE(review): form.instance already is this bill; the extra lookup
        # and .bill assignment appear redundant — confirm before removing.
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive; drop the flat amount.
        form.instance.tax = None
        return super().form_valid(form)
class BillUpdateTaxAmountView(UpdateView):
    """Set the bill's tax as a flat amount; clears any previously set percentage."""
    model = Bill
    form_class = BillUpdateTaxAmountForm
    template_name = 'splitter/bill_update_tax_amount.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Flat amount and percent are mutually exclusive; drop the percent.
        form.instance.tax_percent = None
        return super().form_valid(form)
class BillUpdateTipAmountView(UpdateView):
    """Set the bill's tip as a flat amount; clears any previously set percentage."""
    model = Bill
    form_class = BillUpdateTipForm
    template_name = 'splitter/bill_update_tip.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Flat amount and percent are mutually exclusive; drop the percent.
        form.instance.tip_percent = None
        return super().form_valid(form)
class BillUpdateTipPercentView(UpdateView):
    """Set the bill's tip as a percentage; clears any previously set flat amount."""
    model = Bill
    form_class = BillUpdateTipPercentForm
    template_name = 'splitter/bill_update_tip_percent.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive; drop the flat amount.
        form.instance.tip = None
        return super().form_valid(form)
|
{
"imported_by": [],
"imports": [
"/splitter/views.py"
]
}
|
jlamonade/splitteroni
|
/splitter/views.py
|
from django.views.generic import CreateView, DetailView, DeleteView, ListView, UpdateView
from django.shortcuts import get_object_or_404
from django.urls import reverse_lazy
from django.http import Http404
from decimal import Decimal
from .models import Bill, Person, Item
from .forms import (BillCreateForm,
BillUpdateForm,
BillUpdateTaxPercentForm,
BillUpdateTaxAmountForm,
BillUpdateTipForm,
BillUpdateTipPercentForm)
# from .mixins import BillUpdateViewMixin
# Create your views here.
class BillCreateView(CreateView):
    """Create a Bill owned by the logged-in user, or tied to a fresh anonymous session."""

    template_name = 'splitter/bill_create.html'
    form_class = BillCreateForm

    def form_valid(self, form):
        # Attach ownership before saving: authenticated users own the bill
        # directly; anonymous visitors are keyed by a new session instead.
        if self.request.user.is_authenticated:
            form.instance.owner = self.request.user
        else:
            self.request.session.create()
            form.instance.session = self.request.session.session_key
        return super().form_valid(form)
class BillDetailView(DetailView):
    """Show one bill with its people and shared items.

    Access is restricted to the bill's owner (authenticated) or to the
    anonymous session that created it.
    """

    model = Bill
    template_name = 'splitter/bill_detail.html'
    context_object_name = 'bill'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['people'] = Person.objects.filter(
            bill=self.object.id)
        context['shared_items'] = Item.objects.filter(bill=self.object.id, shared=True)
        if self.object.tax_percent:
            context['tax_percentage'] = Decimal(self.object.tax_percent).quantize(Decimal('0.001'))
        if self.object.tip_percent:
            # Consistency fix: mirror the tax branch — convert to Decimal
            # first, then quantize. The original quantized inside the
            # Decimal() call (same value, but the intent was obscured).
            context['tip_percentage'] = Decimal(self.object.tip_percent).quantize(Decimal('0'))
        return context

    def get_object(self, queryset=None):
        # Raise 404 (not 403) for outsiders so bill existence is not leaked.
        pk = self.kwargs.get('pk')
        obj = get_object_or_404(Bill, id=pk)
        if self.request.user.is_authenticated and self.request.user == obj.owner:
            return obj
        elif self.request.session.session_key == obj.session:
            return obj
        else:
            raise Http404
class PersonCreateView(CreateView):
    """Add a Person to the Bill identified by the URL's pk."""
    model = Person
    template_name = 'splitter/person_create.html'
    fields = ('name',)
    def form_valid(self, form):
        # Attach the parent bill from the URL before saving.
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        return super().form_valid(form)
class BillDeleteView(DeleteView):
    """Delete a Bill, then return to the archive list."""
    model = Bill
    template_name = 'splitter/bill_delete.html'
    def get_success_url(self):
        return reverse_lazy('bill-list')
class BillListView(ListView):
    """List the bills belonging to the current user or anonymous session."""

    template_name = 'splitter/bill_list.html'
    context_object_name = 'bills'

    def get_queryset(self):
        if self.request.user.is_authenticated:
            qs = Bill.objects.filter(owner=self.request.user).order_by('-date_created')
        elif self.request.session.session_key:
            qs = Bill.objects.filter(session=self.request.session.session_key).order_by('-date_created')
        else:
            # Robustness fix: return an empty queryset instead of None so the
            # template can always iterate 'bills' safely.
            qs = Bill.objects.none()
        return qs
class PersonDeleteView(DeleteView):
    """Remove a Person and redirect back to their bill's detail page."""
    model = Person
    template_name = 'splitter/person_delete.html'
    def get_success_url(self):
        # self.object is still populated after deletion for URL building.
        return reverse_lazy('bill-detail', args=[self.object.bill.id])
class ItemCreateView(CreateView):
    """Create an Item for a specific Person on a specific Bill (both from URL kwargs)."""
    model = Item
    template_name = 'splitter/item_create.html'
    fields = ('title', 'price',)
    def form_valid(self, form):
        # Resolve both parents from the URL; 404 if either is missing.
        bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        person = get_object_or_404(Person, id=self.kwargs['person_id'])
        form.instance.bill = bill
        form.instance.person = person
        return super().form_valid(form)
class SharedItemCreateView(CreateView):
    """Create an Item marked shared=True so its cost is split across all people."""
    model = Item
    template_name = "splitter/item_create.html"
    fields = ('title', 'price',)
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['bill_id'])
        form.instance.bill = bill
        # Shared items have no single owner; the split logic lives on Person.
        form.instance.shared = True
        return super().form_valid(form)
class ItemDeleteView(DeleteView):
    """Delete an Item and redirect back to its bill's detail page."""
    model = Item
    template_name = 'splitter/item_delete.html'
    def get_success_url(self):
        return reverse_lazy('bill-detail', args=[self.object.bill.id])
class BillUpdateView(UpdateView):
    """Edit a Bill's basic fields (title)."""
    model = Bill
    template_name = 'splitter/bill_update.html'
    form_class = BillUpdateForm
    def form_valid(self, form):
        # NOTE(review): in an UpdateView, form.instance already IS this bill;
        # re-fetching it and assigning form.instance.bill looks redundant —
        # confirm before removing.
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        return super().form_valid(form)
class BillUpdateTaxPercentView(UpdateView):
    """Set the bill's tax as a percentage; clears any previously set flat amount."""
    model = Bill
    form_class = BillUpdateTaxPercentForm
    template_name = 'splitter/bill_update_tax_percent.html'
    def form_valid(self, form):
        # NOTE(review): form.instance already is this bill; the extra lookup
        # and .bill assignment appear redundant — confirm before removing.
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive; drop the flat amount.
        form.instance.tax = None
        return super().form_valid(form)
class BillUpdateTaxAmountView(UpdateView):
    """Set the bill's tax as a flat amount; clears any previously set percentage."""
    model = Bill
    form_class = BillUpdateTaxAmountForm
    template_name = 'splitter/bill_update_tax_amount.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Flat amount and percent are mutually exclusive; drop the percent.
        form.instance.tax_percent = None
        return super().form_valid(form)
class BillUpdateTipAmountView(UpdateView):
    """Set the bill's tip as a flat amount; clears any previously set percentage."""
    model = Bill
    form_class = BillUpdateTipForm
    template_name = 'splitter/bill_update_tip.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Flat amount and percent are mutually exclusive; drop the percent.
        form.instance.tip_percent = None
        return super().form_valid(form)
class BillUpdateTipPercentView(UpdateView):
    """Set the bill's tip as a percentage; clears any previously set flat amount."""
    model = Bill
    form_class = BillUpdateTipPercentForm
    template_name = 'splitter/bill_update_tip_percent.html'
    def form_valid(self, form):
        bill = get_object_or_404(Bill, id=self.kwargs['pk'])
        form.instance.bill = bill
        # Percent and flat amount are mutually exclusive; drop the flat amount.
        form.instance.tip = None
        return super().form_valid(form)
|
import uuid
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from decimal import Decimal
from .utils import _check_tip_tax_then_add
# Create your models here.
class Bill(models.Model):
    """A bill to be split among people.

    Tax and tip can each be stored as a flat amount (tax/tip) or a
    percentage (tax_percent/tip_percent); the update views keep the two
    representations mutually exclusive.
    """
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, blank=True)
    # Anonymous visitors are tracked by their session key instead of an owner.
    session = models.CharField(max_length=40, null=True, blank=True)
    date_created = models.DateTimeField(auto_now_add=True)
    tip = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tip_percent = models.DecimalField(max_digits=10, decimal_places=3, blank=True, null=True)
    tax = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True)
    tax_percent = models.DecimalField(max_digits=10, decimal_places=5, blank=True, null=True)
    class Meta:
        indexes = [
            models.Index(fields=['id'], name='id_index'),
        ]
    def __str__(self):
        # Untitled bills fall back to their creation timestamp.
        if not self.title:
            return self.date_created.strftime("%m/%d/%y %I:%M%p")
        else:
            return self.title.title()
    def get_tax_amount(self):
        # NOTE(review): this "getter" persists the derived tax back to the DB
        # (a read method with a write side effect) — presumably intentional
        # caching of the computed amount; confirm before refactoring.
        subtotal = self.get_order_subtotal()
        if self.tax_percent:
            tax_amount = (subtotal * (Decimal(self.tax_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tax = tax_amount
            bill.save()
            return Decimal(tax_amount).quantize(Decimal('.01'))
        elif self.tax:
            return Decimal(self.tax).quantize(Decimal('.01'))
        else:
            return 0
    def get_tip_amount(self):
        # Tip percent is applied to subtotal + tax; same DB-write caching
        # side effect as get_tax_amount.
        subtotal = self.get_order_subtotal() + self.get_tax_amount()
        if self.tip_percent:
            tip_amount = (subtotal * (Decimal(self.tip_percent / 100)))
            bill = Bill.objects.get(id=self.id)
            bill.tip = tip_amount
            bill.save()
            return Decimal(tip_amount).quantize(Decimal('.01'))
        elif self.tip:
            return Decimal(self.tip).quantize(Decimal('.01'))
        else:
            return 0
    def get_order_grand_total(self):
        # Returns the sum of all items including tax and tip
        total = _check_tip_tax_then_add(self) + self.get_order_subtotal()
        return Decimal(total)
    def get_order_subtotal(self):
        # Sum of all item prices on the bill (shared and per-person alike).
        total = 0
        items = Item.objects.filter(bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_shared_items_total(self):
        # Returns sum of shared items only
        total = 0
        items = Item.objects.filter(shared=True, bill=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total)
    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.id])
class Person(models.Model):
    """A participant on a Bill who owns Items plus a share of the shared Items."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    name = models.CharField(max_length=30)
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='people')

    class Meta:
        verbose_name_plural = 'people'
        indexes = [
            models.Index(fields=['id'], name='person_id_index'),
        ]

    def __str__(self):
        return self.name.title()

    def get_shared_items_split(self):
        """Return one person's share of the shared items, including tax and tip.

        Robustness fix: guard the division so a bill with no attached people
        cannot raise ZeroDivisionError (treat it as a single share).
        """
        total = _check_tip_tax_then_add(self.bill)
        person_count = self.bill.people.all().count() or 1
        items = self.bill.items.filter(shared=True)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total / person_count)

    def get_person_total(self):
        """Return the person's own items plus their shared split, rounded to cents."""
        total = 0
        items = Item.objects.filter(person=self)
        for item in items:
            total += Decimal(item.price)
        return Decimal(total + self.get_shared_items_split()).quantize(Decimal('.01'))

    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
class Item(models.Model):
    """A line item on a Bill, either assigned to one Person or shared by all."""
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False
    )
    title = models.CharField(max_length=50, blank=True, null=True)
    price = models.DecimalField(max_digits=15, decimal_places=2)
    person = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        related_name='items',
        blank=True,
        null=True
    )
    bill = models.ForeignKey(Bill, on_delete=models.CASCADE, related_name='items')
    shared = models.BooleanField(default=False)

    class Meta:
        indexes = [
            models.Index(fields=['id'], name='item_id_index'),
        ]

    def __str__(self):
        # Bug fix: title is nullable (blank=True, null=True); __str__ must
        # always return a string, so fall back to the item's UUID.
        return self.title if self.title else str(self.id)

    def get_absolute_url(self):
        return reverse('bill-detail', args=[self.bill.id])
--- FILE SEPARATOR ---
from django.forms import forms, ModelForm
from django.utils.translation import gettext_lazy as _
from .models import Bill
class BillCreateForm(ModelForm):
    """Form for creating a Bill; title and tax/tip percents are all optional."""
    class Meta:
        model = Bill
        fields = ('title', 'tax_percent', 'tip_percent',)
        labels = {
            'title': _('Name'),
        }
        help_texts = {
            'title': _('The current date and time will be used if name field is empty.'),
            'tax_percent': _('Please enter a percentage value. You can leave this blank and change it later.'),
            'tip_percent': _('Please enter a percentage value. You can leave this blank and change it later.'),
        }
        error_messages = {
            'title': {
                'max_length': _("Name is too long."),
            },
            'tax_percent': {
                'max_digits': _("Too many digits.")
            },
            'tip_percent': {
                'max_digits': _("Too many digits.")
            }
        }
class BillUpdateForm(ModelForm):
    """Rename a Bill (title only)."""
    class Meta:
        model = Bill
        fields = ('title',)
        labels = {
            'title': _('Name'),
        }
class BillUpdateTaxPercentForm(ModelForm):
    """Set tax as a percentage; the view clears the flat tax amount."""
    # def __init__(self, *args, **kwargs):
    #     initial = kwargs.get('initial', {})
    #     initial['tax'] = 0
    #     kwargs['initial'] = initial
    #     super(BillUpdateTaxPercentForm, self).__init__(*args, **kwargs)
    class Meta:
        model = Bill
        fields = ('tax_percent',)
        help_texts = {
            'tax_percent': _('Please enter a percent(%) amount.')
        }
class BillUpdateTaxAmountForm(ModelForm):
    """Set tax as a flat currency amount; the view clears the tax percentage."""
    class Meta:
        model = Bill
        fields = ('tax',)
        help_texts = {
            'tax': _('Please enter a currency amount.')
        }
class BillUpdateTipForm(ModelForm):
    """Set tip as a flat currency amount; the view clears the tip percentage."""
    class Meta:
        model = Bill
        fields = ('tip',)
        labels = {
            'tip': _('Tip/Service Charge'),
        }
        help_texts = {
            'tip': _('Please enter currency amount.')
        }
class BillUpdateTipPercentForm(ModelForm):
    """Set the tip/service charge as a percentage of the bill."""
    class Meta:
        model = Bill
        fields = ('tip_percent',)
        labels = {
            'tip_percent': _('Tip/Service Charge Percent'),
        }
        help_texts = {
            # Bug fix: the key must match the form field name. The original
            # keyed 'tip', which is not in this form's fields, so the help
            # text was silently never rendered.
            'tip_percent': _('Please enter a percent(%) amount.')
        }
|
{
"imported_by": [
"/splitter/urls.py"
],
"imports": [
"/splitter/models.py",
"/splitter/forms.py"
]
}
|
trineary/TradeTestingEngine
|
/TTE.py
|
# --------------------------------------------------------------------------------------------------------------------
#
# Patrick Neary
# Date: 9/21/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# tte.py
#
# This file handles the interface to most of the code in this project.
#
# --------------------------------------------------------------------------------------------------------------------
# Import standard packages
import math
import pandas as pd
import datetime as dt
from matplotlib import pyplot
import yahoo_finance as yfinance
# Import my classes
from TradeTracking.TradeHistory import TradeTracking
from pytte.TTEBootstrapTests import WhiteBootstrap, MonteCarloBootstrap, TTEBootstrap
class TTE:
# Bootstrap options that are available with this package
BOOTSTRAP_TTE = 0
BOOTSTRAP_MONTE_CARLO = 1
BOOTSTRAP_WHITE = 2
# Trading direction options\
CASH = 0
LONG = 1
SHORT = -1
def __init__(self):
# Trade tracking
self._tt = TradeTracking(trackDailyPositions=True)
self._trade_history = None
# Bootstrap initializations
self._bs_tte = None
self._bs_mc = None
self._bs_wh = None
self._bs = None # this is the currently selected bootstrap
# Dataframe for trade data
self._df = None
self._column = None
self._ticker = None
return
def get_hist_data(self, ticker, startdate, stopdate, column="Adj_Close"):
# get historical data
# Inputs
# 1. ticker - ticker sympol of desired equite. Ex. 'SPY'
# 2. startdate - start date to start collecting data from. Ex. startdate = '2016-08-20'
# 3. stopdate - stop date to stop collecting data from. Ex. endDate = '2016-09-16'
# 4. column - this is the column in the dataframe to use to get price information from. Default is 'Adj_Close'
# Returns
# 1. dataframe containing data for the specified inputs
#
# Get a dataframe with data between the two dates for the specified ticker. This will automatically load
# the historical data into the local _df variable.
# Get the historical data and load into the dataframe variable. Return the historical data to the calling
# function for the user to cycle through it to generate trade signals.
#self._df = GetHistoricalStockData(ticker, startdate, stopdate)
# Get the data from yahoo finance, reorder the data, and then put the data into a dataframe for easy use.
yahoo = yfinance.Share(ticker)
data = yahoo.get_historical(start_date=startdate, end_date=stopdate)
# data comes in reversed order. Put it in ascending order.
data = data[::-1]
# Put the data into a dataframe
df = pd.DataFrame(data=data)
# Load historical data and initialize other values
self.load_hist_data(ticker, df, column)
return df
def load_hist_data(self, ticker, hist_data, column="Adj_Close"):
# Load the specified data set. This is only used if the user loads historical data from a different source
# (forex data for example).
# Inputs
# 1. hist_data - historical data in the format of a dataframe
# 2. column - this is the column in the dataframe to use to get price information from. Default is 'Adj_Close'
self._ticker = ticker
self._df = hist_data
self._tt.InitTickData(self._df)
self._column = column
self._trade_history = [0]*len(self._df[self._column]) # Make trade history the same length as the data
pass
def reset(self):
'''
reset - dataframe is left alone, but all other internal tracking is reset so system can run a new test
:return:
'''
print "TODO: reset still needs to be implemented"
pass
def open_trade(self, index, direction):
'''
:param index: index into the dataframe.
:param direction: direction of the trade (CASH, LONG, or SHORT)
:return: None
'''
# Make sure index is in a valid range
if index < 0 or index > len(self._df[self._column]):
print "open_trade error! index is out of bounds (%d)\n" %index
return False
openprice = self._df.ix[index][self._column]
spread = 0.0
timestamp = self._df.ix[index]['Date']
self._tt.OpenTrade(self._ticker, openprice=openprice, spread=spread, direction=direction, timestamp=timestamp)
return True
def close_trade(self, index):
# Make sure index is in a valid range
if index < 0 or index > len(self._df[self._column]):
print "close_trade error! index is out of bounds (%d)\n" %index
return False
closeprice = self._df.ix[index][self._column]
timestamp = self._df.ix[index]['Date']
self._tt.CloseTrade(closeprice=closeprice, timestamp=timestamp, direction=self.CASH)
return True
def select_bootstrap(self, selection):
'''
set_bootstrap
Set the bootstrap to be used for all subsequent queries. This can be updated at any time to get information
relevant to the specified bootstrap.
:return:
'''
if selection == self.BOOTSTRAP_TTE:
self._bs = TTEBootstrap.TTEBootstrap()
elif selection == self.BOOTSTRAP_MONTE_CARLO:
self._bs = MonteCarloBootstrap.MonteCarloBootstrap()
elif selection == self.BOOTSTRAP_WHITE:
self._bs = WhiteBootstrap.WhiteBootstrap()
else:
print "select_bootstrap error! selection was invaled (%d)\n" %(selection)
print "Valid selections are the following: \n"
print " BOOTSTRAP_TTE, BOOTSTRAP_MONTE_CARLO, BOOTSTRAP_WHITE\n\n"
return False
return True
def get_pvalue(self, iterations=5000):
# Calculate the total return based on what has been tracked in the trade tracker
rule_percent_return = self._tt.GetPercentReturn()
# Initialize the test
self._bs.init_test(self._df, self._column, num_iterations=iterations)
# Determine what the p-value is for this bootstrap method
pvalue = self._bs.has_predictive_power(rule_percent_return)
return pvalue
def get_trade_stats(self):
return self._tt.GetTradeStatsStr()
def print_trade_stats(self):
print "\n", self._tt.GetTradeStatsStr()
pass
def print_trade_history(self):
self._tt.PrintHistory()
pass
def plot_pdf(self):
'''
plot_pdf
# Display a plot showing the probability density function of returns calculated.
:return:
'''
self._bs.plot_histogram()
pass
def plot_trades_equity(self):
'''
plot_trades_equity
Generate a plot that shows the trades and the equity curve for the given dataframe
:return:
'''
#print len(self.pairtimestmps), len(self.pairhistory), len(self.visualRewardHistory)
pyplot.figure(1)
#pyplot.subplot(211)
pyplot.plot(self._df[self._column])
#pyplot.subplot(212)
#pyplot.plot(self.visualRewardHistory)
#pyplot.subplot(313)
#pyplot.plot(self.visualTradeHistory)
#x1,x2,y1,y2 = pyplot.axis()
#pyplot.axis((x1,x2,(y1-0.25), (y2+0.25)))
pyplot.xticks( rotation= 45 )
pyplot.show()
pass
def plot_all(self, title=None):
#pyplot.xlabel('Smarts')
#pyplot.ylabel('Probability')
pyplot.figure(1)
pyplot.subplot(311)
pyplot.title(title)
sample_means = self._bs.get_histogram_data()
pyplot.hist(sample_means, bins=20)
pyplot.grid(True)
pyplot.subplot(312)
pyplot.plot(self._df[self._column])
pyplot.subplot(313)
dates = self._df['Date'].tolist()
x = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in dates]
pyplot.plot(self._df['Position'])
#pyplot.plot(x, self._df['Position'])
#pyplot.gcf().autofmt_xdate()
pyplot.xticks( rotation= 45 )
x1,x2,y1,y2 = pyplot.axis()
pyplot.axis((x1,x2,(y1-0.25), (y2+0.25)))
pyplot.show()
pass
# --------------------------------------------------------------------------------------------------------------------
# Test functions
# --------------------------------------------------------------------------------------------------------------------
# Default function when the file is run
if __name__ == "__main__":
    # Script entry point: nothing to demo here beyond announcing execution.
    print "Run default function for ", __file__
|
# --------------------------------------------------------------------------------------------------------------------
# Patrick Neary
# Fin5350
# Project
# 10/6/2016
#
# TradeHistory.py
#
# This file
# --------------------------------------------------------------------------------------------------------------------
import math
import datetime
import numpy as np
from TradeDetails import TradeDetails
class TradeTracking:
    """Tracks simulated trades (Python 2): per-trade P/L, win/loss stats,
    cumulative return, and optionally a per-period position column in the
    tick dataframe.

    NOTE(review): CloseTrade deliberately skips recording some losing trades
    (the `cnt` gating below) — this biases history/P&L and looks experimental;
    confirm the intent before relying on the reported statistics.
    """
    def __init__(self, trackHistory=True, trackDailyPositions=False):
        self.totalPL = 0.0
        self.totalPercentReturn = 0.0
        self.tradeHistory = []
        self.currTrade = TradeDetails()
        self.trackHistory = trackHistory
        self.totalWins = 0
        self.totalLosses = 0
        self.longWins = 0
        self.shortWins = 0
        self.longLosses = 0
        self.shortLosses = 0
        self.tickData = None
        self.trackDailyPositions = trackDailyPositions
        self.ID = None
        self.isTradeOpen = False
        self.currTradeDirection = 0
        self.currPrice = 0.0
        # Direction constants (mirror TTE's CASH/LONG/SHORT values)
        self.CASH = 0
        self.LONG = 1
        self.SHORT = -1
        self.firsttimestamp = None
        self.lasttimestamp = None
        self.openPrice = 0.0
        # Counter used by CloseTrade's loss-dropping logic (see NOTE above)
        self.cnt = 0
        return
    def __str__(self):
        tradehistorystr = ""
        for trade in self.tradeHistory:
            tradehistorystr += trade.__str__() + "\n"
        return tradehistorystr
    def InitTickData(self, tickData):
        # tickData - data frame containing time stamped tick information. A column will be added to this data to
        #            track every time period's position. 0 - No trade, 1 - Long position, -1 - Short position.
        self.tickData = tickData
        # Add column to track position for every time period and make sure entries are 0 for 'no trade'
        self.tickData['Position'] = np.zeros((len(tickData), 1))
        pass
    def UpdateTradePositions(self):
        # Find and update the positions between the open and close dates in the dataframe. This function is based
        # off of values in self.currTrade. This shouldn't be called until after openTimeStamp, closeTimeStamp, and
        # tradeDirection have been set.. or after CloseTrade has been called.
        # Only run through this if we're tracking daily positions
        if self.trackDailyPositions == False:
            return
        # Iterate through the array looking for relevant time stamps.
        # NOTE(review): assumes the 'Date' column is formatted "%Y-%m-%d" —
        # confirm against the data loader.
        index = 0
        for idx in self.tickData.iterrows():
            #print idx
            currtimestamp = datetime.datetime.strptime(self.tickData.ix[index]['Date'], "%Y-%m-%d")
            if currtimestamp >= self.currTrade.openTimeStamp and currtimestamp <= self.currTrade.closeTimeStamp:
                self.tickData.set_value(index, 'Position', self.currTrade.tradeDirection)
            index += 1
        pass
    def OpenTrade(self, equity, openprice, spread, direction, timestamp, id=None):
        # Start a fresh TradeDetails record; remembers the very first
        # timestamp seen for reporting.
        if self.firsttimestamp == None:
            self.firsttimestamp = timestamp
        self.currTrade = TradeDetails()
        self.currTrade.OpenTrade(equity, openprice, spread, direction, timestamp, id)
        self.ID = id
        self.isTradeOpen = True
        self.currTradeDirection = direction
        self.openPrice = openprice
        #print "OpenTrade", equity, openprice, spread, direction, timestamp, id
        return
    def UpdateStats(self, closeprice):
        # Tally the finished trade into the win/loss counters, split by direction.
        tradePL = self.currTrade.GetCurrentPL(closeprice)
        if tradePL > 0:
            if self.currTradeDirection == self.LONG:
                self.longWins += 1
            else:
                self.shortWins += 1
            self.totalWins += 1
        else:
            if self.currTradeDirection == self.LONG:
                self.longLosses += 1
            else:
                self.shortLosses += 1
            self.totalLosses += 1
        pass
    def CloseTrade(self, closeprice, timestamp, direction):
        self.lasttimestamp = timestamp
        # Close the trade
        self.currTrade.CloseTrade(closeprice, timestamp)
        tradePL = self.currTrade.GetCurrentPL(closeprice)
        self.totalPercentReturn += self.currTrade.GetTradePercentPL()
        # NOTE(review): winners are always recorded, but losers are only
        # recorded when cnt == 0 — i.e. a subset of losing trades is dropped
        # from history, P/L and stats (see class docstring).
        if tradePL > 0 or self.cnt == 0:
            # add trade to the history if enabled
            if self.trackHistory == True:
                # Drop half of the losing trades
                self.tradeHistory.append(self.currTrade)
            # Add trade results to total PL
            self.totalPL += tradePL
            self.currTrade.SetTotalPL(self.totalPL)
            # Update stats
            self.UpdateStats(closeprice)
            # Update trade positions for this trade if it's being tracked
            self.UpdateTradePositions()
        if tradePL < 0:
            if self.cnt < 3:
                self.cnt += 1
            if self.cnt >= 3:
                self.cnt = 0
        self.ID = None
        self.isTradeOpen = False
        self.currTradeDirection = direction
        return
    def GetTradeCurrPL(self, currPrice):
        return self.currTrade.GetCurrentPL(currPrice)
    def UpdateCurrPrice(self, currPrice):
        self.currPrice = currPrice
        pass
    def GetTimeStepPL(self, nextPrice):
        # This gets the difference between the updated price and the next price. Order of subtraction is based on
        # the direction of the trade.
        if self.currTradeDirection == self.LONG:
            return nextPrice - self.currPrice
        elif self.currTradeDirection == self.SHORT:
            return self.currPrice - nextPrice
        else:
            return 0.0
    def GetTradeCurrDuration(self):
        return self.currTrade.GetTradeDuration()
    def GetTotalPL(self):
        # This returns the cumulative PL prior to current trade (if any)
        return self.totalPL
    def GetPercentReturn(self):
        # This calculates the percent return using ln(r1) - ln(r2) where r1 and r2 are opening/closing prices
        return self.totalPercentReturn
    def GetTradeStatsStr(self):
        # Build a human-readable summary; denominators are clamped to >= 1
        # to avoid division by zero when no trades occurred.
        tradestatsstr = ""
        totalTrades = max((self.totalWins + self.totalLosses), 1)
        tradestatsstr += "Trading Stats:\n"
        tradestatsstr += "Total trades:\t %d\n" % totalTrades
        tradestatsstr += "Total Wins:\t\t %d, \t%0.2f%%\n" %(self.totalWins, (float(self.totalWins)/totalTrades)*100)
        tradestatsstr += "Total Losses:\t %d, \t%0.2f%%\n" %(self.totalLosses, (float(self.totalLosses)/totalTrades)*100)
        longTrades = max((self.longWins + self.longLosses), 1)
        shortTrades = max((self.shortWins + self.shortLosses), 1)
        tradestatsstr += "Long wins:\t\t %d, \t%0.2f%%\n" %(self.longWins, (float(self.longWins)/longTrades)*100)
        tradestatsstr += "Long losses:\t %d, \t%0.2f%%\n" %(self.longLosses, (float(self.longLosses)/longTrades)*100)
        tradestatsstr += "Short wins:\t\t %d, \t%0.2f%%\n" %(self.shortWins, (float(self.shortWins)/shortTrades)*100)
        tradestatsstr += "Short losses:\t %d, \t%0.2f%%\n" %(self.shortLosses, (float(self.shortLosses)/shortTrades)*100)
        tradestatsstr += "Total P/L:\t\t %0.2f\n" % self.totalPL
        # NOTE(review): "P\L" below is a literal backslash — likely a typo for "P/L".
        tradestatsstr += "Percent P\L:\t %0.2f\n" % self.totalPercentReturn
        tradestatsstr += "First timestamp: %s\n" % self.firsttimestamp
        tradestatsstr += "Last timestamp:\t %s\n" % self.lasttimestamp
        return tradestatsstr
    def PrintHistory(self):
        tradehistorystr = ""
        for trade in self.tradeHistory:
            tradehistorystr += trade.__str__()
            print trade
        return tradehistorystr
    def GetHistory(self):
        # Return list of TradeDetails
        return self.tradeHistory
    def getCurrID(self):
        # If application is interested in the ID for the current trade then it will be available (if set).
        return self.ID
    def GetIsTradeOpen(self):
        return self.isTradeOpen
    def GetCurrTradeDirection(self):
        return self.currTradeDirection
# --------------------------------------------------------------------------------------------------------------------
#
# --------------------------------------------------------------------------------------------------------------------
def ExecuteTestTrades():
CASH = 0
LONG = 1
SHORT = 2
openTS = datetime.datetime(2016, 04, 18)
closeTS = datetime.datetime(2016, 04, 19)
openPrice = 78.8
closePrice = 78.2
spread = 0.032
tt = TradeTracking()
tt.OpenTrade("AUDJPY", openPrice, spread, LONG, openTS)
tt.CloseTrade(closePrice, closeTS)
print tt
openTS = datetime.datetime(2016, 04, 20)
closeTS = datetime.datetime(2016, 04, 22)
openPrice = 79.0
closePrice = 79.8
spread = 0.032
tt.OpenTrade("AUDJPY", openPrice, spread, LONG, openTS)
tt.CloseTrade(closePrice, closeTS)
print ""
print tt
return
# --------------------------------------------------------------------------------------------------------------------
# Default function when the file is run
# --------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Functions to run if this file is executed
    # Entry point: run the hard-coded example trades as a smoke test.
    print "Run default function for ", __file__
    ExecuteTestTrades()
|
{
"imported_by": [],
"imports": [
"/TradeTracking/TradeHistory.py"
]
}
|
trineary/TradeTestingEngine
|
/TTEBootstrapTests/MonteCarloBootstrap.py
|
# --------------------------------------------------------------------------------------------------------------------
#
# Patrick Neary
# Date: 11/12/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# kWhiteRealityCheck.py
#
# This file is an implementation of White's Reality Check for evaluating the significance of a trading rule's
# predictive power.
#
# --------------------------------------------------------------------------------------------------------------------
# Import standard packages
import random
from matplotlib import pyplot as plt
from BootstrapABC import BootstrapABC
# Import my classes
from BootstrapCalcTools import GetDailyReturns, GetMeanDailyReturn, GetDetrendedReturns, GetPVal
# Global values for selecting different options
# --------------------------------------------------------------------------------------------------------------------
class MonteCarloBootstrap(BootstrapABC):
    """Monte Carlo permutation test: pairs randomly drawn detrended returns with
    alternating trade directions to build a sampling distribution of rule-free
    returns, then scores an actual rule return against it.

    Python 2 module (print statements, xrange).
    """
    def __init__(self):
        # _sample_means: distribution of simulated total returns built by has_predictive_power.
        self._sample_means = []
        # _rules: per-bar rule positions (+1/-1), set from the dataframe's 'Position' column.
        self._rules = []
        pass
    def init_test(self, df, col_name, num_iterations=5000):
        """
        init_test initializes the Monte Carlo bootstrap test
        :param df: dataframe containing data to bootstrap (must include a 'Position' column of +1/-1 rule values)
        :param col_name: name of column in data frame containing the price data
        :param num_iterations: number of iterations to build bootstrap sampling distribution
        :return: none
        """
        self._df = df
        self._detrended_data = None
        self._col_name = col_name
        self._num_iterations = num_iterations
        # NOTE(review): datalen is computed but unused (leftover from the commented-out gain calc).
        datalen = len(self._df.index)
        #gain = float(self._df.at[datalen-1, col_name]) - float(self._df.at[0, col_name])
        #dailyGain = gain/datalen
        pass
    def plot_histogram(self, bins=20):
        # Show the sampling distribution; no-op if has_predictive_power hasn't run yet.
        if len(self._sample_means) > 0:
            plt.hist(self._sample_means, bins=bins)
            plt.grid(True)
            plt.show()
        return
    def get_histogram_data(self):
        # Raw list of simulated total returns (empty until has_predictive_power runs).
        return self._sample_means
    def run_monte_carlo_round(self, detrended_data):
        # Run through one iteration of pairing daily rules with detrended returns. Calculate the total return
        # for the round and return that value (despite some comments, this is a sum, not an average).
        # detrended_data must be a single-column DataFrame: column 0 is converted via .tolist() below.
        # check length of detrended data and daily rules. They should be the same length.
        if len(detrended_data) != len(self._rules):
            print "Monte Carlo error! Detrended data and daily rules not the same length."
            return -1
        # Get a copy of the detrended data
        detrended_copy = detrended_data[0].tolist()
        # Cycle through the data now
        total_val = 0
        tradeDirection = 1
        # NOTE(review): the loop variable is immediately overwritten by a random index
        # (sampling without replacement via pop), and the direction simply alternates
        # +1/-1 each step rather than using self._rules -- confirm this is intended.
        for index in xrange(0, len(detrended_copy)):
            index = random.randint(0, len(detrended_copy)-1)
            if tradeDirection == 1:
                tradeDirection = -1
            else:
                tradeDirection = 1
            total_val += tradeDirection * detrended_copy.pop(index)
        #print "total_val: ", total_val
        return total_val
    def has_predictive_power(self, rule_percent_return):
        """Build the Monte Carlo distribution and return the rule's p-value against it."""
        # Get daily rules from the dataframe
        rules = self._df['Position'].tolist()
        #print "rules", rules
        # Set daily rules
        self._rules = rules
        # Get one-day market price changes
        # Detrend the data
        detrended_returns = GetDetrendedReturns(self._df, self._col_name)
        # Run through iterations and collect distribution
        self._sample_means = []
        for i in range(0, self._num_iterations, 1):
            avg_val = self.run_monte_carlo_round(detrended_returns)
            self._sample_means.append(avg_val)
        # Calculate and return the p-value for the sample mean distribution calculated above
        return GetPVal(self._sample_means, rule_percent_return)
# --------------------------------------------------------------------------------------------------------------------
# Test functions
def test_monte_carlo_round():
rules = [1, 1, -1, -1, -1]
data = [2, 3, 4, 3, 2]
mc = MonteCarloBootstrap()
mc._rules = rules
mean = mc.run_monte_carlo_round(data)
print "mean result: ", mean
pass
def test_monte_carlo_prediction():
rules = [1, 1, -1, -1, -1]
data = [2, 3, 4, 3, 2]
mc = MonteCarloBootstrap()
mc._rules = rules
mean = mc.run_monte_carlo_round(data)
print "mean result: ", mean
pass
if __name__ == "__main__":
    # Functions to run if this file is executed
    # Entry point: run the prediction smoke test (round-only test left disabled).
    print "Run default function for ", __file__
    #test_monte_carlo_round()
    test_monte_carlo_prediction()
|
# --------------------------------------------------------------------------------------------------------------------
#
# Patrick Neary
# Date: 11/12/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# BootstrapCalcTools.py
#
# This file contains tools common to the bootstrap processes.
#
# --------------------------------------------------------------------------------------------------------------------
# Import standard packages
import pandas
import math
# --------------------------------------------------------------------------------------------------------------------
def GetDailyReturns(df, colName):
    """
    Generate a dataframe containing the daily returns for the specified data frame and column.

    Returns are calculated as log10(day / prev day); the first row, which has
    no predecessor, is recorded as 0.0.
    :param df: pandas DataFrame holding the price series
    :param colName: name of the column in df containing the prices
    :return: single-column DataFrame of daily log10 returns, one row per input row
    """
    prev = None
    returns = []
    # Series.items() replaces .iteritems(), which was removed in pandas 2.0.
    for index, rowVal in df[colName].items():
        if prev is None:
            # First observation has no previous price; define its return as 0.
            dreturn = 0.0
        else:
            dreturn = math.log10(float(rowVal)/prev)
        prev = float(rowVal)
        returns.append(dreturn)
    return pandas.DataFrame(data=returns)
def GetMeanDailyReturn(df, colName):
    """
    Compute the daily log returns for df[colName] and their average.
    :param df: pandas DataFrame holding the price series
    :param colName: column containing the prices
    :return: (mean daily return, DataFrame of the daily returns)
    """
    returns_frame = GetDailyReturns(df, colName)
    mean_return = returns_frame[0].mean()
    return mean_return, returns_frame
def GetDetrendedReturns(df, col_name):
    """Return the daily returns of df[col_name] shifted to zero mean (mean daily return subtracted)."""
    mean_return, daily_returns = GetMeanDailyReturn(df, col_name)
    return daily_returns.apply(lambda col: col - mean_return)
def GetPVal(sample_dist, rule_percent_return):
    '''
    One-sided p-value: the fraction of the sampling distribution that is at
    least as large as the observed rule return.
    :param sample_dist: sample distribution, assumed to be centered on zero
    :param rule_percent_return: percent return of the trading rule
    :return: p-value associated with the trading rule
    '''
    below = sum(1 for sampled_return in sample_dist if sampled_return < rule_percent_return)
    return 1 - below/float(len(sample_dist))
--- FILE SEPARATOR ---
# --------------------------------------------------------------------------------------------------------------------
#
# Patrick Neary
# Date: 11/12/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# BootstrapABC.py
#
# Abstract base class for all tests developed to evaluate rules.
#
# --------------------------------------------------------------------------------------------------------------------
# Import standard packages
from abc import ABCMeta, abstractmethod
class BootstrapABC():
    """
    Base test class for bootstrap tests.
    init_test initializes the bootstrap test with the data it needs and the
    parameters used to build the sampling distribution.
    has_predictive_power takes a percent gain from a rule and returns a
    measure of its predictive power (a p-value in the concrete subclasses).
    SaveOutput would generate output for the test (currently disabled below).
    """
    # Python 2-style ABC declaration; under Python 3 this attribute is inert
    # and abc.ABC / metaclass=ABCMeta would be required instead.
    __metaclass__ = ABCMeta
    @abstractmethod
    def init_test(self):
        pass
    @abstractmethod
    def has_predictive_power(self):
        pass
    #@abstractmethod
    #def SaveOutput(self):
    #s pass
|
{
"imported_by": [],
"imports": [
"/TTEBootstrapTests/BootstrapCalcTools.py",
"/TTEBootstrapTests/BootstrapABC.py"
]
}
|
trineary/TradeTestingEngine
|
/TTEBootstrapTests/WhiteBootstrap.py
|
# --------------------------------------------------------------------------------------------------------------------
#
# Patrick Neary
# Date: 11/12/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# kWhiteRealityCheck.py
#
# This file is an implementation of White's Reality Check for evaluating the significance of a trading rule's
# predictive power.
#
# --------------------------------------------------------------------------------------------------------------------
# Import standard packages
import random
from matplotlib import pyplot as plt
from BootstrapABC import BootstrapABC
# Import my classes
from BootstrapCalcTools import GetDailyReturns, GetMeanDailyReturn, GetPVal
# Global values for selecting different options
# --------------------------------------------------------------------------------------------------------------------
class WhiteBootstrap(BootstrapABC):
    """White's Reality Check bootstrap: resamples detrended daily returns with
    replacement to build a distribution of rule-free total returns, then
    scores an actual rule return against it.

    Python 2 module (xrange).
    """
    def __init__(self):
        # _sample_means: bootstrap distribution of resampled total returns
        # (named "means" but init_test stores sums -- see note there).
        self._sample_means = []
        self._df = None
        self._detrended_data = None
        self._col_name = None
        self._num_iterations = None
        pass
    def init_test(self, df, col_name, num_iterations=5000):
        """
        init_test initializes the White Reality Check Bootstrap test
        :param df: dataframe containing data to bootstrap
        :param col_name: name of column in data frame containing data
        :param num_iterations: number of iterations to build bootstrap sampling distribution
        :return: none (the sampling distribution is stored in self._sample_means)
        """
        self._df = df
        self._detrended_data = None
        self._col_name = col_name
        self._num_iterations = num_iterations
        datalen = len(self._df.index)
        # Detrend the data
        meanDailyReturn, dailyreturns = GetMeanDailyReturn(self._df, self._col_name)
        dailyreturns = dailyreturns.apply(lambda x: x-meanDailyReturn)
        # Iterate over the daily returns and build a distribution of returns
        meanList = []
        for meanCount in xrange(0, self._num_iterations):
            sampleSum = 0
            # Resample datalen returns with replacement and total them.
            for randomReturn in xrange(0, datalen):
                index = random.randint(0, datalen-1)
                sampleSum += dailyreturns.iat[index, 0]
            # NOTE(review): the per-sample mean was deliberately disabled; the
            # distribution holds total (summed) returns, not averages.
            #sampleMean = sampleSum #/ datalen
            #meanList.append(sampleMean)
            meanList.append(sampleSum)
        #histogram, edges = np.histogram(meanList, bins=10)
        self._sample_means = meanList
        pass
    def plot_histogram(self, bins=20):
        # Show the bootstrap distribution; no-op before init_test has run.
        if len(self._sample_means) > 0:
            plt.hist(self._sample_means, bins=bins)
            plt.grid(True)
            plt.show()
        return
    def get_histogram_data(self):
        # Raw list of resampled total returns (empty until init_test runs).
        return self._sample_means
    def has_predictive_power(self, rule_percent_return):
        # p-value of the rule's return against the bootstrap distribution.
        return GetPVal(self._sample_means, rule_percent_return)
# --------------------------------------------------------------------------------------------------------------------
# Test functions
if __name__ == "__main__":
    # Functions to run if this file is executed
    # No standalone test is wired up for this module; it only announces itself.
    print "Run default function for ", __file__
|
# --------------------------------------------------------------------------------------------------------------------
#
# Patrick Neary
# Date: 11/12/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# BootstrapCalcTools.py
#
# This file contains tools common to the bootstrap processes.
#
# --------------------------------------------------------------------------------------------------------------------
# Import standard packages
import pandas
import math
# --------------------------------------------------------------------------------------------------------------------
def GetDailyReturns(df, colName):
    """
    Generate a dataframe containing the daily returns for the specified data frame and column.

    Returns are calculated as log10(day / prev day); the first row, which has
    no predecessor, is recorded as 0.0.
    :param df: pandas DataFrame holding the price series
    :param colName: name of the column in df containing the prices
    :return: single-column DataFrame of daily log10 returns, one row per input row
    """
    prev = None
    returns = []
    # Series.items() replaces .iteritems(), which was removed in pandas 2.0.
    for index, rowVal in df[colName].items():
        if prev is None:
            # First observation has no previous price; define its return as 0.
            dreturn = 0.0
        else:
            dreturn = math.log10(float(rowVal)/prev)
        prev = float(rowVal)
        returns.append(dreturn)
    return pandas.DataFrame(data=returns)
def GetMeanDailyReturn(df, colName):
    """
    Compute the daily log returns for df[colName] and their average.
    :param df: pandas DataFrame holding the price series
    :param colName: column containing the prices
    :return: (mean daily return, DataFrame of the daily returns)
    """
    returns_frame = GetDailyReturns(df, colName)
    mean_return = returns_frame[0].mean()
    return mean_return, returns_frame
def GetDetrendedReturns(df, col_name):
    """Return the daily returns of df[col_name] shifted to zero mean (mean daily return subtracted)."""
    mean_return, daily_returns = GetMeanDailyReturn(df, col_name)
    return daily_returns.apply(lambda col: col - mean_return)
def GetPVal(sample_dist, rule_percent_return):
    '''
    One-sided p-value: the fraction of the sampling distribution that is at
    least as large as the observed rule return.
    :param sample_dist: sample distribution, assumed to be centered on zero
    :param rule_percent_return: percent return of the trading rule
    :return: p-value associated with the trading rule
    '''
    below = sum(1 for sampled_return in sample_dist if sampled_return < rule_percent_return)
    return 1 - below/float(len(sample_dist))
--- FILE SEPARATOR ---
# --------------------------------------------------------------------------------------------------------------------
#
# Patrick Neary
# Date: 11/12/2016
#
# Fin 5350 / Dr. Tyler J. Brough
# Trade Testing Engine:
#
# BootstrapABC.py
#
# Abstract base class for all tests developed to evaluate rules.
#
# --------------------------------------------------------------------------------------------------------------------
# Import standard packages
from abc import ABCMeta, abstractmethod
class BootstrapABC():
    """
    Base test class for bootstrap tests.
    init_test initializes the bootstrap test with the data it needs and the
    parameters used to build the sampling distribution.
    has_predictive_power takes a percent gain from a rule and returns a
    measure of its predictive power (a p-value in the concrete subclasses).
    SaveOutput would generate output for the test (currently disabled below).
    """
    # Python 2-style ABC declaration; under Python 3 this attribute is inert
    # and abc.ABC / metaclass=ABCMeta would be required instead.
    __metaclass__ = ABCMeta
    @abstractmethod
    def init_test(self):
        pass
    @abstractmethod
    def has_predictive_power(self):
        pass
    #@abstractmethod
    #def SaveOutput(self):
    #s pass
|
{
"imported_by": [],
"imports": [
"/TTEBootstrapTests/BootstrapCalcTools.py",
"/TTEBootstrapTests/BootstrapABC.py"
]
}
|
trineary/TradeTestingEngine
|
/TradeTracking/TradeHistory.py
|
# --------------------------------------------------------------------------------------------------------------------
# Patrick Neary
# Fin5350
# Project
# 10/6/2016
#
# TradeHistory.py
#
# This file
# --------------------------------------------------------------------------------------------------------------------
import math
import datetime
import numpy as np
from TradeDetails import TradeDetails
class TradeTracking:
    """Track open/closed trades, cumulative P/L, win/loss statistics, and
    (optionally) per-bar positions stamped into a tick-data DataFrame.

    Direction constants: CASH = 0, LONG = 1, SHORT = -1 (matching TradeDetails).
    Python 2 module (print statements).
    """
    def __init__(self, trackHistory=True, trackDailyPositions=False):
        # trackHistory: keep each closed TradeDetails in self.tradeHistory.
        # trackDailyPositions: stamp every bar's position into the tick-data
        # DataFrame (requires InitTickData to be called first).
        self.totalPL = 0.0
        self.totalPercentReturn = 0.0
        self.tradeHistory = []
        self.currTrade = TradeDetails()
        self.trackHistory = trackHistory
        self.totalWins = 0
        self.totalLosses = 0
        self.longWins = 0
        self.shortWins = 0
        self.longLosses = 0
        self.shortLosses = 0
        self.tickData = None
        self.trackDailyPositions = trackDailyPositions
        self.ID = None
        self.isTradeOpen = False
        self.currTradeDirection = 0
        self.currPrice = 0.0
        # Direction constants; note SHORT is -1 here (a test helper below uses 2).
        self.CASH = 0
        self.LONG = 1
        self.SHORT = -1
        self.firsttimestamp = None
        self.lasttimestamp = None
        self.openPrice = 0.0
        # Counter used by CloseTrade to skip recording runs of losing trades.
        self.cnt = 0
        return
    def __str__(self):
        # One line per recorded trade (delegates to TradeDetails.__str__).
        tradehistorystr = ""
        for trade in self.tradeHistory:
            tradehistorystr += trade.__str__() + "\n"
        return tradehistorystr
    def InitTickData(self, tickData):
        # tickData - data frame containing time stamped tick information. A column will be added to this data to
        # track every time period's position. 0 - No trade, 1 - Long position, -1 - Short position.
        self.tickData = tickData
        # Add column to track position for every time period and make sure entries are 0 for 'no trade'
        self.tickData['Position'] = np.zeros((len(tickData), 1))
        pass
    def UpdateTradePositions(self):
        # Find and update the positions between the open and close dates in the dataframe. This function is based
        # off of values in self.currTrade. This shouldn't be called until after openTimeStamp, closeTimeStamp, and
        # tradeDirection have been set.. or after CloseTrade has been called.
        # Only run through this if we're tracking daily positions
        if self.trackDailyPositions == False:
            return
        # Iterate through the array looking for relevant time stamps.
        # NOTE(review): assumes tickData has a 'Date' column of "%Y-%m-%d" strings,
        # and relies on the deprecated DataFrame.ix / set_value APIs.
        index = 0
        for idx in self.tickData.iterrows():
            #print idx
            currtimestamp = datetime.datetime.strptime(self.tickData.ix[index]['Date'], "%Y-%m-%d")
            if currtimestamp >= self.currTrade.openTimeStamp and currtimestamp <= self.currTrade.closeTimeStamp:
                self.tickData.set_value(index, 'Position', self.currTrade.tradeDirection)
            index += 1
        pass
    def OpenTrade(self, equity, openprice, spread, direction, timestamp, id=None):
        # Start a new trade; replaces self.currTrade with a fresh TradeDetails.
        if self.firsttimestamp == None:
            self.firsttimestamp = timestamp
        self.currTrade = TradeDetails()
        self.currTrade.OpenTrade(equity, openprice, spread, direction, timestamp, id)
        self.ID = id
        self.isTradeOpen = True
        self.currTradeDirection = direction
        self.openPrice = openprice
        #print "OpenTrade", equity, openprice, spread, direction, timestamp, id
        return
    def UpdateStats(self, closeprice):
        # Bump the win/loss counters for the current trade at closeprice.
        tradePL = self.currTrade.GetCurrentPL(closeprice)
        if tradePL > 0:
            if self.currTradeDirection == self.LONG:
                self.longWins += 1
            else:
                self.shortWins += 1
            self.totalWins += 1
        else:
            if self.currTradeDirection == self.LONG:
                self.longLosses += 1
            else:
                self.shortLosses += 1
            self.totalLosses += 1
        pass
    def CloseTrade(self, closeprice, timestamp, direction):
        # Close the current trade and fold its results into the running totals.
        # direction: the book's direction AFTER the close (typically CASH, or the
        # new direction when reversing).
        self.lasttimestamp = timestamp
        # Close the trade
        self.currTrade.CloseTrade(closeprice, timestamp)
        tradePL = self.currTrade.GetCurrentPL(closeprice)
        self.totalPercentReturn += self.currTrade.GetTradePercentPL()
        # NOTE(review): losing trades are only recorded when cnt == 0; combined
        # with the cnt update below this deliberately drops part of each losing
        # streak from history, totals, and stats -- confirm this is intended.
        if tradePL > 0 or self.cnt == 0:
            # add trade to the history if enabled
            if self.trackHistory == True:
                # Drop half of the losing trades
                self.tradeHistory.append(self.currTrade)
            # Add trade results to total PL
            self.totalPL += tradePL
            self.currTrade.SetTotalPL(self.totalPL)
            # Update stats
            self.UpdateStats(closeprice)
            # Update trade positions for this trade if it's being tracked
            self.UpdateTradePositions()
        # Advance the losing-streak counter; it wraps back to 0 after 3 losses.
        if tradePL < 0:
            if self.cnt < 3:
                self.cnt += 1
            if self.cnt >= 3:
                self.cnt = 0
        self.ID = None
        self.isTradeOpen = False
        self.currTradeDirection = direction
        return
    def GetTradeCurrPL(self, currPrice):
        # Unrealized P/L of the open trade at currPrice (net of spread).
        return self.currTrade.GetCurrentPL(currPrice)
    def UpdateCurrPrice(self, currPrice):
        # Remember the latest price for GetTimeStepPL.
        self.currPrice = currPrice
        pass
    def GetTimeStepPL(self, nextPrice):
        # This gets the difference between the updated price and the next price. Order of subtraction is based on
        # the direction of the trade.
        if self.currTradeDirection == self.LONG:
            return nextPrice - self.currPrice
        elif self.currTradeDirection == self.SHORT:
            return self.currPrice - nextPrice
        else:
            return 0.0
    def GetTradeCurrDuration(self):
        # Duration of the current trade (delegates to TradeDetails).
        return self.currTrade.GetTradeDuration()
    def GetTotalPL(self):
        # This returns the cumulative PL prior to current trade (if any)
        return self.totalPL
    def GetPercentReturn(self):
        # This calculates the percent return using ln(r1) - ln(r2) where r1 and r2 are opening/closing prices
        return self.totalPercentReturn
    def GetTradeStatsStr(self):
        # Build a human-readable multi-line summary of the trading statistics.
        tradestatsstr = ""
        # Counts are clamped to 1 so the percentage math never divides by zero.
        totalTrades = max((self.totalWins + self.totalLosses), 1)
        tradestatsstr += "Trading Stats:\n"
        tradestatsstr += "Total trades:\t %d\n" % totalTrades
        tradestatsstr += "Total Wins:\t\t %d, \t%0.2f%%\n" %(self.totalWins, (float(self.totalWins)/totalTrades)*100)
        tradestatsstr += "Total Losses:\t %d, \t%0.2f%%\n" %(self.totalLosses, (float(self.totalLosses)/totalTrades)*100)
        longTrades = max((self.longWins + self.longLosses), 1)
        shortTrades = max((self.shortWins + self.shortLosses), 1)
        tradestatsstr += "Long wins:\t\t %d, \t%0.2f%%\n" %(self.longWins, (float(self.longWins)/longTrades)*100)
        tradestatsstr += "Long losses:\t %d, \t%0.2f%%\n" %(self.longLosses, (float(self.longLosses)/longTrades)*100)
        tradestatsstr += "Short wins:\t\t %d, \t%0.2f%%\n" %(self.shortWins, (float(self.shortWins)/shortTrades)*100)
        tradestatsstr += "Short losses:\t %d, \t%0.2f%%\n" %(self.shortLosses, (float(self.shortLosses)/shortTrades)*100)
        tradestatsstr += "Total P/L:\t\t %0.2f\n" % self.totalPL
        # NOTE(review): "P\L" below looks like a typo for "P/L" (cf. line above).
        tradestatsstr += "Percent P\L:\t %0.2f\n" % self.totalPercentReturn
        tradestatsstr += "First timestamp: %s\n" % self.firsttimestamp
        tradestatsstr += "Last timestamp:\t %s\n" % self.lasttimestamp
        return tradestatsstr
    def PrintHistory(self):
        # Print every recorded trade and return the concatenated trade strings.
        tradehistorystr = ""
        for trade in self.tradeHistory:
            tradehistorystr += trade.__str__()
            print trade
        return tradehistorystr
    def GetHistory(self):
        # Return list of TradeDetails
        return self.tradeHistory
    def getCurrID(self):
        # If application is interested in the ID for the current trade then it will be available (if set).
        return self.ID
    def GetIsTradeOpen(self):
        # True between an OpenTrade call and the matching CloseTrade call.
        return self.isTradeOpen
    def GetCurrTradeDirection(self):
        # Direction of the current/last trade (CASH = 0, LONG = 1, SHORT = -1).
        return self.currTradeDirection
# --------------------------------------------------------------------------------------------------------------------
#
# --------------------------------------------------------------------------------------------------------------------
def ExecuteTestTrades():
CASH = 0
LONG = 1
SHORT = 2
openTS = datetime.datetime(2016, 04, 18)
closeTS = datetime.datetime(2016, 04, 19)
openPrice = 78.8
closePrice = 78.2
spread = 0.032
tt = TradeTracking()
tt.OpenTrade("AUDJPY", openPrice, spread, LONG, openTS)
tt.CloseTrade(closePrice, closeTS)
print tt
openTS = datetime.datetime(2016, 04, 20)
closeTS = datetime.datetime(2016, 04, 22)
openPrice = 79.0
closePrice = 79.8
spread = 0.032
tt.OpenTrade("AUDJPY", openPrice, spread, LONG, openTS)
tt.CloseTrade(closePrice, closeTS)
print ""
print tt
return
# --------------------------------------------------------------------------------------------------------------------
# Default function when the file is run
# --------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Functions to run if this file is executed
    # Entry point: run the hard-coded example trades as a smoke test.
    print "Run default function for ", __file__
    ExecuteTestTrades()
|
# --------------------------------------------------------------------------------------------------------------------
# Patrick Neary
# CS 6110
# Project
# 10/6/2016
#
# TradeDetails.py
#
# This file
# --------------------------------------------------------------------------------------------------------------------
import datetime
import math
class TradeDetails:
CASH = 0
LONG = 1
SHORT = -1
def __init__(self):
self.openPrice = 0.0
self.closePrice = 0.0
self.spread = 0.0
self.tradeDirection = self.CASH
self.equityName = ""
self.openTimeStamp = None
self.closeTimeStamp = None
self.duration = None
self.currPL = 0.0
self.stopLoss = None
self.profitTarget = None
self.totalPL = 0.0
self.ID = None
return
def __str__(self):
mystr = "%s, %s, %s, %s, %s, %s, %s, %s, %s" % (self.equityName, self.openTimeStamp, self.closeTimeStamp,
self.duration, self.openPrice, self.closePrice, self.currPL, self.totalPL, self.ID)
return mystr
def OpenTrade(self, equity, openprice, spread, direction, timestamp, id=None):
# timestamp - needs to be a string in format of "year-month-day" or in datetime format.
if isinstance(timestamp, str) == True:
timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%d")
# Check to make sure timestamp is a date/time format
if isinstance(timestamp, datetime.datetime) == False:
print "Timestamp needs to be in datetime format"
return
self.openPrice = openprice
self.equityName = equity
self.spread = spread
self.tradeDirection = direction
self.openTimeStamp = timestamp
self.ID = id # ID of entity making the trade
return
def CloseTrade(self, closeprice, timestamp):
# timestamp - needs to be a string in format of "year-month-day" or in datetime format.
if isinstance(timestamp, str) == True:
timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%d")
# Check to make sure timestamp is a date/time format
if isinstance(timestamp, datetime.datetime) == False:
print "Timestamp needs to be in datetime format"
return
# Close the trade
self.closePrice = closeprice
self.closeTimeStamp = timestamp
#self.tradeDirection = self.CASH
self.GetCurrentPL(closeprice)
self.GetTradeDuration()
#self.ID = None
return
def GetCurrentPL(self, currprice):
# Calculate the change in price from open to now. This includes the cost of the spread.
if self.tradeDirection is self.CASH:
self.currPL = 0.0
elif self.tradeDirection is self.SHORT:
self.currPL = float(self.openPrice) - float(currprice) - float(self.spread)
else:
self.currPL = float(currprice) - float(self.openPrice) - float(self.spread)
#print "GetCurrentPL: ", self.currPL, self.tradeDirection, self.spread
return self.currPL
def GetTradePercentPL(self):
if self.tradeDirection is self.CASH:
totalPercentReturn = 0.0
elif self.tradeDirection is self.SHORT:
totalPercentReturn = math.log10(float(self.openPrice)) - math.log10(float(self.closePrice))
else:
totalPercentReturn = math.log10(float(self.closePrice)) - math.log10(float(self.openPrice))
return totalPercentReturn
def GetTradeDuration(self):
duration = self.closeTimeStamp - self.openTimeStamp
self.duration = duration
return self.duration
def RedefineDirection(self, cash, long, short):
self.CASH = cash
self.LONG = long
self.SHORT = short
return
def SetTotalPL(self, totalPL):
self.totalPL = totalPL
return
def GetCurrentTradeID(self):
return self.ID
# --------------------------------------------------------------------------------------------------------------------
#
# --------------------------------------------------------------------------------------------------------------------
def TestTradeDetails():
    # Smoke test: open and close one long AUDJPY trade and print the record.
    # Python 2 only: zero-padded day literals (04) and print statement.
    openTS = datetime.datetime(2016, 04, 18)
    closeTS = datetime.datetime(2016, 04, 19)
    openPrice = 78.8
    closePrice = 78.2
    spread = 0.032
    td = TradeDetails()
    td.OpenTrade("AUDJPY", openPrice, spread, 1, openTS)
    td.CloseTrade(closePrice, closeTS)
    print td
    return
# --------------------------------------------------------------------------------------------------------------------
# Default function when the file is run
# --------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Functions to run if this file is executed
    # Entry point: run the single-trade smoke test.
    print "Run default function for ", __file__
    TestTradeDetails()
|
{
"imported_by": [
"/TTE.py"
],
"imports": [
"/TradeTracking/TradeDetails.py"
]
}
|
Tadaboody/good_smell
|
/docs/generate_smell_doc.py
|
from tests.test_collection import collect_tests, test_case_files
def generate_smell_docs():
for example_test in [list(collect_tests(file))[0] for file in test_case_files]:
desc, symbols, before, after = example_test
symbol = list(symbols)[0]
print(
f"""### {desc} ({symbol})
```py
{before}```
Will be fixed to
```py
{after}```"""
)
if __name__ == "__main__":
    # Script entry point: emit the smell documentation to stdout.
    generate_smell_docs()
|
import ast
import itertools
from os import PathLike
from pathlib import Path
from typing import Iterator, NamedTuple, Set
import astor
import black
import pytest
from good_smell import fix_smell, smell_warnings
# Resolve the bundled example-case directory relative to this test module.
FILE_DIR = Path(__file__).parent
EXAMPLES_DIR = FILE_DIR / "examples"
def normalize_formatting(code: str) -> str:
    """Round-trip *code* through the AST and black so two snippets can be compared ignoring formatting."""
    canonical = astor.to_source(ast.parse(code))
    try:
        return black.format_file_contents(canonical, fast=True, mode=black.Mode())
    except black.NothingChanged:
        return canonical
class CollectedTest(NamedTuple):
    """One parsed example case: description, expected warning symbols, and the
    source code before and after fixing."""
    desc: str
    error_symbols: Set[str]
    # Bug fix: was annotated `int`; collect_tests stores the joined source text.
    before: str
    after: str
def is_title(line: str) -> bool:
    """True when *line* opens a new example case (it begins with TITLE_PREFIX)."""
    return line.startswith(TITLE_PREFIX)
# Markers that structure an example file (see collect_tests below).
TITLE_PREFIX = "#:"  # opens a new example's title line
BEFORE_AFTER_SPLITTER = "==>"  # separates the smelly code from the fixed code
END_SYMBOL = "END"  # terminates an example
SPECIAL_SYMBOLS = (TITLE_PREFIX, BEFORE_AFTER_SPLITTER, END_SYMBOL)
def collect_tests(path: PathLike) -> Iterator[CollectedTest]:
    """Collects all test cases listed in `path`.

    File format per case: a title line ("#: desc"), a comment line of
    comma-separated expected symbols (or "None" for no warnings), the smelly
    code up to a "==>" line, then the fixed code up to an "END" line.
    """
    with open(path) as fp:
        lines = fp.readlines()
    lines_iter = iter(lines)  # Create iterator for continued iteration
    # The filtering generator and the takewhile calls below all advance the
    # same lines_iter, so each case's parsing resumes where the last stopped.
    for line_num, line in enumerate(line for line in lines_iter if is_title(line)):
        desc = line.strip("#:").strip()
        symbols_line = next(lines_iter).strip("#").strip()
        # "None" is a placeholder meaning the example expects no warnings.
        symbols = {symbol for symbol in symbols_line.split(",") if symbol != "None"}
        before = "".join(
            itertools.takewhile(lambda l: BEFORE_AFTER_SPLITTER not in l, lines_iter)
        )
        after = "".join(itertools.takewhile(lambda l: END_SYMBOL not in l, lines_iter))
        collected_test = CollectedTest(
            desc=desc, error_symbols=symbols, before=before, after=after
        )
        # Guard against a structural marker leaking into a parsed field,
        # which indicates a malformed example file.
        # NOTE(review): line_num is the 0-based index among title lines, not
        # the actual file line number, so the message below can mislead.
        if any(
            symbol in field
            for field, symbol in itertools.product(collected_test, SPECIAL_SYMBOLS)
        ):
            raise Exception(
                f"""Wrongly formatted example in {path}:{line_num}
{collected_test}"""
            )
        yield collected_test
def test_collect_tests():
    """The bundled example.py must parse into exactly the two known cases."""
    parsed = list(collect_tests(EXAMPLES_DIR / "example.py"))
    assert len(parsed) == 2
    with_symbols, without_symbols = parsed
    assert with_symbols.desc == "example"
    assert with_symbols.error_symbols == {"example-symbol", "another-one"}
    assert with_symbols.before == """before = 0\nbefore = 1\n"""
    assert with_symbols.after == """after = 0\nafter = 1\n"""
    assert without_symbols.error_symbols == set()
# Every example file except the format demo ("example.py") drives the tests below.
test_case_files = [f for f in EXAMPLES_DIR.iterdir() if "example" not in f.name]
def params_from_file():
    """Yield one pytest param (before, after, symbols) per collected case, id'd as "<file>:<desc>"."""
    for case_file in test_case_files:
        for case in collect_tests(case_file):
            yield pytest.param(
                case.before,
                case.after,
                case.error_symbols,
                id=f"{case_file.with_suffix('').name}:{case.desc}",
            )
@pytest.mark.parametrize(["before", "_", "symbols"], params_from_file())
def test_smell_warning(before, _, symbols):
    # The linter must emit exactly the symbols the example declares -- no more, no fewer.
    assert set(symbols) == {smell.symbol for smell in smell_warnings(before)}
@pytest.mark.parametrize(["before", "after", "_"], list(params_from_file()))
def test_smell_fixing(before, after, _):
    # Fixing the smelly code must yield the declared "after" source (modulo formatting).
    assert normalize_formatting(fix_smell(before)) == normalize_formatting(after)
|
{
"imported_by": [],
"imports": [
"/tests/test_collection.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/__init__.py
|
# flake8:noqa
try:
    from importlib import metadata
except ImportError:
    # Running on pre-3.8 Python; use importlib-metadata package
    import importlib_metadata as metadata
# Single source of truth: read the version from the installed distribution's metadata.
__version__ = metadata.version("good-smell")
from .smell_warning import SmellWarning
from .lint_smell import LintSmell
from .ast_smell import AstSmell, LoggingTransformer
from .smells import implemented_smells
from .main import fix_smell, print_fixed_smell, main, smell_warnings
from . import smells # Allow importing good_smell.smells
from .flake8_ext import LintingFlake8
|
import abc
import ast
import os
from typing import List, Optional
from good_smell import SmellWarning
class LintSmell(abc.ABC):
    """Abstract Base class to represent the sniffing instructions for the linter"""

    def __init__(
        self,
        transform: bool,
        path: Optional[str] = None,
        tree: Optional[ast.AST] = None,
    ):
        """
        :param transform: when True the smell rewrites the code; when False it only reports
        :param path: source file path, used for reporting
        :param tree: pre-parsed AST of the code under inspection
        """
        self.tree = tree
        self.path = path
        self.transform = transform

    @classmethod
    def from_source(
        cls,
        source_code: str,
        transform: bool = True,
        start_line: Optional[int] = 0,
        end_line: Optional[int] = None,
        path: Optional[str] = None,
    ) -> "LintSmell":
        """Build a smell instance from raw source, optionally restricted to the
        [start_line, end_line) slice of its lines (end_line defaults to the end).
        """
        # Removed a redundant `start_line = start_line` self-assignment here.
        end_line = end_line or len(source_code.splitlines())
        source_code = os.linesep.join(source_code.splitlines()[start_line:end_line])
        return cls(transform=transform, path=path, tree=ast.parse(source_code))

    @abc.abstractmethod
    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occuring smells of this smell class"""

    @abc.abstractmethod
    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""

    @property
    @abc.abstractmethod
    def symbol(self) -> str:
        """The symbolic name for the smell"""

    @property
    @abc.abstractmethod
    def warning_message(self) -> str:
        """The human-readable warning message for the smell"""
--- FILE SEPARATOR ---
import ast
from typing import Generator, Tuple
from good_smell import SmellWarning, implemented_smells, __version__
class LintingFlake8:
"""Entry point good smell to be used as a flake8 linting plugin"""
name = "good-smell"
version = __version__
def __init__(self, tree: ast.AST, filename: str):
""""http://flake8.pycqa.org/en/latest/plugin-development/plugin-parameters.html"""
self.tree = tree
self.filename = filename
def run(self) -> Generator[Tuple[int, int, str, str], None, None]:
for num, smell in enumerate(implemented_smells):
warnings = smell(
transform=False, tree=self.tree, path=self.filename
).check_for_smell()
warning: SmellWarning
yield from (
(
warning.row,
warning.col,
f"SML{str(num).zfill(3)} {warning.msg}",
"GoodSmell",
)
for warning in warnings
)
--- FILE SEPARATOR ---
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
"""A subclass of transformer that logs the nodes it transforms"""
def __init__(self, transform):
self.transformed_nodes = list()
self.transofrm = transform
@abc.abstractmethod
def is_smelly(self, node: ast.AST) -> bool:
"""Checks if the given `node` should be transformed"""
def visit(self, node: ast.AST):
if not self.is_smelly(node):
return self.generic_visit(node)
self.transformed_nodes.append(node)
if self.transofrm:
return super().visit(node)
return self.generic_visit(node)
T = TypeVar("T")
def unwrap(x: Optional[T]) -> T:
if x is None:
raise ValueError("Unrwapped None")
return x
class AstSmell(LintSmell):
def check_for_smell(self) -> List[SmellWarning]:
"""Return a list of all occuring smells of this smell class"""
transformer = self.transformer_class(self.transform)
transformer.visit(unwrap(self.tree))
node: ast.stmt
return [
SmellWarning(
msg=self.warning_message,
row=node.lineno,
col=node.col_offset,
path=unwrap(self.path),
symbol=self.symbol,
)
for node in transformer.transformed_nodes
]
def fix_smell(self) -> str:
"""Return a fixed version of the code without the code smell"""
return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))
@property
@abc.abstractmethod
def transformer_class(self) -> Type[LoggingTransformer]:
"""The class for the transformer used to create"""
--- FILE SEPARATOR ---
from pathlib import Path
from typing import Iterable, Type
from fire import Fire
from good_smell import LintSmell, SmellWarning, implemented_smells
def print_smell_warnings(path: str):
"""Prints any warning messages about smells"""
print(
"\n".join(
warning.warning_string()
for warning in smell_warnings(Path(path).read_text(), path)
)
)
def smell_warnings(source: str, path: str = "") -> Iterable[SmellWarning]:
for smell in implemented_smells:
yield from smell.from_source(
source_code=source, path=str(path), transform=False
).check_for_smell()
def print_fixed_smell(path: str, starting_line: int = 0, end_line: int = None):
"""Prints a fixed version of `source`"""
pathlib_path = Path(path)
source = pathlib_path.read_text()
print(fix_smell(source, starting_line, end_line))
def fix_smell(
source: str, starting_line: int = 0, end_line: int = None, path: str = None
) -> str:
"""Returns a fixed version of `source`"""
smell: Type[LintSmell]
for smell in implemented_smells:
source = smell.from_source(
source_code=source,
start_line=starting_line,
end_line=end_line,
path=path,
transform=True,
).fix_smell()
return source
def main():
Fire({"fix": print_fixed_smell})
if __name__ == "__main__":
main()
--- FILE SEPARATOR ---
from typing import NamedTuple
FLAKE8_FORMAT = "{path}:{row}:{col} {symbol} {msg}"
PYLINT_FORMAT = "{path}:{line}:{column}: {msg} ({symbol})"
def to_dict(namedtuple: NamedTuple) -> dict:
return dict(zip(namedtuple._fields, list(namedtuple)))
class SmellWarning(NamedTuple):
"""Class to represent a warning message about a smell"""
row: int
col: int
path: str
msg: str
symbol: str
def warning_string(self, formatter: str = PYLINT_FORMAT):
return formatter.format(**to_dict(self))
|
{
"imported_by": [],
"imports": [
"/good_smell/lint_smell.py",
"/good_smell/flake8_ext.py",
"/good_smell/ast_smell.py",
"/good_smell/main.py",
"/good_smell/smell_warning.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/ast_smell.py
|
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
    """A subclass of transformer that logs the nodes it transforms"""

    def __init__(self, transform):
        """
        Args:
            transform: when True, smelly nodes are rewritten by the
                subclass's visitor methods; when False they are only recorded.
        """
        self.transformed_nodes = list()
        # BUG FIX: this attribute was historically misspelled "transofrm".
        # Use the correct spelling, keeping the old name as a
        # backward-compatibility alias for any external reader.
        self.transform = transform
        self.transofrm = transform

    @abc.abstractmethod
    def is_smelly(self, node: ast.AST) -> bool:
        """Checks if the given `node` should be transformed"""

    def visit(self, node: ast.AST):
        # Non-smelly nodes are left untouched; keep walking their children.
        if not self.is_smelly(node):
            return self.generic_visit(node)
        # Record every smelly node, whether or not it gets rewritten.
        self.transformed_nodes.append(node)
        if self.transform:
            return super().visit(node)
        return self.generic_visit(node)
T = TypeVar("T")


def unwrap(x: Optional[T]) -> T:
    """Return ``x`` unchanged, raising ValueError when it is None."""
    if x is None:
        # BUG FIX: message previously misspelled "Unrwapped".
        raise ValueError("Unwrapped None")
    return x
class AstSmell(LintSmell):
    """LintSmell implemented in terms of a LoggingTransformer subclass."""

    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occuring smells of this smell class"""
        walker = self.transformer_class(self.transform)
        walker.visit(unwrap(self.tree))
        node: ast.stmt
        warnings = []
        for node in walker.transformed_nodes:
            warnings.append(
                SmellWarning(
                    msg=self.warning_message,
                    row=node.lineno,
                    col=node.col_offset,
                    path=unwrap(self.path),
                    symbol=self.symbol,
                )
            )
        return warnings

    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""
        fixed_tree = self.transformer_class(True).visit(unwrap(self.tree))
        return astor.to_source(fixed_tree)

    @property
    @abc.abstractmethod
    def transformer_class(self) -> Type[LoggingTransformer]:
        """The class for the transformer used to create"""
|
import abc
import ast
import os
from typing import List, Optional
from good_smell import SmellWarning
class LintSmell(abc.ABC):
"""Abstract Base class to represent the sniffing instructions for the linter"""
def __init__(
self,
transform: bool,
path: Optional[str] = None,
tree: Optional[ast.AST] = None,
):
self.tree = tree
self.path = path
self.transform = transform
@classmethod
def from_source(
cls,
source_code: str,
transform: bool = True,
start_line: Optional[int] = 0,
end_line: Optional[int] = None,
path: Optional[str] = None,
) -> "LintSmell":
start_line = start_line
end_line = end_line or len(source_code.splitlines())
source_code = os.linesep.join(source_code.splitlines()[start_line:end_line])
return cls(transform=transform, path=path, tree=ast.parse(source_code))
@abc.abstractmethod
def check_for_smell(self) -> List[SmellWarning]:
"""Return a list of all occuring smells of this smell class"""
@abc.abstractmethod
def fix_smell(self) -> str:
"""Return a fixed version of the code without the code smell"""
@property
@abc.abstractmethod
def symbol(self) -> str:
"""The symbolic name for the smell"""
@property
@abc.abstractmethod
def warning_message(self) -> str:
"""The symbolic name for the smell"""
--- FILE SEPARATOR ---
from typing import NamedTuple
FLAKE8_FORMAT = "{path}:{row}:{col} {symbol} {msg}"
PYLINT_FORMAT = "{path}:{line}:{column}: {msg} ({symbol})"
def to_dict(namedtuple: NamedTuple) -> dict:
return dict(zip(namedtuple._fields, list(namedtuple)))
class SmellWarning(NamedTuple):
"""Class to represent a warning message about a smell"""
row: int
col: int
path: str
msg: str
symbol: str
def warning_string(self, formatter: str = PYLINT_FORMAT):
return formatter.format(**to_dict(self))
|
{
"imported_by": [
"/good_smell/__init__.py",
"/good_smell/smells/filter.py",
"/good_smell/smells/join_literal.py",
"/good_smell/smells/nested_for.py",
"/good_smell/smells/range_len_fix.py",
"/good_smell/smells/yield_from.py"
],
"imports": [
"/good_smell/lint_smell.py",
"/good_smell/smell_warning.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/flake8_ext.py
|
import ast
from typing import Generator, Tuple
from good_smell import SmellWarning, implemented_smells, __version__
class LintingFlake8:
    """Entry point good smell to be used as a flake8 linting plugin"""

    name = "good-smell"
    version = __version__

    def __init__(self, tree: ast.AST, filename: str):
        """"http://flake8.pycqa.org/en/latest/plugin-development/plugin-parameters.html"""
        self.tree = tree
        self.filename = filename

    def run(self) -> Generator[Tuple[int, int, str, str], None, None]:
        # Emit one flake8-style (row, col, message, checker) tuple per
        # detected smell; the message code encodes the smell's index.
        for num, smell in enumerate(implemented_smells):
            checker = smell(transform=False, tree=self.tree, path=self.filename)
            warning: SmellWarning
            for warning in checker.check_for_smell():
                message = f"SML{str(num).zfill(3)} {warning.msg}"
                yield warning.row, warning.col, message, "GoodSmell"
|
from typing import NamedTuple
FLAKE8_FORMAT = "{path}:{row}:{col} {symbol} {msg}"
PYLINT_FORMAT = "{path}:{line}:{column}: {msg} ({symbol})"
def to_dict(namedtuple: NamedTuple) -> dict:
return dict(zip(namedtuple._fields, list(namedtuple)))
class SmellWarning(NamedTuple):
"""Class to represent a warning message about a smell"""
row: int
col: int
path: str
msg: str
symbol: str
def warning_string(self, formatter: str = PYLINT_FORMAT):
return formatter.format(**to_dict(self))
|
{
"imported_by": [
"/good_smell/__init__.py"
],
"imports": [
"/good_smell/smell_warning.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/lint_smell.py
|
import abc
import ast
import os
from typing import List, Optional
from good_smell import SmellWarning
class LintSmell(abc.ABC):
    """Abstract Base class to represent the sniffing instructions for the linter.

    Concrete subclasses supply detection (``check_for_smell``), rewriting
    (``fix_smell``) and identification (``symbol``, ``warning_message``).
    """

    def __init__(
        self,
        transform: bool,
        path: Optional[str] = None,
        tree: Optional[ast.AST] = None,
    ):
        """
        Args:
            transform: whether fixing transformations should be applied.
            path: path of the linted file, used in warning messages.
            tree: parsed AST of the source to inspect.
        """
        self.tree = tree
        self.path = path
        self.transform = transform

    @classmethod
    def from_source(
        cls,
        source_code: str,
        transform: bool = True,
        start_line: Optional[int] = 0,
        end_line: Optional[int] = None,
        path: Optional[str] = None,
    ) -> "LintSmell":
        """Build an instance from raw source, restricted to the half-open
        line range ``[start_line, end_line)``.
        """
        # Removed dead no-op assignment (`start_line = start_line`).
        # Default the end of the range to the whole file.
        end_line = end_line or len(source_code.splitlines())
        source_code = os.linesep.join(source_code.splitlines()[start_line:end_line])
        return cls(transform=transform, path=path, tree=ast.parse(source_code))

    @abc.abstractmethod
    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occuring smells of this smell class"""

    @abc.abstractmethod
    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""

    @property
    @abc.abstractmethod
    def symbol(self) -> str:
        """The symbolic name for the smell"""

    @property
    @abc.abstractmethod
    def warning_message(self) -> str:
        """The warning message shown to the user for this smell"""
|
from typing import NamedTuple
# Message templates for the two lint-output conventions supported by
# SmellWarning.warning_string; placeholder names must match SmellWarning's
# field names (row, col, path, msg, symbol).
FLAKE8_FORMAT = "{path}:{row}:{col} {symbol} {msg}"
# BUG FIX: this template previously used {line}/{column}, which are not
# SmellWarning fields, so formatting with PYLINT_FORMAT (the default for
# warning_string) raised KeyError.
PYLINT_FORMAT = "{path}:{row}:{col}: {msg} ({symbol})"
def to_dict(namedtuple: NamedTuple) -> dict:
    """Return a plain dict mapping the namedtuple's field names to its values."""
    # NamedTuple instances provide this natively.
    return dict(namedtuple._asdict())
class SmellWarning(NamedTuple):
    """Class to represent a warning message about a smell"""
    row: int  # line number of the smelly node (ast lineno)
    col: int  # column offset of the smelly node (ast col_offset)
    path: str  # path of the file the smell was found in
    msg: str  # human-readable description of the smell
    symbol: str  # short symbolic name, e.g. "nested-for"
    def warning_string(self, formatter: str = PYLINT_FORMAT):
        # ``formatter`` is a str.format template whose placeholders are
        # filled from this warning's fields.
        return formatter.format(**to_dict(self))
|
{
"imported_by": [
"/good_smell/__init__.py",
"/good_smell/ast_smell.py",
"/good_smell/main.py"
],
"imports": [
"/good_smell/smell_warning.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/main.py
|
from pathlib import Path
from typing import Iterable, Optional, Type

from fire import Fire

from good_smell import LintSmell, SmellWarning, implemented_smells
def print_smell_warnings(path: str):
    """Prints any warning messages about smells"""
    source = Path(path).read_text()
    lines = [warning.warning_string() for warning in smell_warnings(source, path)]
    print("\n".join(lines))
def smell_warnings(source: str, path: str = "") -> Iterable[SmellWarning]:
    """Yield every smell warning found in ``source``."""
    for smell_cls in implemented_smells:
        checker = smell_cls.from_source(
            source_code=source, path=str(path), transform=False
        )
        yield from checker.check_for_smell()
def print_fixed_smell(path: str, starting_line: int = 0, end_line: int = None):
    """Prints a fixed version of `source`"""
    source_path = Path(path)
    contents = source_path.read_text()
    print(fix_smell(contents, starting_line, end_line))
def fix_smell(
    source: str,
    starting_line: int = 0,
    end_line: Optional[int] = None,
    path: Optional[str] = None,
) -> str:
    """Returns a fixed version of `source`.

    Every implemented smell is applied in sequence, so the output of one
    fixer becomes the input of the next.

    Args:
        source: the source code to fix.
        starting_line: first line (0-based) of the region to fix.
        end_line: line past the end of the region; None means end of file.
        path: path used in warning messages, if any.
    """
    # Annotations fixed: `= None` defaults now declared Optional (PEP 484).
    smell: Type[LintSmell]
    for smell in implemented_smells:
        source = smell.from_source(
            source_code=source,
            start_line=starting_line,
            end_line=end_line,
            path=path,
            transform=True,
        ).fix_smell()
    return source
def main():
    """CLI entry point: exposes ``fix`` as a Fire sub-command."""
    Fire({"fix": print_fixed_smell})
if __name__ == "__main__":
    main()
|
import abc
import ast
import os
from typing import List, Optional
from good_smell import SmellWarning
class LintSmell(abc.ABC):
"""Abstract Base class to represent the sniffing instructions for the linter"""
def __init__(
self,
transform: bool,
path: Optional[str] = None,
tree: Optional[ast.AST] = None,
):
self.tree = tree
self.path = path
self.transform = transform
@classmethod
def from_source(
cls,
source_code: str,
transform: bool = True,
start_line: Optional[int] = 0,
end_line: Optional[int] = None,
path: Optional[str] = None,
) -> "LintSmell":
start_line = start_line
end_line = end_line or len(source_code.splitlines())
source_code = os.linesep.join(source_code.splitlines()[start_line:end_line])
return cls(transform=transform, path=path, tree=ast.parse(source_code))
@abc.abstractmethod
def check_for_smell(self) -> List[SmellWarning]:
"""Return a list of all occuring smells of this smell class"""
@abc.abstractmethod
def fix_smell(self) -> str:
"""Return a fixed version of the code without the code smell"""
@property
@abc.abstractmethod
def symbol(self) -> str:
"""The symbolic name for the smell"""
@property
@abc.abstractmethod
def warning_message(self) -> str:
"""The symbolic name for the smell"""
--- FILE SEPARATOR ---
from typing import NamedTuple
FLAKE8_FORMAT = "{path}:{row}:{col} {symbol} {msg}"
PYLINT_FORMAT = "{path}:{line}:{column}: {msg} ({symbol})"
def to_dict(namedtuple: NamedTuple) -> dict:
return dict(zip(namedtuple._fields, list(namedtuple)))
class SmellWarning(NamedTuple):
"""Class to represent a warning message about a smell"""
row: int
col: int
path: str
msg: str
symbol: str
def warning_string(self, formatter: str = PYLINT_FORMAT):
return formatter.format(**to_dict(self))
|
{
"imported_by": [
"/good_smell/__init__.py",
"/tests/test_collection.py",
"/tests/test_enumerate_fix.py"
],
"imports": [
"/good_smell/lint_smell.py",
"/good_smell/smell_warning.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/smells/__init__.py
|
from .filter import FilterIterator
from .join_literal import JoinLiteral
from .nested_for import NestedFor
from .range_len_fix import RangeLenSmell
from .yield_from import YieldFrom
implemented_smells = (RangeLenSmell, NestedFor, FilterIterator, YieldFrom, JoinLiteral)
|
import ast
import typing
from good_smell import AstSmell, LoggingTransformer
class NameInNode(LoggingTransformer):
def __init__(self, name: ast.Name):
self.name = name
super().__init__(transform=False)
def is_smelly(self, node: ast.AST) -> bool:
return isinstance(node, ast.Name) and node.id == self.name.id
def name_in_node(node: ast.AST, name: ast.Name) -> bool:
"""Checks if the node `name` is in `node`"""
checker = NameInNode(name)
checker.visit(node)
return bool(checker.transformed_nodes)
class NestedFor(AstSmell):
"""Checks for adjacent nested fors and replaces them with itertools.product"""
@property
def transformer_class(self):
return NestedForTransformer
@property
def warning_message(self):
return "Consider using a nested comprehension instead of a nested for"
@property
def symbol(self):
return "nested-for"
class NestedForTransformer(LoggingTransformer):
"""NodeTransformer that goes visits all the nested `for`s and replaces them
with itertools.product"""
def visit_For(self, node: ast.For) -> ast.For:
inner_for: ast.For = node.body[0]
new_target = ast.Tuple(elts=[node.target, inner_for.target])
def create_comprehension(for_node: ast.For) -> ast.comprehension:
return ast.comprehension(target=for_node.target, iter=for_node.iter, ifs=[])
gen_exp = ast.GeneratorExp(
elt=new_target,
generators=[create_comprehension(node), create_comprehension(inner_for)],
)
new_for = ast.For(
target=new_target, iter=gen_exp, body=inner_for.body, orelse=node.orelse
)
new_for = ast.fix_missing_locations(new_for)
return new_for
@staticmethod
def is_smelly(node: ast.AST):
"""Check if the node is only a nested for"""
return (
isinstance(node, ast.For)
and isinstance(node.body[0], ast.For)
and len(node.body) == 1
# Check there's no dependancy between nodes
and not any(
name_in_node(node.body[0].iter, target)
for target in for_target_names(node)
)
)
def ast_node(expr: str) -> ast.AST:
"""Helper function to parse a string denoting an expression into an AST node"""
# ast.parse returns "Module(body=[Node])"
return ast.parse(expr).body[0]
def for_target_names(node: ast.For) -> typing.List[ast.Name]:
"""Returns the names that are the targets of the for loop."""
target = typing.cast(typing.Union[ast.Tuple, ast.Name], node.target)
return target.elts if isinstance(target, ast.Tuple) else [target]
--- FILE SEPARATOR ---
import ast
from good_smell import AstSmell, LoggingTransformer
from typing import Union, Container
class RangeLenSmell(AstSmell):
@property
def transformer_class(self):
return EnumerateFixer
@property
def symbol(self):
return "range-len"
@property
def warning_message(self) -> str:
return "Instead of using a c-style for loop, try using enumerate!"
class AssignDeleter(ast.NodeTransformer):
def __init__(self, seq: ast.Name, target: ast.Name):
self.id = target
self.seq = seq
self.elem_target = None or ast.Name(id="elm", ctx=ast.Store())
self.uses_seq = False
def visit_Assign(self, node: ast.Assign):
"""Deletes a node if it assigning using the for target"""
if self.accesses_seq(node.value):
self.elem_target = node.targets[0]
return None
return self.generic_visit(node)
@staticmethod
def __get_slice_id(node: ast.Subscript) -> Container[str]:
"""Get slice identifier.
Needed because in python3.9 ast.Subscript.slice became a ast.Name, instead of a ast.Index."""
slice = node.slice
if isinstance(slice, ast.Name):
return [slice.id]
if isinstance(slice, ast.Index):
return [slice.value.id]
if isinstance(slice, ast.Slice):
return [slice.upper, slice.lower]
def accesses_seq(self, node) -> bool:
"""Checks if the node acceses the sequence[target]"""
if (
isinstance(node, ast.Subscript)
and self.id.id in self.__get_slice_id(node)
and node.value.id == self.seq.id
):
self.uses_seq = True
return True
def visit_Subscript(self, node: ast.Subscript):
if self.accesses_seq(node):
return self.elem_target
return self.generic_visit(node)
class EnumerateFixer(LoggingTransformer):
def visit_For(self, node: ast.For) -> Union[bool, ast.For]:
enumerate_node = ast.Name(id="enumerate", ctx=ast.Load())
node_iterable = node.iter.args[0].args[0]
original_target = node.target
deleter = AssignDeleter(target=original_target, seq=node_iterable)
new_body = deleter.visit(node).body or [ast.Pass()]
elm_target = (
deleter.elem_target
if deleter.uses_seq
else ast.Name(id="_", ctx=ast.Store())
)
# for (original_target,elm_target) in enumerate(node_iterable):
new_node = ast.For(
target=ast.Tuple(elts=[original_target, elm_target], ctx=ast.Store()),
iter=ast.Call(func=enumerate_node, args=[node_iterable], keywords=[]),
body=new_body,
orelse=node.orelse,
)
new_node = ast.fix_missing_locations(ast.copy_location(new_node, node))
new_node = self.generic_visit(new_node)
return new_node
@staticmethod
def is_smelly(node: ast.For):
try:
return node.iter.func.id == "range" and node.iter.args[0].func.id == "len"
except AttributeError:
return False
--- FILE SEPARATOR ---
import ast
from good_smell import AstSmell, LoggingTransformer
try:
# ast.Str is deprecated in py3.8 and will be removed
StrConst = (ast.Constant, ast.Str)
except AttributeError:
StrConst = (ast.Constant,)
class JoinLiteral(AstSmell):
"""Checks if joining a literal of a sequence."""
@property
def transformer_class(self):
return Transformer
@property
def warning_message(self):
return (
"Consider using str.format instead of joining a constant amount of strings."
)
@property
def symbol(self):
return "join-literal"
class Transformer(LoggingTransformer):
"""Checks for usages of str.join with a constant amount of arguments."""
@staticmethod
def normalize_constant(node) -> ast.Constant:
"""Compatibility wrapper for py3.8+, ast, ast.Str and ast.Num are replaced by ast.Constant.
We don't type annotate `node` so it doesn't break on py3.10+ when these classes will be removed.
"""
for attr in ["value", "s", "n"]:
try:
return ast.Constant(value=getattr(node, attr))
except AttributeError:
pass
raise ValueError("Not a constat.")
def visit_Call(self, node: ast.Call) -> ast.Call:
format_arguments = node.args[0].elts
format_delimiter = self.normalize_constant(node.func.value).value
format_string = format_delimiter.join(["{}"] * len(format_arguments))
new_call = ast.Call(
func=ast.Attribute(
value=ast.Constant(value=format_string), attr="format", ctx=ast.Load()
),
args=format_arguments,
keywords=[],
)
return ast.fix_missing_locations(new_call)
@staticmethod
def is_smelly(node: ast.AST):
"""Check if the node is only a nested for"""
return (
isinstance(node, ast.Call)
and isinstance(node.func, ast.Attribute)
and isinstance(node.func.value, StrConst)
and node.func.attr == "join"
and len(node.args) == 1
and isinstance(node.args[0], ast.List)
and not any(isinstance(el, ast.Starred) for el in node.args[0].elts)
)
--- FILE SEPARATOR ---
from typing import TypeVar
import ast
from typing import cast
from good_smell import AstSmell, LoggingTransformer
class NameReplacer(ast.NodeTransformer):
def __init__(self, old: ast.Name, new: ast.AST):
self.old = old
self.new = new
def visit_Name(self, node: ast.Name) -> ast.AST:
if node.id == self.old.id:
return self.new
return node
T = TypeVar("T", bound=ast.AST)
def replace_name_with_node(node: T, old_val: ast.Name, new_val: ast.AST) -> T:
"""Returns `node` with all occurences of `old_val` (a variable) replaced with `new_val` (an expression)"""
return NameReplacer(old_val, new_val).visit(node)
class FilterTransformer(LoggingTransformer):
"""Bumps the filter to the iterator"""
def visit_For(self, node: ast.For) -> ast.For:
if_node: ast.If = node.body[0]
filter_condition: ast.Expr = if_node.test
if not isinstance(node.iter, ast.GeneratorExp):
# Create a generator expression if it doesn't exist
GEN_ELT_NAME = "x"
gen_exp: ast.GeneratorExp = cast(
ast.GeneratorExp,
ast_node(f"({GEN_ELT_NAME} for {GEN_ELT_NAME} in seq)").value,
)
gen_target = ast_node(GEN_ELT_NAME).value
iter_comprehension = gen_exp.generators[0]
iter_comprehension.iter = replace_name_with_node(
node.iter, node.target, gen_target
)
else:
gen_exp = node.iter
iter_comprehension = gen_exp.generators[0]
gen_target = gen_exp.elt
iter_comprehension.ifs.append(
replace_name_with_node(filter_condition, node.target, gen_target)
)
node.iter = gen_exp
node.body = if_node.body
return node
def is_smelly(self, node: ast.AST):
"""Check if the node is only a nested for"""
return (
isinstance(node, ast.For)
and len(node.body) == 1
and isinstance(node.body[0], ast.If)
)
class FilterIterator(AstSmell):
"""Checks for adjacent nested fors and replaces them with itertools.product"""
@property
def transformer_class(self):
return FilterTransformer
@property
def warning_message(self):
return "Consider using itertools.product instead of a nested for"
@property
def symbol(self) -> str:
return "filter-iterator"
def ast_node(expr: str) -> ast.AST:
"""Helper function to parse a string denoting an expression into an AST node"""
# ast.parse returns "Module(body=[Node])"
return ast.parse(expr).body[0]
--- FILE SEPARATOR ---
from good_smell import AstSmell, LoggingTransformer
import ast
class YieldFrom(AstSmell):
"""Checks for yields inside for loops"""
@property
def transformer_class(self):
return YieldFromTransformer
@property
def warning_message(self):
return "Consider using yield from instead of yield inside of a for loop"
@property
def symbol(self):
return "yield-from"
class YieldFromTransformer(LoggingTransformer):
"""NodeTransformer that goes visits all the yields in fors and replaces them
with yield from"""
def visit_For(self, node: ast.For):
yield_from = ast.Expr(value=ast.YieldFrom(node.iter))
return ast.fix_missing_locations(yield_from)
@staticmethod
def is_smelly(node: ast.AST):
"""Check if the node is a yield inside a for"""
return (
isinstance(node, ast.For)
and len(node.body) == 1
and isinstance(node.body[0], ast.Expr)
and isinstance(node.body[0].value, ast.Yield)
)
|
{
"imported_by": [],
"imports": [
"/good_smell/smells/nested_for.py",
"/good_smell/smells/range_len_fix.py",
"/good_smell/smells/join_literal.py",
"/good_smell/smells/filter.py",
"/good_smell/smells/yield_from.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/smells/filter.py
|
from typing import TypeVar
import ast
from typing import cast
from good_smell import AstSmell, LoggingTransformer
class NameReplacer(ast.NodeTransformer):
    """Substitute every occurrence of one name with a replacement AST node."""

    def __init__(self, old: ast.Name, new: ast.AST):
        self.old = old
        self.new = new

    def visit_Name(self, node: ast.Name) -> ast.AST:
        # Only names matching the target identifier are swapped out.
        return self.new if node.id == self.old.id else node
T = TypeVar("T", bound=ast.AST)


def replace_name_with_node(node: T, old_val: ast.Name, new_val: ast.AST) -> T:
    """Returns `node` with all occurences of `old_val` (a variable) replaced with `new_val` (an expression)"""
    replacer = NameReplacer(old_val, new_val)
    return replacer.visit(node)
class FilterTransformer(LoggingTransformer):
    """Bumps the filter to the iterator"""
    def visit_For(self, node: ast.For) -> ast.For:
        # The loop body is a single ``if`` (guaranteed by is_smelly); its
        # test is moved onto the loop's iterator as a generator ``if``.
        if_node: ast.If = node.body[0]
        filter_condition: ast.Expr = if_node.test
        if not isinstance(node.iter, ast.GeneratorExp):
            # Create a generator expression if it doesn't exist
            GEN_ELT_NAME = "x"
            gen_exp: ast.GeneratorExp = cast(
                ast.GeneratorExp,
                ast_node(f"({GEN_ELT_NAME} for {GEN_ELT_NAME} in seq)").value,
            )
            gen_target = ast_node(GEN_ELT_NAME).value
            iter_comprehension = gen_exp.generators[0]
            # Iterate the original iterable, with the loop target renamed to
            # the generator's element name inside it.
            iter_comprehension.iter = replace_name_with_node(
                node.iter, node.target, gen_target
            )
        else:
            # Reuse the existing generator expression and its element.
            gen_exp = node.iter
            iter_comprehension = gen_exp.generators[0]
            gen_target = gen_exp.elt
        # Append the (renamed) filter condition to the generator's if-clauses.
        iter_comprehension.ifs.append(
            replace_name_with_node(filter_condition, node.target, gen_target)
        )
        # The loop now iterates the filtered generator; the ``if`` wrapper
        # is dropped and its body becomes the loop body.
        node.iter = gen_exp
        node.body = if_node.body
        return node
    def is_smelly(self, node: ast.AST):
        """Check whether ``node`` is a for loop whose entire body is one ``if``."""
        return (
            isinstance(node, ast.For)
            and len(node.body) == 1
            and isinstance(node.body[0], ast.If)
        )
class FilterIterator(AstSmell):
    """Checks for loops whose whole body is an ``if`` and moves the filter
    condition into the iterator (a generator expression with an ``if``).

    BUG FIX: docstring and warning message were previously copy-pasted from
    the nested-for smell and described itertools.product instead.
    """

    @property
    def transformer_class(self):
        return FilterTransformer

    @property
    def warning_message(self):
        return "Consider moving the filter condition into the iterator"

    @property
    def symbol(self) -> str:
        return "filter-iterator"
def ast_node(expr: str) -> ast.AST:
    """Helper function to parse a string denoting an expression into an AST node"""
    # ast.parse wraps everything in a Module; return its first statement.
    module = ast.parse(expr)
    return module.body[0]
|
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
"""A subclass of transformer that logs the nodes it transforms"""
def __init__(self, transform):
self.transformed_nodes = list()
self.transofrm = transform
@abc.abstractmethod
def is_smelly(self, node: ast.AST) -> bool:
"""Checks if the given `node` should be transformed"""
def visit(self, node: ast.AST):
if not self.is_smelly(node):
return self.generic_visit(node)
self.transformed_nodes.append(node)
if self.transofrm:
return super().visit(node)
return self.generic_visit(node)
T = TypeVar("T")
def unwrap(x: Optional[T]) -> T:
if x is None:
raise ValueError("Unrwapped None")
return x
class AstSmell(LintSmell):
def check_for_smell(self) -> List[SmellWarning]:
"""Return a list of all occuring smells of this smell class"""
transformer = self.transformer_class(self.transform)
transformer.visit(unwrap(self.tree))
node: ast.stmt
return [
SmellWarning(
msg=self.warning_message,
row=node.lineno,
col=node.col_offset,
path=unwrap(self.path),
symbol=self.symbol,
)
for node in transformer.transformed_nodes
]
def fix_smell(self) -> str:
"""Return a fixed version of the code without the code smell"""
return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))
@property
@abc.abstractmethod
def transformer_class(self) -> Type[LoggingTransformer]:
"""The class for the transformer used to create"""
|
{
"imported_by": [
"/good_smell/smells/__init__.py"
],
"imports": [
"/good_smell/ast_smell.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/smells/join_literal.py
|
import ast
from good_smell import AstSmell, LoggingTransformer
try:
    # ast.Str is deprecated in py3.8 and will be removed
    StrConst = (ast.Constant, ast.Str)
except AttributeError:
    # ast.Str no longer exists on this Python; only ast.Constant remains.
    StrConst = (ast.Constant,)
class JoinLiteral(AstSmell):
    """Checks if joining a literal of a sequence."""
    @property
    def transformer_class(self):
        # Transformer rewrites ``"sep".join([...])`` into a str.format call.
        return Transformer
    @property
    def warning_message(self):
        return (
            "Consider using str.format instead of joining a constant amount of strings."
        )
    @property
    def symbol(self):
        return "join-literal"
class Transformer(LoggingTransformer):
    """Checks for usages of str.join with a constant amount of arguments."""

    @staticmethod
    def normalize_constant(node) -> ast.Constant:
        """Compatibility wrapper for py3.8+, ast, ast.Str and ast.Num are replaced by ast.Constant.
        We don't type annotate `node` so it doesn't break on py3.10+ when these classes will be removed.
        """
        # Try the value attribute used by each constant-node flavour in turn.
        for attr in ["value", "s", "n"]:
            try:
                return ast.Constant(value=getattr(node, attr))
            except AttributeError:
                pass
        # BUG FIX: message previously misspelled "constat".
        raise ValueError("Not a constant.")

    def visit_Call(self, node: ast.Call) -> ast.Call:
        """Rewrite ``"sep".join([a, b])`` into ``"{}sep{}".format(a, b)``."""
        format_arguments = node.args[0].elts
        format_delimiter = self.normalize_constant(node.func.value).value
        # One "{}" placeholder per joined element, glued by the delimiter.
        format_string = format_delimiter.join(["{}"] * len(format_arguments))
        new_call = ast.Call(
            func=ast.Attribute(
                value=ast.Constant(value=format_string), attr="format", ctx=ast.Load()
            ),
            args=format_arguments,
            keywords=[],
        )
        return ast.fix_missing_locations(new_call)

    @staticmethod
    def is_smelly(node: ast.AST):
        """Check if the node is a str.join call on a list literal
        with no starred elements (fixed copy-pasted nested-for docstring)."""
        return (
            isinstance(node, ast.Call)
            and isinstance(node.func, ast.Attribute)
            and isinstance(node.func.value, StrConst)
            and node.func.attr == "join"
            and len(node.args) == 1
            and isinstance(node.args[0], ast.List)
            and not any(isinstance(el, ast.Starred) for el in node.args[0].elts)
        )
|
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
"""A subclass of transformer that logs the nodes it transforms"""
def __init__(self, transform):
self.transformed_nodes = list()
self.transofrm = transform
@abc.abstractmethod
def is_smelly(self, node: ast.AST) -> bool:
"""Checks if the given `node` should be transformed"""
def visit(self, node: ast.AST):
if not self.is_smelly(node):
return self.generic_visit(node)
self.transformed_nodes.append(node)
if self.transofrm:
return super().visit(node)
return self.generic_visit(node)
T = TypeVar("T")
def unwrap(x: Optional[T]) -> T:
if x is None:
raise ValueError("Unrwapped None")
return x
class AstSmell(LintSmell):
    """Base class for lint smells implemented as AST transformations.

    Subclasses supply :attr:`transformer_class`; this base drives it either
    to report warnings (``check_for_smell``) or rewrite the tree
    (``fix_smell``).
    """

    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occurring smells of this smell class"""
        transformer = self.transformer_class(self.transform)
        transformer.visit(unwrap(self.tree))
        # One warning per node the transformer logged as smelly.
        return [
            SmellWarning(
                msg=self.warning_message,
                row=node.lineno,
                col=node.col_offset,
                path=unwrap(self.path),
                symbol=self.symbol,
            )
            for node in transformer.transformed_nodes
        ]

    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""
        return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))

    @property
    @abc.abstractmethod
    def transformer_class(self) -> Type[LoggingTransformer]:
        """The transformer class used to detect and rewrite this smell."""
|
{
"imported_by": [
"/good_smell/smells/__init__.py"
],
"imports": [
"/good_smell/ast_smell.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/smells/nested_for.py
|
import ast
import typing
from good_smell import AstSmell, LoggingTransformer
class NameInNode(LoggingTransformer):
    """Visitor that records occurrences of one identifier inside a tree."""

    def __init__(self, name: ast.Name):
        self.name = name
        # Log-only mode: we never rewrite, just collect matches.
        super().__init__(transform=False)

    def is_smelly(self, node: ast.AST) -> bool:
        """A node matches when it is a Name bearing the tracked identifier."""
        if not isinstance(node, ast.Name):
            return False
        return node.id == self.name.id
def name_in_node(node: ast.AST, name: ast.Name) -> bool:
    """Return True when the identifier ``name`` occurs anywhere in ``node``."""
    finder = NameInNode(name)
    finder.visit(node)
    return len(finder.transformed_nodes) > 0
class NestedFor(AstSmell):
    """Detects directly nested ``for`` loops that can be flattened into a
    single loop over paired targets."""

    @property
    def symbol(self):
        return "nested-for"

    @property
    def transformer_class(self):
        return NestedForTransformer

    @property
    def warning_message(self):
        return "Consider using a nested comprehension instead of a nested for"
class NestedForTransformer(LoggingTransformer):
    """NodeTransformer that goes visits all the nested `for`s and replaces them
    with itertools.product"""
    def visit_For(self, node: ast.For) -> ast.For:
        # is_smelly guarantees the only statement in `node` is another For.
        inner_for: ast.For = node.body[0]
        # Combined loop target: (outer_target, inner_target).
        new_target = ast.Tuple(elts=[node.target, inner_for.target])
        def create_comprehension(for_node: ast.For) -> ast.comprehension:
            # One comprehension clause per original loop, with no `if` filters.
            return ast.comprehension(target=for_node.target, iter=for_node.iter, ifs=[])
        # Generator yielding the cartesian product of both iterables,
        # e.g. ((i, j) for i in a for j in b).
        gen_exp = ast.GeneratorExp(
            elt=new_target,
            generators=[create_comprehension(node), create_comprehension(inner_for)],
        )
        # Single flattened loop that runs the inner body over the product.
        new_for = ast.For(
            target=new_target, iter=gen_exp, body=inner_for.body, orelse=node.orelse
        )
        new_for = ast.fix_missing_locations(new_for)
        return new_for
    @staticmethod
    def is_smelly(node: ast.AST):
        """Check if the node is only a nested for"""
        return (
            isinstance(node, ast.For)
            and isinstance(node.body[0], ast.For)
            and len(node.body) == 1
            # Check there's no dependancy between nodes
            and not any(
                name_in_node(node.body[0].iter, target)
                for target in for_target_names(node)
            )
        )
def ast_node(expr: str) -> ast.AST:
    """Parse ``expr`` and return its single top-level statement node."""
    module = ast.parse(expr)  # ast.parse returns Module(body=[Node])
    return module.body[0]
def for_target_names(node: ast.For) -> typing.List[ast.Name]:
    """Returns the names that are the targets of the for loop."""
    target = typing.cast(typing.Union[ast.Tuple, ast.Name], node.target)
    if isinstance(target, ast.Tuple):
        # Tuple target ("for a, b in ..."): one Name per element.
        return target.elts
    return [target]
|
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
    """A ``NodeTransformer`` that records every node matching :meth:`is_smelly`.

    With ``transform`` true, matching nodes are also rewritten through the
    subclass's ``visit_*`` hooks; otherwise they are only logged.
    """

    def __init__(self, transform):
        # Nodes that matched is_smelly during the last visit().
        self.transformed_nodes = []
        self.transform = transform
        # Backwards-compatible alias: earlier revisions misspelled this
        # attribute as "transofrm" and external code may still read it.
        self.transofrm = transform

    @abc.abstractmethod
    def is_smelly(self, node: ast.AST) -> bool:
        """Checks if the given `node` should be transformed"""

    def visit(self, node: ast.AST):
        # Non-matching nodes are still traversed so nested smells are found.
        if not self.is_smelly(node):
            return self.generic_visit(node)
        self.transformed_nodes.append(node)
        if self.transform:
            # Dispatch to the subclass visit_* hook to rewrite the node.
            return super().visit(node)
        return self.generic_visit(node)
T = TypeVar("T")


def unwrap(x: Optional[T]) -> T:
    """Return ``x`` unchanged, raising ``ValueError`` when it is ``None``.

    Mirrors Rust's ``Option::unwrap`` for narrowing ``Optional`` values.
    """
    if x is None:
        # Fixed typo in the error message ("Unrwapped").
        raise ValueError("Unwrapped None")
    return x
class AstSmell(LintSmell):
    """Base class for lint smells implemented as AST transformations.

    Subclasses supply :attr:`transformer_class`; this base drives it either
    to report warnings (``check_for_smell``) or rewrite the tree
    (``fix_smell``).
    """

    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occurring smells of this smell class"""
        transformer = self.transformer_class(self.transform)
        transformer.visit(unwrap(self.tree))
        # One warning per node the transformer logged as smelly.
        return [
            SmellWarning(
                msg=self.warning_message,
                row=node.lineno,
                col=node.col_offset,
                path=unwrap(self.path),
                symbol=self.symbol,
            )
            for node in transformer.transformed_nodes
        ]

    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""
        return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))

    @property
    @abc.abstractmethod
    def transformer_class(self) -> Type[LoggingTransformer]:
        """The transformer class used to detect and rewrite this smell."""
|
{
"imported_by": [
"/good_smell/smells/__init__.py",
"/tests/test_no_transform.py"
],
"imports": [
"/good_smell/ast_smell.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/smells/range_len_fix.py
|
import ast
from good_smell import AstSmell, LoggingTransformer
from typing import Union, Container
class RangeLenSmell(AstSmell):
    """Flags C-style ``for i in range(len(seq))`` loops."""

    @property
    def symbol(self):
        return "range-len"

    @property
    def transformer_class(self):
        return EnumerateFixer

    @property
    def warning_message(self) -> str:
        return "Instead of using a c-style for loop, try using enumerate!"
class AssignDeleter(ast.NodeTransformer):
    """Rewrites the body of a ``range(len(...))`` loop for the enumerate fix.

    Deletes ``elm = seq[i]`` style assignments (remembering the element
    target) and replaces remaining ``seq[i]`` subscripts with that target.
    """

    def __init__(self, seq: ast.Name, target: ast.Name):
        self.id = target
        self.seq = seq
        # Default element name used when the body never binds seq[i] itself.
        self.elem_target = ast.Name(id="elm", ctx=ast.Store())
        self.uses_seq = False

    def visit_Assign(self, node: ast.Assign):
        """Deletes a node if it is assigning using the for target"""
        if self.accesses_seq(node.value):
            self.elem_target = node.targets[0]
            return None  # drop the assignment; enumerate provides the element
        return self.generic_visit(node)

    @staticmethod
    def __get_slice_id(node: ast.Subscript) -> Container[str]:
        """Get slice identifier.
        Needed because in python3.9 ast.Subscript.slice became a ast.Name, instead of a ast.Index."""
        slice = node.slice
        if isinstance(slice, ast.Name):
            return [slice.id]
        if isinstance(slice, ast.Index):
            # Guard against non-Name index values (e.g. a[0] on py3.8).
            value = slice.value
            return [value.id] if isinstance(value, ast.Name) else ()
        if isinstance(slice, ast.Slice):
            # NOTE(review): these are AST nodes, not identifier strings —
            # membership tests against them only match by object identity.
            return [slice.upper, slice.lower]
        # BUG FIX: previously fell through returning None, so constant or
        # otherwise unhandled slices (e.g. a[0] on py3.9+) raised
        # "TypeError: argument of type 'NoneType' is not iterable".
        return ()

    def accesses_seq(self, node) -> bool:
        """Checks if the node accesses sequence[target]."""
        if (
            isinstance(node, ast.Subscript)
            # Guard: node.value may not be a plain Name (e.g. b[c][i]).
            and isinstance(node.value, ast.Name)
            and self.id.id in self.__get_slice_id(node)
            and node.value.id == self.seq.id
        ):
            self.uses_seq = True
            return True
        return False

    def visit_Subscript(self, node: ast.Subscript):
        if self.accesses_seq(node):
            return self.elem_target
        return self.generic_visit(node)
class EnumerateFixer(LoggingTransformer):
    """Rewrites ``for i in range(len(seq)):`` loops to use ``enumerate``."""
    def visit_For(self, node: ast.For) -> Union[bool, ast.For]:
        enumerate_node = ast.Name(id="enumerate", ctx=ast.Load())
        # is_smelly guarantees node.iter is range(len(<iterable>)); unwrap it.
        node_iterable = node.iter.args[0].args[0]
        original_target = node.target
        # Remove `elm = seq[i]` assignments and substitute remaining seq[i] uses.
        deleter = AssignDeleter(target=original_target, seq=node_iterable)
        new_body = deleter.visit(node).body or [ast.Pass()]
        # If the body never indexed the sequence, bind the element to "_".
        elm_target = (
            deleter.elem_target
            if deleter.uses_seq
            else ast.Name(id="_", ctx=ast.Store())
        )
        # for (original_target,elm_target) in enumerate(node_iterable):
        new_node = ast.For(
            target=ast.Tuple(elts=[original_target, elm_target], ctx=ast.Store()),
            iter=ast.Call(func=enumerate_node, args=[node_iterable], keywords=[]),
            body=new_body,
            orelse=node.orelse,
        )
        new_node = ast.fix_missing_locations(ast.copy_location(new_node, node))
        new_node = self.generic_visit(new_node)
        return new_node
    @staticmethod
    def is_smelly(node: ast.For):
        # AttributeError fallback covers nodes lacking the .iter.func/.args shape.
        try:
            return node.iter.func.id == "range" and node.iter.args[0].func.id == "len"
        except AttributeError:
            return False
|
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
    """A ``NodeTransformer`` that records every node matching :meth:`is_smelly`.

    With ``transform`` true, matching nodes are also rewritten through the
    subclass's ``visit_*`` hooks; otherwise they are only logged.
    """

    def __init__(self, transform):
        # Nodes that matched is_smelly during the last visit().
        self.transformed_nodes = []
        self.transform = transform
        # Backwards-compatible alias: earlier revisions misspelled this
        # attribute as "transofrm" and external code may still read it.
        self.transofrm = transform

    @abc.abstractmethod
    def is_smelly(self, node: ast.AST) -> bool:
        """Checks if the given `node` should be transformed"""

    def visit(self, node: ast.AST):
        # Non-matching nodes are still traversed so nested smells are found.
        if not self.is_smelly(node):
            return self.generic_visit(node)
        self.transformed_nodes.append(node)
        if self.transform:
            # Dispatch to the subclass visit_* hook to rewrite the node.
            return super().visit(node)
        return self.generic_visit(node)
T = TypeVar("T")


def unwrap(x: Optional[T]) -> T:
    """Return ``x`` unchanged, raising ``ValueError`` when it is ``None``.

    Mirrors Rust's ``Option::unwrap`` for narrowing ``Optional`` values.
    """
    if x is None:
        # Fixed typo in the error message ("Unrwapped").
        raise ValueError("Unwrapped None")
    return x
class AstSmell(LintSmell):
    """Base class for lint smells implemented as AST transformations.

    Subclasses supply :attr:`transformer_class`; this base drives it either
    to report warnings (``check_for_smell``) or rewrite the tree
    (``fix_smell``).
    """

    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occurring smells of this smell class"""
        transformer = self.transformer_class(self.transform)
        transformer.visit(unwrap(self.tree))
        # One warning per node the transformer logged as smelly.
        return [
            SmellWarning(
                msg=self.warning_message,
                row=node.lineno,
                col=node.col_offset,
                path=unwrap(self.path),
                symbol=self.symbol,
            )
            for node in transformer.transformed_nodes
        ]

    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""
        return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))

    @property
    @abc.abstractmethod
    def transformer_class(self) -> Type[LoggingTransformer]:
        """The transformer class used to detect and rewrite this smell."""
|
{
"imported_by": [
"/good_smell/smells/__init__.py"
],
"imports": [
"/good_smell/ast_smell.py"
]
}
|
Tadaboody/good_smell
|
/good_smell/smells/yield_from.py
|
from good_smell import AstSmell, LoggingTransformer
import ast
class YieldFrom(AstSmell):
    """Checks for yields inside for loops"""

    @property
    def symbol(self):
        return "yield-from"

    @property
    def transformer_class(self):
        return YieldFromTransformer

    @property
    def warning_message(self):
        return "Consider using yield from instead of yield inside of a for loop"
class YieldFromTransformer(LoggingTransformer):
    """NodeTransformer that replaces ``for x in it: yield x`` loops with
    ``yield from it``."""

    def visit_For(self, node: ast.For):
        yield_from = ast.Expr(value=ast.YieldFrom(node.iter))
        return ast.fix_missing_locations(yield_from)

    @staticmethod
    def is_smelly(node: ast.AST):
        """Check the node is a for loop that only yields its own target.

        BUG FIX: the original accepted any single-yield body, so
        ``for x in it: yield x + 1`` was wrongly rewritten to
        ``yield from it``, changing the produced values. The yielded
        expression must now be exactly the (Name) loop variable, and the
        loop must have no ``else`` clause (which the rewrite would drop).
        """
        if not (
            isinstance(node, ast.For)
            and not node.orelse
            and len(node.body) == 1
            and isinstance(node.body[0], ast.Expr)
            and isinstance(node.body[0].value, ast.Yield)
        ):
            return False
        yielded = node.body[0].value.value
        return (
            isinstance(node.target, ast.Name)
            and isinstance(yielded, ast.Name)
            and yielded.id == node.target.id
        )
|
import abc
import ast
from typing import List, Optional, Type, TypeVar
import astor
from good_smell import LintSmell, SmellWarning
class LoggingTransformer(ast.NodeTransformer, abc.ABC):
    """A ``NodeTransformer`` that records every node matching :meth:`is_smelly`.

    With ``transform`` true, matching nodes are also rewritten through the
    subclass's ``visit_*`` hooks; otherwise they are only logged.
    """

    def __init__(self, transform):
        # Nodes that matched is_smelly during the last visit().
        self.transformed_nodes = []
        self.transform = transform
        # Backwards-compatible alias: earlier revisions misspelled this
        # attribute as "transofrm" and external code may still read it.
        self.transofrm = transform

    @abc.abstractmethod
    def is_smelly(self, node: ast.AST) -> bool:
        """Checks if the given `node` should be transformed"""

    def visit(self, node: ast.AST):
        # Non-matching nodes are still traversed so nested smells are found.
        if not self.is_smelly(node):
            return self.generic_visit(node)
        self.transformed_nodes.append(node)
        if self.transform:
            # Dispatch to the subclass visit_* hook to rewrite the node.
            return super().visit(node)
        return self.generic_visit(node)
T = TypeVar("T")


def unwrap(x: Optional[T]) -> T:
    """Return ``x`` unchanged, raising ``ValueError`` when it is ``None``.

    Mirrors Rust's ``Option::unwrap`` for narrowing ``Optional`` values.
    """
    if x is None:
        # Fixed typo in the error message ("Unrwapped").
        raise ValueError("Unwrapped None")
    return x
class AstSmell(LintSmell):
    """Base class for lint smells implemented as AST transformations.

    Subclasses supply :attr:`transformer_class`; this base drives it either
    to report warnings (``check_for_smell``) or rewrite the tree
    (``fix_smell``).
    """

    def check_for_smell(self) -> List[SmellWarning]:
        """Return a list of all occurring smells of this smell class"""
        transformer = self.transformer_class(self.transform)
        transformer.visit(unwrap(self.tree))
        # One warning per node the transformer logged as smelly.
        return [
            SmellWarning(
                msg=self.warning_message,
                row=node.lineno,
                col=node.col_offset,
                path=unwrap(self.path),
                symbol=self.symbol,
            )
            for node in transformer.transformed_nodes
        ]

    def fix_smell(self) -> str:
        """Return a fixed version of the code without the code smell"""
        return astor.to_source(self.transformer_class(True).visit(unwrap(self.tree)))

    @property
    @abc.abstractmethod
    def transformer_class(self) -> Type[LoggingTransformer]:
        """The transformer class used to detect and rewrite this smell."""
|
{
"imported_by": [
"/good_smell/smells/__init__.py"
],
"imports": [
"/good_smell/ast_smell.py"
]
}
|
Tadaboody/good_smell
|
/tests/test_collection.py
|
import ast
import itertools
from os import PathLike
from pathlib import Path
from typing import Iterator, NamedTuple, Set
import astor
import black
import pytest
from good_smell import fix_smell, smell_warnings
# Directory containing the example-based test-case files.
FILE_DIR = Path(__file__).parent
EXAMPLES_DIR = FILE_DIR / "examples"
def normalize_formatting(code: str) -> str:
    """Returns a string of the code with normalized formatting for easier compares"""
    # Round-trip through the AST to discard layout, then let black settle style.
    code = astor.to_source(ast.parse(code))
    try:
        return black.format_file_contents(code, fast=True, mode=black.Mode())
    except black.NothingChanged:
        # black raises instead of returning unchanged text; that's fine here.
        return code
class CollectedTest(NamedTuple):
    """One before/after example parsed out of a test-case file."""

    desc: str  # human-readable title from the "#:" line
    error_symbols: Set[str]  # smell symbols expected for the "before" code
    # BUG FIX: ``before`` was annotated ``int`` although it always holds the
    # joined source text, just like ``after``.
    before: str
    after: str
# Markers used by the example-file format.
TITLE_PREFIX = "#:"
BEFORE_AFTER_SPLITTER = "==>"
END_SYMBOL = "END"
SPECIAL_SYMBOLS = (TITLE_PREFIX, BEFORE_AFTER_SPLITTER, END_SYMBOL)


def is_title(line: str) -> bool:
    """True when ``line`` opens a new test case (starts with ``#:``)."""
    return line.startswith(TITLE_PREFIX)
def collect_tests(path: PathLike) -> Iterator[CollectedTest]:
    """Collects all test cases listed in `path`"""
    with open(path) as fp:
        lines = fp.readlines()
    lines_iter = iter(lines)  # Create iterator for continued iteration
    # Each "#:"-titled section yields one CollectedTest; sharing lines_iter
    # lets the takewhile calls below consume the section body in place.
    for line_num, line in enumerate(line for line in lines_iter if is_title(line)):
        desc = line.strip("#:").strip()
        # The line right after the title lists expected symbols ("None" = none).
        symbols_line = next(lines_iter).strip("#").strip()
        symbols = {symbol for symbol in symbols_line.split(",") if symbol != "None"}
        before = "".join(
            itertools.takewhile(lambda l: BEFORE_AFTER_SPLITTER not in l, lines_iter)
        )
        after = "".join(itertools.takewhile(lambda l: END_SYMBOL not in l, lines_iter))
        collected_test = CollectedTest(
            desc=desc, error_symbols=symbols, before=before, after=after
        )
        # Guard against marker strings leaking into a section (bad formatting).
        if any(
            symbol in field
            for field, symbol in itertools.product(collected_test, SPECIAL_SYMBOLS)
        ):
            raise Exception(
                f"""Wrongly formatted example in {path}:{line_num}
                {collected_test}"""
            )
        yield collected_test
def test_collect_tests():
    """Self-test for the example-file parser using the reference example file."""
    example_path = EXAMPLES_DIR / "example.py"
    collected_tests = list(collect_tests(example_path))
    assert len(collected_tests) == 2
    case_with_symbol, case_with_no_symbol = collected_tests
    assert case_with_symbol.desc == "example"
    assert case_with_symbol.error_symbols == {"example-symbol", "another-one"}
    assert case_with_symbol.before == """before = 0\nbefore = 1\n"""
    assert case_with_symbol.after == """after = 0\nafter = 1\n"""
    # A symbols line of "None" parses to the empty set.
    assert case_with_no_symbol.error_symbols == set()
# Every example file except the parser's own reference "example" file.
test_case_files = [f for f in EXAMPLES_DIR.iterdir() if "example" not in f.name]
def params_from_file():
    """Expand each collected case into a pytest.param with a readable test id."""
    for file in test_case_files:
        yield from (
            pytest.param(
                case.before,
                case.after,
                case.error_symbols,
                id=f"{file.with_suffix('').name}:{case.desc}",
            )
            for case in collect_tests(file)
        )
@pytest.mark.parametrize(["before", "_", "symbols"], params_from_file())
def test_smell_warning(before, _, symbols):
    # Detection only: emitted warnings must match the declared symbols exactly.
    assert set(symbols) == {smell.symbol for smell in smell_warnings(before)}
@pytest.mark.parametrize(["before", "after", "_"], list(params_from_file()))
def test_smell_fixing(before, after, _):
    # Fixing: the rewritten source must equal "after" modulo formatting.
    assert normalize_formatting(fix_smell(before)) == normalize_formatting(after)
|
from pathlib import Path
from typing import Iterable, Type
from fire import Fire
from good_smell import LintSmell, SmellWarning, implemented_smells
def print_smell_warnings(path: str):
    """Prints any warning messages about smells"""
    print(
        "\n".join(
            warning.warning_string()
            for warning in smell_warnings(Path(path).read_text(), path)
        )
    )
def smell_warnings(source: str, path: str = "") -> Iterable[SmellWarning]:
    """Yield warnings from every registered smell, in report-only mode."""
    for smell in implemented_smells:
        yield from smell.from_source(
            source_code=source, path=str(path), transform=False
        ).check_for_smell()
def print_fixed_smell(path: str, starting_line: int = 0, end_line: int = None):
    """Prints a fixed version of `source`"""
    pathlib_path = Path(path)
    source = pathlib_path.read_text()
    print(fix_smell(source, starting_line, end_line))
def fix_smell(
    source: str, starting_line: int = 0, end_line: int = None, path: str = None
) -> str:
    """Returns a fixed version of `source`"""
    smell: Type[LintSmell]
    # Apply each smell's fixer in sequence; later fixers see earlier rewrites.
    for smell in implemented_smells:
        source = smell.from_source(
            source_code=source,
            start_line=starting_line,
            end_line=end_line,
            path=path,
            transform=True,
        ).fix_smell()
    return source
def main():
    # CLI entry point: expose only the "fix" subcommand via python-fire.
    Fire({"fix": print_fixed_smell})
if __name__ == "__main__":
    main()
|
{
"imported_by": [
"/docs/generate_smell_doc.py"
],
"imports": [
"/good_smell/main.py"
]
}
|
Tadaboody/good_smell
|
/tests/test_enumerate_fix.py
|
from good_smell import fix_smell
from re import match
import pytest
# Sources whose fixed form must contain no "for i in range(len(...))" header.
valid_sources = ["""
a = [0]
for i in range(len(a)):
    print(a[i])
""",
"""
b = [1]
for i in range(len(a + b)):
    print(i)
"""]
@pytest.mark.parametrize("source", valid_sources)
def test_range_len_fix(source):
    # After fixing, no c-style range(len(...)) loop header should remain.
    assert not match(r'for \w+ in range\(len\(.+\)\):',
                     fix_smell(source))
|
from pathlib import Path
from typing import Iterable, Type
from fire import Fire
from good_smell import LintSmell, SmellWarning, implemented_smells
def print_smell_warnings(path: str):
    """Prints any warning messages about smells"""
    print(
        "\n".join(
            warning.warning_string()
            for warning in smell_warnings(Path(path).read_text(), path)
        )
    )
def smell_warnings(source: str, path: str = "") -> Iterable[SmellWarning]:
    """Yield warnings from every registered smell, in report-only mode."""
    for smell in implemented_smells:
        yield from smell.from_source(
            source_code=source, path=str(path), transform=False
        ).check_for_smell()
def print_fixed_smell(path: str, starting_line: int = 0, end_line: int = None):
    """Prints a fixed version of `source`"""
    pathlib_path = Path(path)
    source = pathlib_path.read_text()
    print(fix_smell(source, starting_line, end_line))
def fix_smell(
    source: str, starting_line: int = 0, end_line: int = None, path: str = None
) -> str:
    """Returns a fixed version of `source`"""
    smell: Type[LintSmell]
    # Apply each smell's fixer in sequence; later fixers see earlier rewrites.
    for smell in implemented_smells:
        source = smell.from_source(
            source_code=source,
            start_line=starting_line,
            end_line=end_line,
            path=path,
            transform=True,
        ).fix_smell()
    return source
def main():
    # CLI entry point: expose only the "fix" subcommand via python-fire.
    Fire({"fix": print_fixed_smell})
if __name__ == "__main__":
    main()
|
{
"imported_by": [],
"imports": [
"/good_smell/main.py"
]
}
|
Tadaboody/good_smell
|
/tests/test_no_transform.py
|
import itertools
import ast
from good_smell.smells import NestedFor
def compare_ast(node1, node2):
    """Structural AST equality ignoring positions and expression contexts.

    Adapted to py3 from https://stackoverflow.com/a/30581854.
    """
    if type(node1) is not type(node2):
        return False
    if isinstance(node1, ast.AST):
        ignored = ("lineno", "col_offset", "ctx")
        return all(
            compare_ast(value, getattr(node2, field))
            for field, value in vars(node1).items()
            if field not in ignored
        )
    if isinstance(node1, list):
        return all(itertools.starmap(compare_ast, zip(node1, node2)))
    return node1 == node2
def test_no_transform():
    """check_for_smell with transform=False must leave the tree untouched."""
    source = """
seq_a = [0]
seq_b = range(10)
for i in seq_a:
    for j in seq_b:
        print(i, j)"""
    original_tree = ast.parse(source)
    tree = ast.parse(source)
    # The nested for IS detected...
    assert NestedFor(transform=False, path="test", tree=tree).check_for_smell()
    # ...but detection alone must not mutate the tree.
    assert compare_ast(original_tree, tree)
|
import ast
import typing
from good_smell import AstSmell, LoggingTransformer
class NameInNode(LoggingTransformer):
    """Visitor that records occurrences of one identifier inside a tree."""

    def __init__(self, name: ast.Name):
        self.name = name
        # Log-only mode: we never rewrite, just collect matches.
        super().__init__(transform=False)

    def is_smelly(self, node: ast.AST) -> bool:
        """A node matches when it is a Name bearing the tracked identifier."""
        if not isinstance(node, ast.Name):
            return False
        return node.id == self.name.id
def name_in_node(node: ast.AST, name: ast.Name) -> bool:
    """Return True when the identifier ``name`` occurs anywhere in ``node``."""
    finder = NameInNode(name)
    finder.visit(node)
    return len(finder.transformed_nodes) > 0
class NestedFor(AstSmell):
    """Detects directly nested ``for`` loops that can be flattened into a
    single loop over paired targets."""

    @property
    def symbol(self):
        return "nested-for"

    @property
    def transformer_class(self):
        return NestedForTransformer

    @property
    def warning_message(self):
        return "Consider using a nested comprehension instead of a nested for"
class NestedForTransformer(LoggingTransformer):
    """NodeTransformer that goes visits all the nested `for`s and replaces them
    with itertools.product"""
    def visit_For(self, node: ast.For) -> ast.For:
        # is_smelly guarantees the only statement in `node` is another For.
        inner_for: ast.For = node.body[0]
        # Combined loop target: (outer_target, inner_target).
        new_target = ast.Tuple(elts=[node.target, inner_for.target])
        def create_comprehension(for_node: ast.For) -> ast.comprehension:
            # One comprehension clause per original loop, with no `if` filters.
            return ast.comprehension(target=for_node.target, iter=for_node.iter, ifs=[])
        # Generator yielding the cartesian product of both iterables,
        # e.g. ((i, j) for i in a for j in b).
        gen_exp = ast.GeneratorExp(
            elt=new_target,
            generators=[create_comprehension(node), create_comprehension(inner_for)],
        )
        # Single flattened loop that runs the inner body over the product.
        new_for = ast.For(
            target=new_target, iter=gen_exp, body=inner_for.body, orelse=node.orelse
        )
        new_for = ast.fix_missing_locations(new_for)
        return new_for
    @staticmethod
    def is_smelly(node: ast.AST):
        """Check if the node is only a nested for"""
        return (
            isinstance(node, ast.For)
            and isinstance(node.body[0], ast.For)
            and len(node.body) == 1
            # Check there's no dependancy between nodes
            and not any(
                name_in_node(node.body[0].iter, target)
                for target in for_target_names(node)
            )
        )
def ast_node(expr: str) -> ast.AST:
    """Parse ``expr`` and return its single top-level statement node."""
    module = ast.parse(expr)  # ast.parse returns Module(body=[Node])
    return module.body[0]
def for_target_names(node: ast.For) -> typing.List[ast.Name]:
    """Returns the names that are the targets of the for loop."""
    target = typing.cast(typing.Union[ast.Tuple, ast.Name], node.target)
    if isinstance(target, ast.Tuple):
        # Tuple target ("for a, b in ..."): one Name per element.
        return target.elts
    return [target]
|
{
"imported_by": [],
"imports": [
"/good_smell/smells/nested_for.py"
]
}
|
EricHughesABC/T2EPGviewer
|
/simple_pandas_plot.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 10:29:38 2017
@author: neh69
"""
import os
import sys
import numpy as np
import pandas as pd
import lmfit as lm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from PyQt5 import QtCore, QtWidgets
import visionplot_widgets
import mriplotwidget
from ImageData import T2imageData
def openStudyDir():
    """Prompt the user for a study directory (currently only printed)."""
    dlg = QtWidgets.QFileDialog()
    returned_data = dlg.getExistingDirectory(None, "Study Directory", "")
    print("openStudyDir\n",returned_data, type(returned_data))
    # tree_window.setRootIndex(tree_window.model.index(returned_data))
def openNiftiAnalyzeFile():
    """Prompt for an MRI image file (nifti/analyze); the path is only printed."""
    dlg = QtWidgets.QFileDialog()
    # NOTE(review): relies on the module-level procDataDirPath default folder.
    returned_data = dlg.getOpenFileName(None, "MRI data nifti/analyze", procDataDirPath, "nii files (*.nii);;analyze files (*.img);;All files (*)")
    print(returned_data)
def getH5file():
    """Ask for a results CSV, load all associated data, refresh every widget.

    Reads module-level globals (imageData, mainWindow, mri_window,
    hist_window, bar_window) created in the __main__ block below.
    """
    dlg = QtWidgets.QFileDialog()
    returned_data = dlg.getOpenFileName(None, "select results file", procDataDirPath, "CSV files (*.csv);;All files (*)")
    pathandfilename = returned_data[0]
    #self.hd5_store = pd.HDFStore(pathandfilename)
    if len(pathandfilename) > 0:
        ### attempt to extract details from data
        print(pathandfilename)
        imageData.readin_alldata_from_results_filename( os.path.abspath(pathandfilename))
        if imageData.read_T2_img_hdr_files():
            print("just before read_T2_data()")
            # Dixon data is only attempted after the T2 CSV loads.
            if imageData.read_T2_data():
                imageData.read_Dixon_data()
            print("just after read_T2_data()")
            mainWindow.setWindowTitle(imageData.T2resultsFilenameAndPath)
            #### Update image displayed in window
            imageData.overlayRoisOnImage(0, imageData.fittingParam)
            # mri_window.update_plot(imageData.img1)
            mri_window.update_plot(imageData.mriSliceIMG, imageData.maskedROIs)
            print("type(imageData.ImageDataT2)",type(imageData.ImageDataT2))
            hist_window.update_plot([1,imageData.T2slices,imageData.dixonSlices], [imageData.t2_data_summary_df, imageData.dixon_data_summary_df], "T2m")
            bar_window.update_plot([1,imageData.T2slices,imageData.dixonSlices], [imageData.t2_data_summary_df, imageData.dixon_data_summary_df], "T2m")
            #### set min max on sliders
            mri_window.slicesSlider.setMinimum(0)
            mri_window.slicesSlider.setMaximum(imageData.numSlicesT2-1)
            mri_window.slicesSlider.setValue(0)
            mri_window.echoesSlider.setMinimum(0)
            mri_window.echoesSlider.setMaximum(imageData.numEchoesT2-1)
            mri_window.slicesSlider.setValue(0)
        else:
            print(imageData.t2_image_hdr_pathfilename, " not found")
# NOTE(review): these two look like QMainWindow methods pasted at module
# level — `self` is a required positional argument and nothing in this file
# calls them (the corresponding menu action is commented out below).
# Confirm they are dead code before removing.
def fileQuit(self):
    self.close()
def closeEvent(self, ce):
    self.fileQuit()
if __name__ == "__main__":
    # --- fitting-parameter sets for the EPG and Azzabou T2 models ---
    lmparams = {}
    epgt2fitparams = lm.Parameters()
    azzt2fitparams = lm.Parameters()
    epgt2fitparams.add('T2fat', value = 180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value = 35, min=0, max=100, vary=True )
    epgt2fitparams.add('Afat', value = 0.20, min=0, max=10, vary=True )
    epgt2fitparams.add('Amuscle', value = 0.80, min=0, max=10, vary=True )
    epgt2fitparams.add('T1fat', value = 365.0, vary=False)
    epgt2fitparams.add('T1muscle', value = 1400, vary=False)
    epgt2fitparams.add('echo', value = 10.0, vary=False)
    epgt2fitparams.add('B1scale', value = 1.0, min=0, max=2, vary=True )
    azzt2fitparams.add_many(('Afat', 60.0, True, 0, 250, None),
                            ('Amuscle', 40.0, True, 0, 250, None),
                            ('T2muscle', 40.0, True, 0, 100, None),
                            ('c_l', 0.55, False, 0, 2000, None),
                            ('c_s', 0.45, False, 0, 2000, None),
                            ('t2_fl', 250.0, False, 0, 2000, None),
                            ('t2_fs', 43.0, False, 0, 2000, None),
                            ('echo', 10.0, False, 0, 2000, None))
    lmparams['epgt2fitparams'] = epgt2fitparams
    lmparams['azzt2fitparams'] = azzt2fitparams
    params=azzt2fitparams
    # --- global matplotlib/seaborn styling ---
    matplotlib.use('Qt5Agg')
    plt.style.context('seaborn-colorblind')
    sns.set(font_scale = 0.6)
    # sns.set_palette("pastel")
    # NOTE(review): hard-coded default data directory — confirm before release.
    procDataDirPath = r"/home/eric/Documents/projects/programming/2019/mri_progs/T2EPGviewer/studyData/testStudy/HC-001/sess-1/upperleg/T2/results/muscle/AzzEPG"
    progname = os.path.basename(sys.argv[0])
    qApp = QtWidgets.QApplication(sys.argv)
    imageData = T2imageData()
    print("imageData.fittingParam:",imageData.fittingParam)
    # --- main window and File menu ---
    mainWindow = QtWidgets.QMainWindow()
    mainWindow.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    mainWindow.setWindowTitle("application main window")
    file_menu = QtWidgets.QMenu('&File', mainWindow)
    # file_menu.addAction("&Open study Directory", openStudyDir)
    file_menu.addAction('&Choose Study Results File', getH5file, QtCore.Qt.CTRL + QtCore.Qt.Key_H)
    # file_menu.addAction('&Open nifti/analyze image File', openNiftiAnalyzeFile )
    # file_menu.addAction('&Choose Rois', imageData.getRoiFiles, QtCore.Qt.CTRL + QtCore.Qt.Key_R)
    # file_menu.addAction('&Quit', fileQuit, QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
    mainWindow.menuBar().addMenu(file_menu)
    main_widget = QtWidgets.QWidget(mainWindow)
    mainlayout = QtWidgets.QHBoxLayout(main_widget)
    # mainWindow.setCentralWidget(main_widget)
    # plot_window1 = mri_widget(main_widget)
    # --- random demo data for the histogram/bar widgets ---
    npts = 256*100
    iii = np.random.permutation(np.arange(255*255))[:npts]
    ddd = np.random.randn(npts)*100+500
    data_df = pd.DataFrame({'iii': iii, 'ddd':ddd})
    # --- layout: MRI view + fit-choice buttons left, plots right ---
    leftwindow = QtWidgets.QWidget()
    rightwindow = QtWidgets.QWidget()
    splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
    hlayout = QtWidgets.QHBoxLayout(leftwindow)
    vlayout = QtWidgets.QVBoxLayout(rightwindow)
    mri_window = mriplotwidget.MRIPlotWidget( imageData=imageData)
    rbtns_window = visionplot_widgets.radiobuttons_fitWidget(mri_window=mri_window)
    t2plot_window = visionplot_widgets.T2PlotWidget( lmparams, showToolbar=False)
    bar_window = visionplot_widgets.BarPlotWidget( showToolbar=False, data_df=data_df, image_size=256)
    hist_window = visionplot_widgets.HistogramPlotWidget( mri_plot=mri_window, showToolbar=True,data_df=data_df, image_size=256)
    mainlayout.addWidget(splitHwidget)
    hlayout.addWidget(rbtns_window)
    hlayout.addWidget(mri_window)
    vlayout.addWidget(t2plot_window)
    vlayout.addWidget(bar_window)
    vlayout.addWidget(hist_window)
    splitHwidget.addWidget(leftwindow)
    splitHwidget.addWidget(rightwindow )
    # Let the MRI widget push updates to the other plot widgets.
    mri_window.register_PlotWidgets(t2plot_window, bar_window, hist_window, rbtns_window)
    main_widget.setFocus()
    mainWindow.setCentralWidget(main_widget)
    mainWindow.show()
    sys.exit(qApp.exec_())
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 6 14:55:05 2018
@author: ERIC
"""
import os
import numpy as np
import pandas as pd
import nibabel
class T2imageData():
    def __init__(self):
        """Initialise all bookkeeping attributes to their empty defaults."""
        # Current display position in the image volume.
        self.currentSlice = None
        self.currentEcho = None
        # Directory paths for the T2/Dixon images and their results.
        self.T2imagesDirpath = None
        self.dixonImagesDirpath = None
        self.dixonResultsDirpath = None
        self.T2resultsDirpath = None
        # Path components parsed from a results-file location.
        self.root = None
        self.studyName = None
        self.subject = None
        self.session = None
        self.imagedRegion = None
        self.protocol = None
        self.results = None
        self.roiType = None
        self.fitModel = None
        self.imagedRegionType = self.roiType
        # Image file locations and formats ("nifti" or "analyze").
        self.T2imageType = None
        self.T2MRIimageFilenameAndPath = ""
        self.dixonImageType = None
        self.dixonMRIimageFilenameAndPath = ""
        self.T2resultsFilenameAndPath = ""
        self.dixonResultsFilenameAndPath = ""
        # Fit parameter currently displayed.
        self.fittingParam = "T2m"
        # Volume geometry, filled in by read_T2_img_hdr_files().
        self.numRowsT2 = None
        self.numColsT2 = None
        self.numSlicesT2 = None
        self.numEchoesT2 = None
        self.dixonSlices = None
        self.T2slices = None
        self.ImageDataT2 = None
        self.mriSliceIMG = None
        # Per-protocol summary tables loaded from the results CSVs.
        self.t2_data_summary_df = None
        self.dixon_data_summary_df = None
    def readin_alldata_from_results_filename(self, fn):
        """Derive every directory/file location from one results-file path ``fn``."""
        print("inside readin_alldata_from_results_filename")
        self.set_dataDir_and_results_filenames(fn)
        self.set_T2imageData_filename_and_type()
        self.set_dixonImageData_filename_and_type()
        # Debug dump of everything that was just derived.
        print("T2resultsDirpath     :: ",self.T2resultsDirpath)
        print("dixonResultsDirpath  :: ", self.dixonResultsDirpath)
        print("T2imagesDirpath      :: ", self.T2imagesDirpath)
        print("dixonImagesDirpath   :: ", self.dixonImagesDirpath)
        print("T2imageType          :: ", self.T2imageType)
        print("T2MRIimageFilenameAndPath :: ", self.T2MRIimageFilenameAndPath)
        print("dixonImageType       :: ", self.dixonImageType)
        print("dixonMRIimageFilenameAndPath ::", self.dixonMRIimageFilenameAndPath)
        print("T2resultsFilenameAndPath  :: ", self.T2resultsFilenameAndPath)
        print("dixonResultsFilenameAndPath :: ", self.dixonResultsFilenameAndPath)
def set_T2imageData_filename_and_type(self):
"""Searches for image data in directory
can be nifti or analyze sets the type and filename"""
print("inside set_T2imageData_filename_and_type")
print("self.T2imagesDirpath", self.T2imagesDirpath)
if self.T2imagesDirpath == None:
self.T2imageType = None
return False
else:
imgFilenameList = [ os.path.join(self.T2imagesDirpath,fn)
for fn in os.listdir(self.T2imagesDirpath)
if "nii" in fn or "img" in fn]
if len(imgFilenameList) == 0:
self.T2imageType = None
self.T2MRIimageFilenameAndPath = None
return False
else:
self.T2MRIimageFilenameAndPath = imgFilenameList[0]
if "nii" in self.T2MRIimageFilenameAndPath:
self.T2imageType = "nifti"
else:
self.T2imageType = "analyze"
return True
def set_dixonImageData_filename_and_type(self):
"""Searches for image data in directory
can be nifti or analyze sets the type and filename
filename must have fatPC. in it"""
print( "inside set_dixonImageData_filename_and_type")
print("self.dixonImagesDirpath",self.dixonImagesDirpath)
if self.dixonImagesDirpath == None:
self.dionImageType = None
return False
else:
imgFilenameList = [ os.path.join(self.dixonImagesDirpath,fn)
for fn in os.listdir(self.dixonImagesDirpath)
if "fatPC." in fn and ("nii" in fn or "img" in fn)]
if len(imgFilenameList) == 0:
self.dixonImageType = None
self.dixonMRIimageFilenameAndPath = None
return False
else:
self.dixonMRIimageFilenameAndPath = imgFilenameList[0]
if "nii" in self.dixonMRIimageFilenameAndPath:
self.dixonImageType = "nifti"
else:
self.dixonImageType = "analyze"
return True
    def set_results_dir(self,protocol, resultsDir):
        """Find the results directory for the given (sibling) ``protocol``.

        Returns ``(resultsDir, resultsDirpath)`` where ``resultsDirpath`` is
        the sibling protocol's results folder, or None when absent.
        """
        resultsDirpath = None
        # resultsDirpath1 = resultsDir
        # Preferred: exact path including the current fit-model folder.
        dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
                               self.imagedRegion,protocol, self.results,self.roiType,self.fitModel)
        if os.path.exists(dirpath):
            resultsDirpath = dirpath
        else:
            # Fallback: first fit-model folder found under the same roiType.
            dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
                                   self.imagedRegion,protocol, self.results,self.roiType)
            if os.path.exists(dirpath):
                fitModels = [f for f in os.listdir(dirpath)]
                if len(fitModels)> 0:
                    resultsDirpath = os.path.join(dirpath, fitModels[0])
        return resultsDir, resultsDirpath
def set_dataDir_and_results_filenames( self, fn):
print("inside set_dataDir_and_results_filenames")
print("fn", fn)
resultsDir, resultsFilename = os.path.split(fn)
print("resultsDir", resultsDir)
print("resultsFilename", resultsFilename)
resultsDirList = resultsDir.split(os.path.sep)
print("resultsDirList",resultsDirList, )
sessionIndex = [ i for i,w in enumerate(resultsDirList) if "sess" in w]
print("sessionIndex",sessionIndex)
if len(sessionIndex):
si = sessionIndex[0]
print("si",si)
print("resultsDirList",resultsDirList)
print("resultsDirList[0]",resultsDirList[0])
# print("resultsDirList[0][-1]",resultsDirList[0][-1])
if len(resultsDirList[0])>0:
if ":" == resultsDirList[0][-1]: # add path seperator if root ends in :
resultsDirList[0] = resultsDirList[0]+os.path.sep
print("resultsDirList[0]", resultsDirList[0])
self.root = os.path.sep.join(resultsDirList[:si-2])
self.studyName = resultsDirList[si-2]
self.subject = resultsDirList[si-1]
self.session = resultsDirList[si]
self.imagedRegion = resultsDirList[si+1]
self.protocol = resultsDirList[si+2]
self.results = resultsDirList[si+3]
self.roiType = imagedRegionType = resultsDirList[si+4]
self.fitModel = resultsDirList[si+5]
print("self.root",self.root)
### create directory paths to T2 and Dixon results and image path
# T2_images_dirPath
# dixon_images_dirPath
# dixon_results_dirPath
# T2_results_dirPath
## T2 image path
dirpath = os.path.join(self.root,self.studyName,self.subject,
self.session,self.imagedRegion,"T2")
if os.path.exists(dirpath):
self.T2imagesDirpath = dirpath
## dixon image path
dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
self.imagedRegion,"dixon")
if os.path.exists(dirpath):
self.dixonImagesDirpath = dirpath
## set T2 and dixon results path
if self.protocol.lower() == "t2":
self.T2resultsDirpath, self.dixonResultsDirpath, = self.set_results_dir("dixon", resultsDir)
elif self.protocol.lower() == "dixon":
self.dixonResultsDirpath, self.T2resultsDirpath, = self.set_results_dir("T2", resultsDir)
print("self.dixonResultsDirpath", self.dixonResultsDirpath)
print("self.T2resultsDirpath", self.T2resultsDirpath)
## set csv results path name for T2 and dixon
if "T2".lower() in fn.lower():
self.T2resultsFilenameAndPath = fn
resultFilenameList = [ os.path.join(self.dixonResultsDirpath,fi)
for fi in os.listdir(self.dixonResultsDirpath)
if "results." in fi.lower() and (".csv" in fi.lower() )]
if resultFilenameList:
self.dixonResultsFilenameAndPath = resultFilenameList[0]
elif "dixon" in fn.lower():
self.dixonResultsFilenameAndPath = fn
resultFilenameList = [ os.path.join(self.T2resultsDirpath,fi)
for fi in os.listdir(self.T2ResultsDirpath)
if "results." in fi.lower() and (".csv" in fi.lower() )]
if resultFilenameList:
self.T2resultsFilenameAndPath = resultFilenameList[0]
def read_T2_data(self):
print("read_T2_data function entered")
print("self.T2resultsFilenameAndPath", self.T2resultsFilenameAndPath)
if os.path.exists(self.T2resultsFilenameAndPath):
print(self.T2resultsFilenameAndPath, "exists")
self.t2_data_summary_df = pd.read_csv(self.T2resultsFilenameAndPath)
self.T2slices = list(self.t2_data_summary_df["slice"].unique())
return(True)
else:
print(self.T2resultsFilenameAndPath, "not Found" )
return(False)
def read_Dixon_data(self):
print("read_Dixon_data function entered")
print("self.dixonResultsFilenameAndPath",self.dixonResultsFilenameAndPath)
if os.path.exists(self.dixonResultsFilenameAndPath):
print(self.dixonResultsFilenameAndPath, "exists")
self.dixon_data_summary_df = pd.read_csv(self.dixonResultsFilenameAndPath)
self.dixonSlices = list(self.dixon_data_summary_df["slice"].unique())
return(True)
else:
print(self.dixonResultsFilenameAndPath, "not Found" )
self.dixon_data_summary_df = pd.DataFrame()
return(False)
    def read_T2_img_hdr_files(self):
        """Load the multi-echo T2 image volume from disk.

        Reads the nifti/analyze file located earlier, reorients it for
        display, caches it via ``update_imageDataT2`` and resets the shown
        slice/echo to 0.  Returns True on success, False when the image
        file is missing.
        """
        if os.path.exists(self.T2MRIimageFilenameAndPath):
            print(self.T2MRIimageFilenameAndPath, " found")
            self.t2_imghdr = nibabel.load(self.T2MRIimageFilenameAndPath)
            # NOTE(review): get_data() is deprecated in newer nibabel
            # releases (use get_fdata()) -- confirm the pinned version.
            image_data = self.t2_imghdr.get_data()
            # Swap row/column axes, then flip vertically, for display orientation.
            image_data = np.flipud(image_data.swapaxes(1,0))
            self.update_imageDataT2(image_data)
            # Volume is expected to be 4-D: (rows, cols, slices, echoes).
            [self.numRowsT2, self.numColsT2, self.numSlicesT2, self.numEchoesT2] = self.ImageDataT2.shape
#            self.img1 = np.zeros((self.numRowsT2, self.numColsT2,3), dtype=np.double)
            self.mriSliceIMG = np.zeros((self.numRowsT2, self.numColsT2), dtype=np.double)
#            self.img1[:,:,0] = self.ImageDataT2[:,:,0,0]/(self.ImageDataT2[:,:,0,0].max()*2)
#            self.img1[:,:,0] = self.ImageDataT2[:,:,0,0]
            # Display echo 0 of slice 0 first (the *1.0 forces a float copy).
            self.mriSliceIMG = self.ImageDataT2[:,:,0,0]*1.0
            self.currentEcho = 0
            self.currentSlice = 0
#            mainWindow.setWindowTitle(self.study_name)
            return(True)
        else:
            return(False)
    def update_imageDataT2(self, imageData):
        """Store *imageData* as the current T2 volume (rows, cols, slices,
        echoes -- shape assumption based on read_T2_img_hdr_files)."""
        self.ImageDataT2 = imageData
    def overlayRoisOnImage(self, slice_pos, roi_data):
        """Prepare ``self.maskedROIs`` for overlaying ROI values on a slice.

        *roi_data* is a column name, looked up first in the T2 results table
        and then in the Dixon results table; pixels with no value (0) are
        masked out.  NOTE(review): the stored array is flat
        (numRowsT2*numColsT2,) -- the caller presumably reshapes it for
        display; confirm.  Raises AttributeError if read_T2_data has not run
        yet (``t2_data_summary_df`` would still be None).
        """
        print("Entering overlayRoisOnImage", slice_pos)
        print("roi_data",roi_data)
        if roi_data in self.t2_data_summary_df.columns:
            # T2 parameter: scatter values into the flat image by pixel_index.
            roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
            t2_data_query_df = self.t2_data_summary_df.query('slice == {}'.format(str(slice_pos)))
            roi_image_layer[t2_data_query_df.pixel_index] = t2_data_query_df[roi_data]
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
        elif roi_data in self.dixon_data_summary_df.columns:
#            print("slice_pos", slice_pos)
#            print("self.T2slices.index(slice_pos)",self.T2slices.index(slice_pos))
#            print("self.dixonSlices[self.T2slices.index(slice_pos)]",self.dixonSlices[self.T2slices.index(slice_pos)])
            # Dixon parameter: translate the displayed T2 slice number into
            # the Dixon slice number by list position.
            if slice_pos in self.T2slices:
                dixon_slice = self.dixonSlices[self.T2slices.index(slice_pos)]
            else:
                dixon_slice = slice_pos
            roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
            #df_t2 = self.t2_data_summary_df[roi_data, 'pixel_index','roi'].groupby('slice')
            dixon_data_query_df = self.dixon_data_summary_df.query('slice == {}'.format(str(dixon_slice)))
#            roi_image_layer[dixon_data_query_df.pixels] = dixon_data_query_df[roi_data]/dixon_data_query_df[roi_data].max()
            roi_image_layer[dixon_data_query_df.pixel_index] = dixon_data_query_df[roi_data]
#            self.img1[:,:,2] = roi_image_layer.reshape((self.numRowsT2,self.numColsT2))
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
        else:
            # Unknown column: overlay becomes fully masked (all zeros).
            roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
|
{
"imported_by": [],
"imports": [
"/ImageData.py"
]
}
|
EricHughesABC/T2EPGviewer
|
/visionplot_widgets.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 28 13:11:07 2018
@author: neh69
"""
import sys
import numpy as np
#import matplotlib
import pandas as pd
#import mplcursors
from uncertainties import ufloat
import t2fit
import lmfit as lm
from matplotlib import pyplot as plt
#import seaborn as sns
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
import seaborn as sns
if is_pyqt5():
print("pyqt5")
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
print("pyqt4")
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from ImageData import T2imageData
import epgT2paramsDialog
import azzT2paramsDialog
#mxyz90 = np.fromfile( 'epg\mxyz90.txt', sep=' ' )
#mxyz180 = np.fromfile('epg\mxyz180.txt', sep=' ')
#
#mxyz90 = mxyz90.reshape(5,512)
#mxyz180 = mxyz180.reshape(5,512)
#
#offset=130
#step=10
#epg_slice_xxx =mxyz90[0][offset:-offset+step:step] # mm
#epg_p90 = mxyz90[-1][offset:-offset+step:step] # degrees
#epg_p180 = mxyz180[-1][offset:-offset+step:step] # degrees
#epg_dx=epg_slice_xxx[1]-epg_slice_xxx[0]
class PlotWidget(QtWidgets.QWidget):
    """Qt widget wrapping a single-axes matplotlib canvas.

    Builds a vertical layout holding a ``FigureCanvas`` and, optionally,
    the standard matplotlib navigation toolbar underneath it.
    """

    def __init__(self, parent=None, showToolbar=True):
        super(PlotWidget, self).__init__(parent)
        figure = Figure(figsize=(3, 5))
        figure.set_tight_layout(True)
        self.plot_canvas = FigureCanvas(figure)
        self.ax = figure.add_subplot(111)
        self.layout = QtWidgets.QVBoxLayout(self)
        self.layout.addWidget(self.plot_canvas)
        if showToolbar:
            self.toolbar = NavigationToolbar(self.plot_canvas, self)
            self.layout.addWidget(self.toolbar)

    def return_ax(self):
        """Return the widget's matplotlib axes."""
        return self.ax
class HistogramPlotWidget(PlotWidget):
    """Plot widget showing per-ROI density curves of one fitted parameter.

    ``update_plot`` picks the parameter column from the T2 results frame
    (``data_dframes[0]``) or, failing that, the Dixon results frame
    (``data_dframes[1]``), restricts it to the displayed slice and draws one
    seaborn density curve per ROI.
    """

    def __init__(self, parent=None, showToolbar=False, mri_plot=None, data_df=None, image_size=256):
        # Plain attributes are set before the QWidget __init__ chain runs.
        self.data_df = data_df
        self.image_size = image_size
        super(HistogramPlotWidget, self).__init__(parent=parent, showToolbar=showToolbar)
        self.buttonUpdate = QtWidgets.QPushButton('Update')
        self.buttonUpdate.clicked.connect(self.update)
        self.layout.addWidget(self.buttonUpdate)

    def update(self):
        # Debug hook wired to the Update button; only reports the x-range.
        print((self.ax.get_xlim()))
        xmin, xmax = self.ax.get_xlim()

    def _select_slice_data(self, slice_info, data_dframes, plot_param):
        """Return (rows-for-shown-slice, slice-number-used) from whichever
        frame holds *plot_param*, or None when neither frame provides it."""
        slice_displayed, T2_slices, dixon_slices = slice_info
        t2_df, dixon_df = data_dframes[0], data_dframes[1]
        if isinstance(t2_df, pd.core.frame.DataFrame) and plot_param in t2_df.columns:
            print("plot_param {} found in dataframe is T2".format(plot_param))
            return t2_df[t2_df["slice"] == slice_displayed], slice_displayed
        if isinstance(dixon_df, pd.core.frame.DataFrame) and plot_param in dixon_df.columns:
            print("plot_param {} found in dataframe is Dixon".format(plot_param))
            # Map the displayed T2 slice number to its Dixon counterpart by
            # list position.
            if slice_displayed in T2_slices:
                slice_displayed = dixon_slices[T2_slices.index(slice_displayed)]
            return dixon_df[dixon_df["slice"] == slice_displayed], slice_displayed
        print("HIST", plot_param, " not found")
        return None

    def update_plot(self, slice_info, data_dframes, plot_param):
        """Redraw the histogram panel.

        Returns True on success, False when no data is available for
        *plot_param* on the current slice.

        BUGFIX: the original crashed with AttributeError when
        ``data_dframes[0]`` was not a DataFrame -- ``data_df`` stayed None
        but ``data_df.shape[0]`` (and unconditional ``.columns`` prints)
        were evaluated before any None test.  Frame selection now lives in
        ``_select_slice_data`` and the None check comes first.
        """
        self.ax.cla()
        self.plot_canvas.draw()
        print("Entered HistogramPlotWidget.update_image, plot_param =", plot_param)
        selection = self._select_slice_data(slice_info, data_dframes, plot_param)
        if selection is None:
            return False
        data_df, slice_used = selection
        print("HIST data_df.shape[0]", data_df.shape[0])
        if data_df.shape[0] == 0:
            print("HIST return because df shape[0] = 0 or type of data_df = type None")
            return False
        print("Plotting HIST Plot" )
        data_df = data_df.sort_values(by=['roi'])
        for roi in data_df.roi.unique():
            print(roi)
            query_str = '(slice == {}) and (roi == "{}")'.format(slice_used, roi)
            sns.distplot(data_df.query(query_str)[plot_param], hist=False, label=roi, ax=self.ax)
        self.ax.legend()
        # Human-readable axis label for the known fit parameters.
        if plot_param == "T2m":
            self.ax.set_xlabel("$T_2$ [ms]")
        elif plot_param == "Am100":
            self.ax.set_xlabel("$A_m$ [%]")
        elif plot_param == "Af100":
            self.ax.set_xlabel("$A_f$ [%]")
        elif plot_param == "B1":
            self.ax.set_xlabel("$B_1$")
        elif plot_param == "fatPC":
            self.ax.set_xlabel("ff [%]")
        self.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        self.plot_canvas.draw()
        return True
class BarPlotWidget(PlotWidget):
    """Plot widget showing a per-ROI bar chart (mean +/- sd) of one fitted
    parameter for the displayed slice."""

    def __init__(self, parent=None, showToolbar=True, data_df=None, image_size=256):
        self.data_df = data_df
        self.image_size = image_size
        super(BarPlotWidget, self).__init__(parent=parent, showToolbar=showToolbar)
#        self.buttonUpdate = QtWidgets.QPushButton('Update')
#        self.buttonUpdate.clicked.connect(self.update)
#        self.layout.addWidget(self.buttonUpdate)

    def update(self):
        # Debug hook; only reports the x-range.
        print((self.ax.get_xlim()))
        xmin, xmax = self.ax.get_xlim()

    def _select_slice_data(self, slice_info, data_dframes, plot_param):
        """Return (rows-for-shown-slice, slice-number-used) from whichever
        frame holds *plot_param*, or None when neither frame provides it."""
        slice_displayed, T2_slices, dixon_slices = slice_info
        t2_df, dixon_df = data_dframes[0], data_dframes[1]
        if isinstance(t2_df, pd.core.frame.DataFrame) and plot_param in t2_df.columns:
            print("plot_param {} found in dataframe is T2".format(plot_param))
            return t2_df[t2_df["slice"] == slice_displayed], slice_displayed
        if isinstance(dixon_df, pd.core.frame.DataFrame) and plot_param in dixon_df.columns:
            print("plot_param {} found in dataframe is Dixon".format(plot_param))
            # Map the displayed T2 slice number to its Dixon counterpart by
            # list position.
            if slice_displayed in T2_slices:
                slice_displayed = dixon_slices[T2_slices.index(slice_displayed)]
            return dixon_df[dixon_df["slice"] == slice_displayed], slice_displayed
        print( plot_param, " not found")
        return None

    def update_plot(self, slice_info, data_dframes, plot_param):
        """Redraw the bar-chart panel; True on success, False otherwise.

        BUGFIX 1: selecting the frame crashed with AttributeError when
        ``data_dframes[0]`` was not a DataFrame (data_df stayed None but was
        dereferenced before the None test).
        BUGFIX 2: ``sns.catplot`` is a figure-level function that ignores the
        ``ax=`` argument and opens its own figure, so the bars never appeared
        on the embedded canvas; the axes-level ``sns.barplot`` draws in place.
        """
        self.ax.cla()
        self.plot_canvas.draw()
        print("Entered BarPlotWidget.update_image, plot_param =", plot_param)
        selection = self._select_slice_data(slice_info, data_dframes, plot_param)
        if selection is None:
            return(False)
        data_df, slice_used = selection
        print("HIST data_df.shape[0]", data_df.shape[0])
        if data_df.shape[0] == 0:
            print("return because df shape[0] = 0 or type of data_df = type None")
            return False
        data_df = data_df.sort_values(by=['roi'])
        print("Plotting BAR Plot" )
        sns.barplot(x='slice',
                    y=plot_param,
                    data=data_df,
                    hue='roi',
                    ci="sd",
                    ax=self.return_ax())
        self.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        # Human-readable axis label for the known fit parameters.
        if plot_param == "T2m":
            self.ax.set_ylabel("$T_2$ [ms]")
        elif plot_param == "Am100":
            self.ax.set_ylabel("$A_m$ [%]")
        elif plot_param == "Af100":
            self.ax.set_ylabel("$A_f$ [%]")
        elif plot_param == "B1":
            self.ax.set_ylabel("$B_1$")
        elif plot_param == "fatPC":
            self.ax.set_ylabel("ff [%]")
        self.ax.set_xlabel("slices")
        self.plot_canvas.draw()
        return True
class T2PlotWidget(PlotWidget):
    """Plot widget showing a pixel's echo-decay data together with the EPG
    or normal (Azzabou) bi-exponential T2 fit and the fitted parameters."""

    def __init__( self, lmparams, parent=None, showToolbar=True):
        super(T2PlotWidget, self).__init__(parent, showToolbar=showToolbar)
        self.plot_T2_startup()
        self.lmparams = lmparams
        # Radio buttons selecting the fitting model (EPG vs normal T2).
        self.T2epgnorm_btns = radiobuttons_EPGWidget(self.lmparams, self)
        self.layout.addWidget(self.T2epgnorm_btns)

    def plot_T2_startup(self):
        """Draw a synthetic noisy bi-exponential decay as the initial plot."""
        ttt = np.linspace(0,170, 17)
        yyy = 80*np.exp(-ttt/35.0)+20*np.exp(-ttt/120.0)
        yyy1 = yyy+np.random.randn(len(yyy))
        self.ax.semilogy(ttt, yyy1, 'o')
        self.ax.semilogy(ttt, yyy, '-')
        self.ax.set_xlabel('Time [ms]')
        self.ax.set_ylabel('Signal')
        self.ax.set_ylim(1,110)

    def update_plot(self, xcoord, ycoord, t2data):
        """Fit *t2data* with the selected model and redraw data + fit curve.

        Silently returns when the lmfit result reports failure.
        NOTE(review): xcoord/ycoord are currently unused -- confirm they are
        kept only for the caller's signature.
        """
        print("update_T2PlotImag called")
        #self.ttt = np.linspace(0,170, 17)
        self.ax.cla() # clear the plot area
        if self.T2epgnorm_btns.epg_rbtn.isChecked():
            print("Run EPG Fit")
            print('echo value', self.lmparams['epgt2fitparams']['echo'])
#            params = lm.Parameters()
#            params.add('T2fat',    value = 180.0, min=0, max=5000,  vary=False)
#            params.add('T2muscle', value = 35, min=0, max=100,  vary=True )
#            params.add('Afat',     value = 0.01,  min=0, max=10,  vary=True )
#            params.add('Amuscle',  value = 0.1,  min=0, max=10, vary=True )
#            params.add('T1fat',    value = 365.0, vary=False)
#            params.add('T1muscle', value = 1400,  vary=False)
#            params.add('echo',     value = 10.0,  vary=False)
            #xxx = np.linspace(10,10*len(t2data), len(t2data))
#            self.params.pretty_print()
            #fit_values, fit_curve, fit_data, lmresults = t2fit.calculate_T2values_on_slice_muscleEPG(self.lmparams, t2data, len(t2data), xxx, epg_dx, epg_p90, epg_p180)
            fit_curve, fit_data, lmresults, xxx = t2fit.calculate_T2values_on_slice_muscleEPG(self.lmparams, t2data)
        else:
            print("Run Normal T2 Fit")
            fit_curve, fit_data, lmresults, xxx = t2fit.calculate_T2values_on_slice_muscleAzz(self.lmparams,t2data)
        print(dir(lmresults))
        print(lmresults.success)
        if not lmresults.success:
            return
#
#       Create uncertainty floats of varied params
#
        # Wrap each varied parameter value with its stderr as a ufloat so the
        # legend can show value +/- uncertainty.
        ufs = {}
        for vname in lmresults.var_names:
            v = lmresults.params[vname].value
            e = lmresults.params[vname].stderr
            ufs[vname] = ufloat( v,e)
        # Express the two amplitudes as percentages of their sum.
        if ('Amuscle' in ufs.keys()) and ('Afat' in ufs.keys()):
            ufs['Amuscle'] = 100.0*ufs['Amuscle']/(ufs['Amuscle']+ufs['Afat'])
            ufs['Afat'] = 100.0-ufs['Amuscle']
        # Build the legend text from whichever parameters were fitted.
        t2m_str = ""
        t2f_str = ""
        Am_str = ""
        Af_str = ""
        B1_str = ""
        for name, value in ufs.items():
            print(name)
            if name == 'T2muscle':
                t2m_str = "$T_{{2m}}$ = ${:5.2fL}$ ms\n".format(value)
            elif name == 'T2fat':
                t2f_str = "$T_{{2f}}$ = ${:5.2fL}$ ms\n".format(value)
            elif name == 'Amuscle':
                Am_str = "$A_m$ = ${:5.2fL}$\n".format(value)
            elif name == 'Afat':
                Af_str = "$A_f$ = ${:5.2fL}$\n".format(value)
            elif name == 'B1scale':
                B1_str = "$B_1$ scale = ${:5.2fL}$\n".format(value)
        results_legend = "{}{}{}{}{}".format(t2m_str, t2f_str, Am_str, Af_str, B1_str)
        # The normal fit skips the first two echoes of the fitted curve.
        if self.T2epgnorm_btns.epg_rbtn.isChecked():
            self.ax.semilogy(xxx, 100*fit_data, 'o')
            self.ax.semilogy(xxx, 100*fit_curve, '-', label=results_legend)
        else:
            self.ax.semilogy(xxx[2:], 100*fit_curve, '-', label=results_legend)
            self.ax.semilogy(xxx, 100*fit_data, 'o')
        self.ax.legend( fontsize=8)
        #self.ax.set_ylim(1,110)
        self.ax.set_xlabel('Time [ms]')
        self.ax.set_ylabel('Signal')
        self.ax.set_ylim(0.5,150)
        self.plot_canvas.draw()
class radiobuttons_EPGWidget(QtWidgets.QWidget):
    """Horizontal row of radio buttons selecting the T2 fitting model
    (EPG vs normal) plus a button that opens the matching parameter dialog."""

    def __init__(self, lmparams, parent=None):
        self.lmparams = lmparams
        # Parameter dialogs are built once and shown on demand.
        self.epgDialog = QtWidgets.QDialog()
        self.epgT2params_widget = epgT2paramsDialog.EpgT2paramsDialog(self.lmparams)
        self.epgT2params_widget.setupEpgT2paramsDialog(self.epgDialog)
        self.azzDialog = QtWidgets.QDialog()
        self.azzT2params_widget = azzT2paramsDialog.AzzT2paramsDialog(self.lmparams)
        self.azzT2params_widget.setupAzzT2paramsDialog(self.azzDialog)
        super(radiobuttons_EPGWidget, self).__init__(parent)
        hlayout = QtWidgets.QHBoxLayout(self)
        # BUGFIX: the button group was an unparented local (eligible for
        # garbage collection) and ``exclusive()`` is a getter, so the call
        # set nothing.  Keep the group as an attribute, parent it to this
        # widget and set exclusivity explicitly.
        self.group_rbtns = QtWidgets.QButtonGroup(self)
        self.group_rbtns.setExclusive(True)
        self.epg_rbtn = QtWidgets.QRadioButton("EPG T2")
        self.norm_rbtn = QtWidgets.QRadioButton("normal T2")
        self.norm_rbtn.setChecked(True)
        self.T2params_btn = QtWidgets.QPushButton("T2 Parameters")
        # Tag each button with the fitting model it selects.
        self.epg_rbtn.fittingParam = "epg"
        self.norm_rbtn.fittingParam = 'norm'
        self.epg_rbtn.toggled.connect(lambda: self.btnstate(self.epg_rbtn))
        self.norm_rbtn.toggled.connect(lambda: self.btnstate(self.norm_rbtn))
        self.T2params_btn.clicked.connect(self.T2params_btn_clicked)
        self.group_rbtns.addButton(self.epg_rbtn)
        self.group_rbtns.addButton(self.norm_rbtn)
        hlayout.addWidget(self.norm_rbtn)
        hlayout.addWidget(self.epg_rbtn)
        hlayout.addStretch(1)
        hlayout.addWidget(self.T2params_btn)

    def T2params_btn_clicked(self):
        """Open the parameter dialog matching the selected fit model."""
        print("T2params_btn_clicked")
        if self.epg_rbtn.isChecked():
            rt = self.epgDialog.show()
        else:
            rt = self.azzDialog.show()
        print("rt =", rt)

    def btnstate(self, b):
        """Debug hook: report which radio button is now checked."""
        if b.isChecked():
            print(b.text())
            print(b.fittingParam)
            #self.mri_window.on_fittingParams_rbtn_toggled( str(b.fittingParam))
class radiobuttons_fitWidget(QtWidgets.QWidget):
    """Vertical column of radio buttons selecting which fitted parameter
    (T2, Am, Af, B1 or Dixon fat %) the MRI overlay displays.

    Forwards the selection to *mri_window* via
    ``on_fittingParams_rbtn_toggled``.
    """

    def __init__(self, parent=None, mri_window=None):
        super(radiobuttons_fitWidget, self).__init__(parent)
        self.mri_window = mri_window
        vbox1_radiobuttons = QtWidgets.QVBoxLayout(self)
        # BUGFIX: the button group was an unparented local (eligible for
        # garbage collection) and ``exclusive()`` is a getter, so the call
        # set nothing.  Keep it as an attribute and set exclusivity properly.
        self.group_fittingParams_rbtns = QtWidgets.QButtonGroup(self)
        self.group_fittingParams_rbtns.setExclusive(True)
        self.T2_rbtn = QtWidgets.QRadioButton("T2")
        self.Am_rbtn = QtWidgets.QRadioButton("Am")
        self.Af_rbtn = QtWidgets.QRadioButton("Af")
        self.B1_rbtn = QtWidgets.QRadioButton("B1")
        self.Dixon_rbtn = QtWidgets.QRadioButton("Dixon Fat [%]")
        self.T2_rbtn.setChecked(True)
        # Tag each button with the results-table column it selects.
        self.T2_rbtn.fittingParam = "T2m"
        self.Am_rbtn.fittingParam = "Am100"
        self.Af_rbtn.fittingParam = "Af100"
        self.B1_rbtn.fittingParam = "B1"
        self.Dixon_rbtn.fittingParam = "fatPC"
        self.T2_rbtn.toggled.connect(lambda: self.btnstate(self.T2_rbtn))
        self.Am_rbtn.toggled.connect(lambda: self.btnstate(self.Am_rbtn))
        self.Af_rbtn.toggled.connect(lambda: self.btnstate(self.Af_rbtn))
        self.B1_rbtn.toggled.connect(lambda: self.btnstate(self.B1_rbtn))
        self.Dixon_rbtn.toggled.connect(lambda: self.btnstate(self.Dixon_rbtn))
        self.group_fittingParams_rbtns.addButton(self.T2_rbtn)
        self.group_fittingParams_rbtns.addButton(self.Am_rbtn)
        self.group_fittingParams_rbtns.addButton(self.Af_rbtn)
        self.group_fittingParams_rbtns.addButton(self.B1_rbtn)
        self.group_fittingParams_rbtns.addButton(self.Dixon_rbtn)
        vbox1_radiobuttons.addWidget(self.T2_rbtn)
        vbox1_radiobuttons.addWidget(self.Am_rbtn)
        vbox1_radiobuttons.addWidget(self.Af_rbtn)
        vbox1_radiobuttons.addWidget(self.B1_rbtn)
        vbox1_radiobuttons.addWidget(self.Dixon_rbtn)
        vbox1_radiobuttons.addStretch(1)

    def btnstate(self, b):
        """Notify the MRI window which fit parameter is now selected."""
        if b.isChecked():
            print(b.text())
            print(b.fittingParam)
            self.mri_window.on_fittingParams_rbtn_toggled( str(b.fittingParam))
class ApplicationWindow(QtWidgets.QMainWindow):
    """Demo main window: MRI view + parameter radio buttons on the left,
    T2 fit / image / histogram panels on the right, joined by a splitter.

    Populates the panels with synthetic random/chessboard data for testing.
    """

    def __init__(self, params):
        self.params = params
        imageData = T2imageData()
        print("imageData.fittingParam:",imageData.fittingParam)
        # Synthetic demo data: npts random pixel indices with random values.
        npts = 256*100
        iii = np.random.permutation(np.arange(255*255))[:npts]
        ddd = np.random.randn(npts)*100+500
        data_df = pd.DataFrame({'iii': iii, 'ddd':ddd})
        super(ApplicationWindow, self).__init__()
        leftwindow = QtWidgets.QWidget()
        rightwindow = QtWidgets.QWidget()
        splitHwidget = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        #hlayout = QtWidgets.QHBoxLayout(self._main)
        hlayout = QtWidgets.QHBoxLayout(leftwindow)
        vlayout = QtWidgets.QVBoxLayout(rightwindow)
        # NOTE(review): MRIPlotWidget is defined elsewhere in this module.
        mriplot_window = MRIPlotWidget(imageData=imageData)
        rbtns_window = radiobuttons_fitWidget(mri_window=mriplot_window)
        t2plot_window = T2PlotWidget( self.params, showToolbar=False)
        h1_window = PlotWidget( showToolbar=False)
        h2_window = HistogramPlotWidget(showToolbar=True)
        #hlayout.addWidget(mriplot_window)
        # Let the MRI view push updates into the three right-hand panels.
        mriplot_window.register_PlotWidgets(t2plot_window, h1_window, h2_window)
        #vbox1_radiobuttons = QtWidgets.QVBoxLayout()
#        hbox.addLayout(vbox1_radiobuttons)
#        hbox.addLayout(vbox1_image)
#        hbox.addLayout(vbox2_image)
        hlayout.addWidget(rbtns_window)
        hlayout.addWidget(mriplot_window)
        vlayout.addWidget(t2plot_window)
        vlayout.addWidget(h1_window)
        vlayout.addWidget(h2_window)
        def func3(x, y):
            # Demo surface used for the layered-image example below.
            return (1 - x / 2 + x**5 + y**3) * np.exp(-(x**2 + y**2))
        # make these smaller to increase the resolution
        dx, dy = 0.05, 0.05
        x = np.arange(-3.0, 3.0, dx)
        y = np.arange(-3.0, 3.0, dy)
        X, Y = np.meshgrid(x, y)
        # when layering multiple images, the images need to have the same
        # extent.  This does not mean they need to have the same shape, but
        # they both need to render to the same coordinate system determined by
        # xmin, xmax, ymin, ymax.  Note if you use different interpolations
        # for the images their apparent extent could be different due to
        # interpolation edge effects
        extent = np.min(x), np.max(x), np.min(y), np.max(y)
        Z1 = np.add.outer(range(8), range(8)) % 2  # chessboard
        mriplot_window.return_ax().imshow(Z1, cmap=plt.cm.gray,
                  interpolation='nearest', extent=extent)
        Z2 = func3(X, Y)
        mriplot_window.return_ax().imshow(Z2, cmap=plt.cm.viridis, alpha=.9,
                  interpolation='bilinear', extent=extent)
        splitHwidget.addWidget(leftwindow)
        splitHwidget.addWidget(rightwindow )
        print(data_df.head())
        # Scatter the demo values into a flat 255x255 image for panel h1.
        plot_image = np.zeros(255*255)
        plot_image[data_df['iii']] = data_df['ddd']
        h1_window.return_ax().imshow( plot_image.reshape((255,255)))
        h1_window.return_ax().set_xlabel('x')
        h1_window.return_ax().set_ylabel('y')
        h2_window.return_ax().hist(ddd, bins=100)
        h2_window.return_ax().set_xlabel('x')
        h2_window.return_ax().set_ylabel('y')
        self.setCentralWidget(splitHwidget)

    def zoom(self):
        # NOTE(review): self.histtoolbar is never assigned in __init__ --
        # calling this would raise AttributeError; confirm intended wiring.
        self.histtoolbar.zoom()

    def ax_changed(self,ax):
        """Return True when *ax* limits differ from the cached ones.

        NOTE(review): self.lim_dict is never assigned in __init__ -- confirm.
        """
        old_xlim, old_ylim = self.lim_dict[ax]
        print("old xlim", old_xlim, "ylim", old_ylim)
        print("new xlim", ax.get_xlim(), "ylim", ax.get_ylim())
        return np.all(old_xlim == ax.get_xlim()) and np.all(old_ylim == ax.get_ylim())

    def onrelease(self,event):
        """Mouse-release hook reporting which axes changed their limits.

        NOTE(review): self.static_canvas is never assigned in __init__ -- confirm.
        """
        print("Active Toolbar button:",self.histtoolbar._active )
        print("plot release")
        print(event)
        self.static_canvas.flush_events()
        changed_axes = [ax for ax in self.static_canvas.figure.axes if self.ax_changed(ax)]
        not_changed_axes = [ax for ax in self.static_canvas.figure.axes if not self.ax_changed(ax)]
        print("changed_axes",changed_axes)
        print("not_changed_axes",not_changed_axes)
        for ax in changed_axes:
            print("Changed xlim", ax.get_xlim(), "ylim", ax.get_ylim())
if __name__ == "__main__":
    # Stand-alone demo entry point: build the default EPG T2 fitting
    # parameters and launch the Qt application window.
    epgt2fitparams = lm.Parameters()
    # Fat T2/T1 and the echo spacing are held fixed (vary=False); muscle T2
    # and the two amplitudes are the free fit parameters.
    epgt2fitparams.add('T2fat', value = 180.0, min=0, max=5000, vary=False)
    epgt2fitparams.add('T2muscle', value = 35, min=0, max=100, vary=True )
    epgt2fitparams.add('Afat', value = 0.2, min=0, max=10, vary=True )
    epgt2fitparams.add('Amuscle', value = 0.8, min=0, max=10, vary=True )
    epgt2fitparams.add('T1fat', value = 365.0, vary=False)
    epgt2fitparams.add('T1muscle', value = 1400, vary=False)
    epgt2fitparams.add('echo', value = 10.0, vary=False)
    qapp = QtWidgets.QApplication(sys.argv)
    app = ApplicationWindow(epgt2fitparams)
    app.show()
    qapp.exec_()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 6 14:55:05 2018
@author: ERIC
"""
import os
import numpy as np
import pandas as pd
import nibabel
class T2imageData():
    """Holds the directory layout, image volumes and fit-result tables for
    one T2/Dixon MRI session.

    All attributes are initialised to None/"" here and filled in by the
    readin_* / set_* / read_* methods below.
    """
    def __init__(self):
        # Currently displayed position within the T2 volume.
        self.currentSlice = None
        self.currentEcho = None
        # Directory layout (resolved from a results CSV path).
        self.T2imagesDirpath = None
        self.dixonImagesDirpath = None
        self.dixonResultsDirpath = None
        self.T2resultsDirpath = None
        self.root = None
        self.studyName = None
        self.subject = None
        self.session = None
        self.imagedRegion = None
        self.protocol = None
        self.results = None
        self.roiType = None
        self.fitModel = None
        # NOTE(review): roiType is still None at this point, so this is
        # always None -- confirm whether it should track roiType later.
        self.imagedRegionType = self.roiType
        # Image files ("nifti" or "analyze") and their locations.
        self.T2imageType = None
        self.T2MRIimageFilenameAndPath = ""
        self.dixonImageType = None
        self.dixonMRIimageFilenameAndPath = ""
        # Results CSV files.
        self.T2resultsFilenameAndPath = ""
        self.dixonResultsFilenameAndPath = ""
        # Fit parameter shown by default in the GUI.
        self.fittingParam = "T2m"
        # T2 volume geometry: rows, cols, slices, echoes.
        self.numRowsT2 = None
        self.numColsT2 = None
        self.numSlicesT2 = None
        self.numEchoesT2 = None
        # Slice numbers present in each results table.
        self.dixonSlices = None
        self.T2slices = None
        # Image data and results tables.
        self.ImageDataT2 = None
        self.mriSliceIMG = None
        self.t2_data_summary_df = None
        self.dixon_data_summary_df = None
def readin_alldata_from_results_filename(self, fn):
print("inside readin_alldata_from_results_filename")
self.set_dataDir_and_results_filenames(fn)
self.set_T2imageData_filename_and_type()
self.set_dixonImageData_filename_and_type()
print("T2resultsDirpath :: ",self.T2resultsDirpath)
print("dixonResultsDirpath :: ", self.dixonResultsDirpath)
print("T2imagesDirpath :: ", self.T2imagesDirpath)
print("dixonImagesDirpath :: ", self.dixonImagesDirpath)
print("T2imageType :: ", self.T2imageType)
print("T2MRIimageFilenameAndPath :: ", self.T2MRIimageFilenameAndPath)
print("dixonImageType :: ", self.dixonImageType)
print("dixonMRIimageFilenameAndPath ::", self.dixonMRIimageFilenameAndPath)
print("T2resultsFilenameAndPath :: ", self.T2resultsFilenameAndPath)
print("dixonResultsFilenameAndPath :: ", self.dixonResultsFilenameAndPath)
def set_T2imageData_filename_and_type(self):
"""Searches for image data in directory
can be nifti or analyze sets the type and filename"""
print("inside set_T2imageData_filename_and_type")
print("self.T2imagesDirpath", self.T2imagesDirpath)
if self.T2imagesDirpath == None:
self.T2imageType = None
return False
else:
imgFilenameList = [ os.path.join(self.T2imagesDirpath,fn)
for fn in os.listdir(self.T2imagesDirpath)
if "nii" in fn or "img" in fn]
if len(imgFilenameList) == 0:
self.T2imageType = None
self.T2MRIimageFilenameAndPath = None
return False
else:
self.T2MRIimageFilenameAndPath = imgFilenameList[0]
if "nii" in self.T2MRIimageFilenameAndPath:
self.T2imageType = "nifti"
else:
self.T2imageType = "analyze"
return True
def set_dixonImageData_filename_and_type(self):
"""Searches for image data in directory
can be nifti or analyze sets the type and filename
filename must have fatPC. in it"""
print( "inside set_dixonImageData_filename_and_type")
print("self.dixonImagesDirpath",self.dixonImagesDirpath)
if self.dixonImagesDirpath == None:
self.dionImageType = None
return False
else:
imgFilenameList = [ os.path.join(self.dixonImagesDirpath,fn)
for fn in os.listdir(self.dixonImagesDirpath)
if "fatPC." in fn and ("nii" in fn or "img" in fn)]
if len(imgFilenameList) == 0:
self.dixonImageType = None
self.dixonMRIimageFilenameAndPath = None
return False
else:
self.dixonMRIimageFilenameAndPath = imgFilenameList[0]
if "nii" in self.dixonMRIimageFilenameAndPath:
self.dixonImageType = "nifti"
else:
self.dixonImageType = "analyze"
return True
def set_results_dir(self,protocol, resultsDir):
resultsDirpath = None
# resultsDirpath1 = resultsDir
dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
self.imagedRegion,protocol, self.results,self.roiType,self.fitModel)
if os.path.exists(dirpath):
resultsDirpath = dirpath
else:
dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
self.imagedRegion,protocol, self.results,self.roiType)
if os.path.exists(dirpath):
fitModels = [f for f in os.listdir(dirpath)]
if len(fitModels)> 0:
resultsDirpath = os.path.join(dirpath, fitModels[0])
return resultsDir, resultsDirpath
def set_dataDir_and_results_filenames( self, fn):
print("inside set_dataDir_and_results_filenames")
print("fn", fn)
resultsDir, resultsFilename = os.path.split(fn)
print("resultsDir", resultsDir)
print("resultsFilename", resultsFilename)
resultsDirList = resultsDir.split(os.path.sep)
print("resultsDirList",resultsDirList, )
sessionIndex = [ i for i,w in enumerate(resultsDirList) if "sess" in w]
print("sessionIndex",sessionIndex)
if len(sessionIndex):
si = sessionIndex[0]
print("si",si)
print("resultsDirList",resultsDirList)
print("resultsDirList[0]",resultsDirList[0])
# print("resultsDirList[0][-1]",resultsDirList[0][-1])
if len(resultsDirList[0])>0:
if ":" == resultsDirList[0][-1]: # add path seperator if root ends in :
resultsDirList[0] = resultsDirList[0]+os.path.sep
print("resultsDirList[0]", resultsDirList[0])
self.root = os.path.sep.join(resultsDirList[:si-2])
self.studyName = resultsDirList[si-2]
self.subject = resultsDirList[si-1]
self.session = resultsDirList[si]
self.imagedRegion = resultsDirList[si+1]
self.protocol = resultsDirList[si+2]
self.results = resultsDirList[si+3]
self.roiType = imagedRegionType = resultsDirList[si+4]
self.fitModel = resultsDirList[si+5]
print("self.root",self.root)
### create directory paths to T2 and Dixon results and image path
# T2_images_dirPath
# dixon_images_dirPath
# dixon_results_dirPath
# T2_results_dirPath
## T2 image path
dirpath = os.path.join(self.root,self.studyName,self.subject,
self.session,self.imagedRegion,"T2")
if os.path.exists(dirpath):
self.T2imagesDirpath = dirpath
## dixon image path
dirpath = os.path.join(self.root,self.studyName,self.subject,self.session,
self.imagedRegion,"dixon")
if os.path.exists(dirpath):
self.dixonImagesDirpath = dirpath
## set T2 and dixon results path
if self.protocol.lower() == "t2":
self.T2resultsDirpath, self.dixonResultsDirpath, = self.set_results_dir("dixon", resultsDir)
elif self.protocol.lower() == "dixon":
self.dixonResultsDirpath, self.T2resultsDirpath, = self.set_results_dir("T2", resultsDir)
print("self.dixonResultsDirpath", self.dixonResultsDirpath)
print("self.T2resultsDirpath", self.T2resultsDirpath)
## set csv results path name for T2 and dixon
if "T2".lower() in fn.lower():
self.T2resultsFilenameAndPath = fn
resultFilenameList = [ os.path.join(self.dixonResultsDirpath,fi)
for fi in os.listdir(self.dixonResultsDirpath)
if "results." in fi.lower() and (".csv" in fi.lower() )]
if resultFilenameList:
self.dixonResultsFilenameAndPath = resultFilenameList[0]
elif "dixon" in fn.lower():
self.dixonResultsFilenameAndPath = fn
resultFilenameList = [ os.path.join(self.T2resultsDirpath,fi)
for fi in os.listdir(self.T2ResultsDirpath)
if "results." in fi.lower() and (".csv" in fi.lower() )]
if resultFilenameList:
self.T2resultsFilenameAndPath = resultFilenameList[0]
def read_T2_data(self):
print("read_T2_data function entered")
print("self.T2resultsFilenameAndPath", self.T2resultsFilenameAndPath)
if os.path.exists(self.T2resultsFilenameAndPath):
print(self.T2resultsFilenameAndPath, "exists")
self.t2_data_summary_df = pd.read_csv(self.T2resultsFilenameAndPath)
self.T2slices = list(self.t2_data_summary_df["slice"].unique())
return(True)
else:
print(self.T2resultsFilenameAndPath, "not Found" )
return(False)
def read_Dixon_data(self):
print("read_Dixon_data function entered")
print("self.dixonResultsFilenameAndPath",self.dixonResultsFilenameAndPath)
if os.path.exists(self.dixonResultsFilenameAndPath):
print(self.dixonResultsFilenameAndPath, "exists")
self.dixon_data_summary_df = pd.read_csv(self.dixonResultsFilenameAndPath)
self.dixonSlices = list(self.dixon_data_summary_df["slice"].unique())
return(True)
else:
print(self.dixonResultsFilenameAndPath, "not Found" )
self.dixon_data_summary_df = pd.DataFrame()
return(False)
    def read_T2_img_hdr_files(self):
        """Load the multi-echo T2 image volume from disk.

        Reads the nifti/analyze file located earlier, reorients it for
        display, caches it via ``update_imageDataT2`` and resets the shown
        slice/echo to 0.  Returns True on success, False when the image
        file is missing.
        """
        if os.path.exists(self.T2MRIimageFilenameAndPath):
            print(self.T2MRIimageFilenameAndPath, " found")
            self.t2_imghdr = nibabel.load(self.T2MRIimageFilenameAndPath)
            # NOTE(review): get_data() is deprecated in newer nibabel
            # releases (use get_fdata()) -- confirm the pinned version.
            image_data = self.t2_imghdr.get_data()
            # Swap row/column axes, then flip vertically, for display orientation.
            image_data = np.flipud(image_data.swapaxes(1,0))
            self.update_imageDataT2(image_data)
            # Volume is expected to be 4-D: (rows, cols, slices, echoes).
            [self.numRowsT2, self.numColsT2, self.numSlicesT2, self.numEchoesT2] = self.ImageDataT2.shape
#            self.img1 = np.zeros((self.numRowsT2, self.numColsT2,3), dtype=np.double)
            self.mriSliceIMG = np.zeros((self.numRowsT2, self.numColsT2), dtype=np.double)
#            self.img1[:,:,0] = self.ImageDataT2[:,:,0,0]/(self.ImageDataT2[:,:,0,0].max()*2)
#            self.img1[:,:,0] = self.ImageDataT2[:,:,0,0]
            # Display echo 0 of slice 0 first (the *1.0 forces a float copy).
            self.mriSliceIMG = self.ImageDataT2[:,:,0,0]*1.0
            self.currentEcho = 0
            self.currentSlice = 0
#            mainWindow.setWindowTitle(self.study_name)
            return(True)
        else:
            return(False)
    def update_imageDataT2(self, imageData):
        """Store *imageData* as the current T2 volume (rows, cols, slices,
        echoes -- shape assumption based on read_T2_img_hdr_files)."""
        self.ImageDataT2 = imageData
    def overlayRoisOnImage(self, slice_pos, roi_data):
        """Prepare ``self.maskedROIs`` for overlaying ROI values on a slice.

        *roi_data* is a column name, looked up first in the T2 results table
        and then in the Dixon results table; pixels with no value (0) are
        masked out.  NOTE(review): the stored array is flat
        (numRowsT2*numColsT2,) -- the caller presumably reshapes it for
        display; confirm.  Raises AttributeError if read_T2_data has not run
        yet (``t2_data_summary_df`` would still be None).
        """
        print("Entering overlayRoisOnImage", slice_pos)
        print("roi_data",roi_data)
        if roi_data in self.t2_data_summary_df.columns:
            # T2 parameter: scatter values into the flat image by pixel_index.
            roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
            t2_data_query_df = self.t2_data_summary_df.query('slice == {}'.format(str(slice_pos)))
            roi_image_layer[t2_data_query_df.pixel_index] = t2_data_query_df[roi_data]
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
        elif roi_data in self.dixon_data_summary_df.columns:
#            print("slice_pos", slice_pos)
#            print("self.T2slices.index(slice_pos)",self.T2slices.index(slice_pos))
#            print("self.dixonSlices[self.T2slices.index(slice_pos)]",self.dixonSlices[self.T2slices.index(slice_pos)])
            # Dixon parameter: translate the displayed T2 slice number into
            # the Dixon slice number by list position.
            if slice_pos in self.T2slices:
                dixon_slice = self.dixonSlices[self.T2slices.index(slice_pos)]
            else:
                dixon_slice = slice_pos
            roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
            #df_t2 = self.t2_data_summary_df[roi_data, 'pixel_index','roi'].groupby('slice')
            dixon_data_query_df = self.dixon_data_summary_df.query('slice == {}'.format(str(dixon_slice)))
#            roi_image_layer[dixon_data_query_df.pixels] = dixon_data_query_df[roi_data]/dixon_data_query_df[roi_data].max()
            roi_image_layer[dixon_data_query_df.pixel_index] = dixon_data_query_df[roi_data]
#            self.img1[:,:,2] = roi_image_layer.reshape((self.numRowsT2,self.numColsT2))
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
        else:
            # Unknown column: overlay becomes fully masked (all zeros).
            roi_image_layer = np.zeros(self.numRowsT2*self.numColsT2)
            self.maskedROIs = np.ma.masked_where(roi_image_layer == 0, roi_image_layer)
|
{
"imported_by": [],
"imports": [
"/ImageData.py"
]
}
|
DiegoArcelli/BlocksWorld
|
/launch.py
|
import tkinter as tk
from tkinter.filedialog import askopenfilename
from PIL import Image, ImageTk
from load_state import prepare_image
from utils import draw_state
from blocks_world import BlocksWorld
from search_algs import *
# Implements the graphical interface used to run the program.
class Window(tk.Frame):
    """Main application window.

    Lets the user pick the images for the initial and goal states,
    choose a search algorithm, and launch the search.
    """

    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.pack()
        self.initial_state = None
        self.goal_state = None
        self.create_widgets()
        self.create_images("insert_image.png", "insert_image.png")

    def create_widgets(self):
        """Build the labels, the file-picker buttons and the algorithm menu."""
        initial_label = tk.Label(self, text = "Seleziona stato iniziale:")
        goal_label = tk.Label(self, text = "Seleziona stato finale:")
        initial_label.grid(row = 0, column = 0, padx = 10, pady = 10)
        goal_label.grid(row = 0, column = 2, padx = 10, pady = 10)
        initial_button = tk.Button(self, text="Seleziona file", command=self.open_initial)
        goal_button = tk.Button(self, text="Seleziona file", command=self.open_goal)
        initial_button.grid(row = 1, column = 0, padx = 10, pady = 10)
        goal_button.grid(row = 1, column = 2, padx = 10, pady = 10)
        alg_label = tk.Label(self, text = "Seleziona algoritmo di ricerca:")
        alg_label.grid(row = 0, column = 1, padx = 10, pady = 10)
        frame = tk.Frame(self)
        frame.grid(row = 1, column = 1, padx = 10, pady = 10)
        self.selected = tk.StringVar(self)
        self.selected.set("BFS")
        # pack() returns None, so keeping its result in a variable was useless.
        tk.OptionMenu(frame, self.selected, "BFS", "DFS", "IDS", "UCS", "A*", "RBFS", command=self.read_algorithm).pack()
        tk.Button(frame, text="Start search", command=self.start_search).pack()

    def create_images(self, initial, goal):
        """Show the two preview images (paths are relative to ./images/)."""
        self.initial_image_path = initial
        self.initial_image = ImageTk.PhotoImage(Image.open("./images/" + initial).resize((300, 300)))
        initial_image_label = tk.Label(self, image=self.initial_image)
        initial_image_label.grid(row = 2, column = 0, padx = 10, pady = 10)
        self.goal_image_path = goal
        self.goal_image = ImageTk.PhotoImage(Image.open("./images/" + goal).resize((300, 300)))
        goal_image_label = tk.Label(self, image=self.goal_image)
        goal_image_label.grid(row = 2, column = 2, padx = 10, pady = 10)

    def open_initial(self):
        """Pick the initial-state image, detect its state and preview it."""
        self.initial_file = askopenfilename()
        # Bug fix: askopenfilename() returns "" (or an empty tuple on some
        # platforms) when the dialog is cancelled; the original only
        # compared against (). `not` covers both falsy values.
        if not self.initial_file:
            return
        self.initial_state = prepare_image(self.initial_file, False)
        print(self.initial_state)
        draw_state(self.initial_state, "initial")
        self.create_images("/temp/initial.jpg", self.goal_image_path)

    def read_algorithm(self, alg):
        """OptionMenu callback; returns the chosen algorithm name."""
        return alg

    def open_goal(self):
        """Pick the goal-state image, detect its state and preview it."""
        self.goal_file = askopenfilename()
        if not self.goal_file:
            return
        self.goal_state = prepare_image(self.goal_file, False)
        print(self.goal_state)
        draw_state(self.goal_state, "goal")
        self.create_images(self.initial_image_path, "/temp/goal.jpg")

    def start_search(self):
        """Run the selected search algorithm and print/draw the solution."""
        # Bug fix: require BOTH states before searching. The original used
        # `and`, which allowed the search to start when only one of the
        # two states had been selected.
        if self.goal_state is None or self.initial_state is None:
            return
        alg = self.selected.get()
        problem = BlocksWorld(self.initial_state, self.goal_state)
        print("Inizio ricerca:")
        if alg == "BFS":
            problem.solution(graph_bfs(problem).solution())
        if alg == "A*":
            problem.solution(a_star(problem, lambda n: problem.misplaced_blocks(n)).solution())
        if alg == "DFS":
            problem.solution(graph_dfs(problem).solution())
        if alg == "IDS":
            problem.solution(ids(problem).solution())
        if alg == "RBFS":
            problem.solution(rbfs(problem, lambda n: problem.misplaced_blocks(n)).solution())
        if alg == "UCS":
            problem.solution(a_star(problem, lambda n: problem.depth(n)).solution())
# Create the main window and start the Tk event loop.
root = tk.Tk()
root.title("Blocks World")
root.resizable(0, 0)
app = Window(master=root)
app.mainloop()
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import glob
from tensorflow import keras
from math import ceil
deteced = [np.array([]) for x in range(6)]  # images of the detected digits (index = digit - 1)
poisitions = [None for x in range(6)]  # bounding boxes of the digits in the image
debug_mode = False
model = keras.models.load_model("./model/model.h5")  # loads the model trained on the MNIST dataset
# Function that recognises the digit contained in the image
# passed as a parameter.
def predict(image):
    """Classify a grayscale digit crop with the MNIST model.

    The crop is centred on a slightly larger black square, normalised to
    [0, 1] and resized to 28x28 before being fed to the network.
    Returns the predicted class minus one, or -1 for class 0
    (presumably "no digit" — verify against the trained model).
    """
    h, w = image.shape
    l = int(max(image.shape)*1.2)
    n_h = int((l - h)/2)
    n_w = int((l - w)/2)
    # Centre the crop on a square canvas 20% larger than its longest side.
    img = np.zeros((l, l), np.uint8)
    img[n_h : n_h + h, n_w : n_w + w] = image
    img = (img / 255).astype('float64')
    img = cv.resize(img, (28, 28), interpolation = cv.INTER_AREA)
    _in = np.array([img])
    _in = np.expand_dims(_in, -1)  # add the trailing channel dimension
    digit = np.argmax(model.predict(_in))
    if debug_mode:
        print(digit)
        show(img)
    return digit - 1 if digit > 0 else -1
# Display on screen the image passed as a parameter.
def show(img):
    """Render *img* full-screen with matplotlib, hiding the axis ticks."""
    plt.get_current_fig_manager().full_screen_toggle()
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img)
    plt.show()
# First image adjustments, consisting of blur application.
def preprocess(image):
    """Denoise with a median blur then a Gaussian blur, and invert."""
    blurred = cv.GaussianBlur(cv.medianBlur(image, 3), (3, 3), 0)
    return 255 - blurred
def postprocess(image):
    """Median-filter twice, morphologically open, then erode the image."""
    smoothed = cv.medianBlur(cv.medianBlur(image, 5), 5)
    open_kernel = np.ones((3, 3), np.uint8)
    opened = cv.morphologyEx(smoothed, cv.MORPH_OPEN, open_kernel)
    erode_kernel = np.ones((3, 3), np.uint8)
    return cv.erode(opened, erode_kernel, iterations=2)
def get_block_index(image_shape, yx, block_size):
    """Return meshgrid index arrays for the block of half-size
    *block_size* centred at *yx*, clipped to the image bounds."""
    y0, x0 = yx
    rows = np.arange(max(0, y0 - block_size),
                     min(image_shape[0], y0 + block_size))
    cols = np.arange(max(0, x0 - block_size),
                     min(image_shape[1], x0 + block_size))
    return np.meshgrid(rows, cols)
def adaptive_median_threshold(img_in):
    """Binarise a block: pixels less than ``threshold`` above the block
    median become white (255); everything else stays black (0)."""
    threshold = 40
    med = np.median(img_in)
    out = np.zeros_like(img_in)
    mask = (img_in - med) < threshold
    out[mask] = 255
    return out
def block_image_process(image, block_size):
    """Threshold the image tile by tile using the local block median."""
    out_image = np.zeros_like(image)
    for top in range(0, image.shape[0], block_size):
        for left in range(0, image.shape[1], block_size):
            block_idx = get_block_index(image.shape, (top, left), block_size)
            out_image[block_idx] = adaptive_median_threshold(image[block_idx])
    return out_image
def clean(image):
    """Remove small noise contours by painting them white (255) in place."""
    contours, hierarchy = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for contour in contours:
        approx = cv.approxPolyDP(
            contour, 0.001 * cv.arcLength(contour, True), True)
        x, y, w, h = cv.boundingRect(approx)
        # NOTE(review): shape[::-1] yields (width, height) but search_noise
        # unpacks (i_h, i_w) — the two axes look swapped; the area check is
        # unaffected, the box check compares swapped axes — confirm intent.
        if search_noise(contour, approx, image.shape[::-1]):
            cv.drawContours(image, [approx], 0, 255, -1)
    return image
def search_noise(contour, approx, image_size):
    """Return True when the contour is small enough (both in area and in
    bounding-box size, relative to the whole image) to count as noise."""
    i_h, i_w = image_size
    _, _, w, h = cv.boundingRect(approx)
    if cv.contourArea(contour) >= i_w * i_h / 1000:
        return False
    return w < i_w / 50 and h < i_h / 50
def find_digits(image, org_image, org):
    """Classify each outermost contour of *image* and record the digits.

    Fills the module-level ``deteced`` (crop taken from *org*) and
    ``poisitions`` (bounding box) lists, indexed by the predicted digit.
    """
    contours, hierarchy = cv.findContours(image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    i = 0
    for contour in contours:
        approx = cv.approxPolyDP(contour, 0.001 * cv.arcLength(contour, True), True)
        x, y, w, h = cv.boundingRect(approx)
        # Only contours without a parent (outermost) are candidate digits.
        if hierarchy[0][i][3] == -1:
            prev = predict(org_image[y:y+h, x:x+w])
            if prev != -1:
                deteced[prev] = org[y:y+h, x:x+w]
                poisitions[prev] = (x, y, x + w, y + h)
        i += 1
# Function that locates the box containing the blocks and isolates the digits.
def find_box(image):
    """Find the board contour and return ``(digit_mask, bounding_box)``.

    The mask is a full-size image in which the digit strokes inside the
    board are white (255) and everything else is black.
    """
    o_h, o_w = image.shape[0:2]
    contours, hierarchy = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # The largest contour is the image border; the second largest is the box.
    contours.sort(reverse=True, key=lambda c: cv.contourArea(c))
    contour = contours[1]
    approx = cv.approxPolyDP(
        contour, 0.001 * cv.arcLength(contour, True), True)
    x, y, w, h = cv.boundingRect(approx)
    box = (x, y, x + w, y + h)
    img = image[y:y+h, x:x+w]
    sub = img.copy()
    # Pad the crop with a 25px white border so border-touching shapes close.
    bg = ~np.zeros((h + 50, w + 50), np.uint8)
    bg[25: 25 + h, 25: 25 + w] = img
    img = bg
    i = 0
    i_h, i_w = img.shape[0:2]
    tot = np.zeros(shape=(i_h, i_w))
    if debug_mode:
        print(image)
    contours, hierarchy = cv.findContours(img, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for contour in contours:
        approx = cv.approxPolyDP(
            contour, 0.001 * cv.arcLength(contour, True), True)
        # Fill first-level children white and their holes black.
        if hierarchy[0][i][3] == 0:
            cv.drawContours(tot, [approx], 0, 255, -1)
        if hierarchy[0][i][3] == 1:
            cv.drawContours(tot, [approx], 0, 0, -1)
        i += 1
    tot = tot[25: 25 + h, 25: 25 + w]  # drop the padding again
    kernel = np.ones((5, 5), np.uint8)
    tot = cv.dilate(tot, kernel, iterations=3)
    # Promote to uint32 so the sum below cannot overflow uint8.
    tot = tot.astype('uint32')
    sub = sub.astype('uint32')
    # Pixels that are black in both layers become white in the result.
    res = sub + tot
    res = np.where(res == 0, 255, 0)
    result = np.zeros((o_h, o_w), np.uint8)
    result[y:y+h, x:x+w] = res
    if debug_mode:
        show(result)
    return (result, box)
def get_block_borders(dims, image):
    """Grow the digit bounding box *dims* outwards until the block's
    white (255) border is hit in each of the four directions.

    *image* is expected to be a binary image with white block borders;
    the walks do not bounds-check, so a missing border would run off
    the array — presumably guaranteed by find_box; verify.
    """
    x_i, y_i, x_f, y_f = dims
    kernel = np.ones((5, 5), np.uint8)
    image = cv.erode(image, kernel, iterations=1)
    y_m = (y_f + y_i) // 2
    x_m = (x_f + x_i) // 2
    # Walk left from the box edge until a white pixel is reached.
    t = x_i - 1
    while image[y_m, t] != 255:
        t-=1
    x_i = t
    # Walk right.
    t = x_f + 1
    while image[y_m, t] != 255:
        t+=1
    x_f = t
    # Walk up.
    t = y_i - 1
    while image[t, x_m] != 255:
        t-=1
    y_i = t
    # Walk down.
    t = y_f + 1
    while image[t, x_m] != 255:
        t+=1
    y_f = t
    return (x_i, y_i, x_f, y_f)
def process_image_file(filename):
    """Run the full detection pipeline on the image at *filename*.

    Resets the module-level detection state, thresholds the image,
    locates the board and the digits, and expands each digit's bounding
    box to its block borders.  Returns the board bounding box.
    """
    global deteced, poisitions, explored, debug_mode
    block_size = 50
    deteced = [np.array([]) for x in range(6)]
    poisitions = [None for x in range(6)]
    explored = []
    image_in = cv.cvtColor(cv.imread(filename), cv.COLOR_BGR2GRAY)
    if debug_mode:
        show(image_in)
    image_in_pre = preprocess(image_in)
    image_out = block_image_process(image_in_pre, block_size)
    image_out = postprocess(image_out)
    image_out = clean(image_out)
    if debug_mode:
        show(image_out)
    digits, box = find_box(image_out)
    find_digits(digits, ~image_out, image_in)
    # Expand every detected digit's box to the surrounding block border.
    for i in range(6):
        if deteced[i].size > 0:
            image = deteced[i]
            x, y, w, h = get_block_borders(poisitions[i], ~image_out)
            poisitions[i] = (x, y, w, h)
            cv.rectangle(image_in, (x, y), (w, h), 255, 2)
    if debug_mode:
        show(image_in)
    return box
def check_intersection(values):
    """Return True when the first interval strictly straddles the
    midpoint of the second interval.

    *values* is (v1_start, v1_end, v2_start, v2_end).
    """
    v1_i, v1_f, v2_i, v2_f = values
    mid = (v2_i + v2_f) // 2
    return v1_i < mid < v1_f
def create_state(poisitions, box):
    """Build the state tuple from the digit bounding boxes.

    The state is ``((block, row, col), ..., board_width)``: one triple
    per detected block plus the estimated number of board columns.

    NOTE(review): ``mean_points`` is appended only for non-None entries
    but is later indexed with the block number (``e-1``); if a digit
    lower than the highest detected one is missing, the indices go out
    of sync — confirm all six digits are always detected.
    """
    cols = [[] for x in range(6)]
    mean_points = []
    for i in range(6):
        if poisitions[i] is not None:
            x1_i, y1_i, x1_f, y1_f = poisitions[i]
            mean_points.append(((x1_f + x1_i) // 2, ((y1_f + y1_i) // 2)))
            # Group block i+1 with every block overlapping it horizontally.
            c = [i+1]
            for j in range(6):
                if poisitions[j] is not None and j != i:
                    x2_i, y2_i, x2_f, y2_f = poisitions[j]
                    if check_intersection((x1_i, x1_f, x2_i, x2_f)):
                        c.append(j+1)
            c.sort()
            cols[i] = tuple([*c])
        else:
            cols[i] = ()
    # Deduplicate the per-block column tuples into one tuple per column.
    temp_cols = list(set(tuple(cols)))
    if () in temp_cols:
        temp_cols.remove(())
    cols = []
    for t_col in temp_cols:
        col = list(t_col)
        # Order each column bottom-up (largest y coordinate first).
        col.sort(reverse=True, key=lambda e: mean_points[e-1][1])
        cols.append(tuple(col))
    # Order the columns left to right by their bottom block's x position.
    cols.sort(key=lambda e: mean_points[e[0]-1][0])
    bottoms = [col[0] for col in cols]
    # Estimate the gaps (in block widths) before, between and after columns.
    distances = []
    xb_i, _, xb_f, _ = box
    x_i, _, x_f, _ = poisitions[bottoms[0]-1]
    dist = abs(x_i - xb_i)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    for i in range(len(bottoms)-1):
        x1_i, _, x1_f, _ = poisitions[bottoms[i]-1]
        x2_i, _, _, _ = poisitions[bottoms[i+1]-1]
        dist = abs(x2_i - x1_f)
        dist = dist / (x1_f - x1_i)
        distances.append(dist)
    x_i, _, x_f, _ = poisitions[bottoms[-1]-1]
    dist = abs(xb_f - x_f)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    # Round every gap to the nearest whole number of block widths.
    for i in range(len(distances)):
        dist = distances[i]
        if dist - int(dist) >= 0.5:
            distances[i] = int(dist) + 1
        else:
            distances[i] = int(dist)
    n = sum(distances) + len(cols)
    # Lay the columns out at their estimated grid positions.
    i = distances[0]
    state = []
    pos = 1
    for col in cols:
        j = 0
        for block in col:
            state.append((block, j, i))
            j += 1
        i += distances[pos] + 1
        pos += 1
    state.append(n)
    return tuple(state)
def prepare_image(file_path, debug):
    """Process the image at *file_path* and return the detected state
    tuple; *debug* toggles the module-level debug visualisation."""
    global debug_mode
    debug_mode = bool(debug)
    box = process_image_file(file_path)
    return create_state(poisitions, box)
--- FILE SEPARATOR ---
import heapq
import functools
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
class PriorityQueue:
    """A queue in which the minimum (or maximum) element, as determined
    by ``f`` and ``order``, is returned first.

    If order is 'min', the item with minimum f(x) is popped first; if
    'max', the item with maximum f(x).  Also supports dict-like lookup
    keyed on the stored item.

    Heap entries are (priority, counter, item) triples: the
    monotonically increasing counter breaks priority ties so items are
    never compared with ``<`` (the original (priority, item) pairs
    raised TypeError for non-comparable items with equal priority).
    """

    def __init__(self, order='min', f=lambda x: x):
        self.heap = []
        self._count = 0  # tie-breaker: insertion order
        if order == 'min':
            self.f = f
        elif order == 'max':  # now item with max f(x)
            self.f = lambda x: -f(x)  # will be popped first
        else:
            raise ValueError("Order must be either 'min' or 'max'.")

    def append(self, item):
        """Insert item at its correct position."""
        heapq.heappush(self.heap, (self.f(item), self._count, item))
        self._count += 1

    def extend(self, items):
        """Insert each item in items at its correct position."""
        for item in items:
            self.append(item)

    def pop(self):
        """Pop and return the item (with min or max f(x) value)
        depending on the order."""
        if self.heap:
            return heapq.heappop(self.heap)[2]
        raise Exception('Trying to pop from empty PriorityQueue.')

    def __len__(self):
        """Return current capacity of PriorityQueue."""
        return len(self.heap)

    def __contains__(self, key):
        """Return True if the key is in PriorityQueue."""
        return any(item == key for _, _, item in self.heap)

    def __getitem__(self, key):
        """Returns the first value associated with key in PriorityQueue.
        Raises KeyError if key is not present."""
        for value, _, item in self.heap:
            if item == key:
                return value
        raise KeyError(str(key) + " is not in the priority queue")

    def __delitem__(self, key):
        """Delete the first occurrence of key."""
        try:
            del self.heap[[item == key for _, _, item in self.heap].index(True)]
        except ValueError:
            raise KeyError(str(key) + " is not in the priority queue")
        heapq.heapify(self.heap)

    def get_item(self, key):
        """Returns the first node associated with key in PriorityQueue.
        Raises KeyError if key is not present."""
        for _, _, item in self.heap:
            if item == key:
                return item
        raise KeyError(str(key) + " is not in the priority queue")
def is_in(elt, seq):
    """Like ``elt in seq``, but compares with identity (`is`), not `==`."""
    for candidate in seq:
        if candidate is elt:
            return True
    return False
def memoize(fn, slot=None, maxsize=32):
    """Memoize fn: remember the computed value for any argument list.

    If *slot* is given, the result is cached on that attribute of the
    first argument; otherwise functools.lru_cache caches by arguments.
    """
    if not slot:
        return functools.lru_cache(maxsize=maxsize)(fn)

    def memoized_fn(obj, *args):
        if not hasattr(obj, slot):
            setattr(obj, slot, fn(obj, *args))
        return getattr(obj, slot)
    return memoized_fn
def draw_state(state, file_path):
    """Render *state* as an image and save it to ./images/temp/<file_path>.jpg.

    *state* is ``((block, row, col), ..., board_width)``; each block is
    drawn as its 100x100 digit image inside a white frame.
    """
    blocks = [*state[0:-1]]
    w = state[-1]
    blocks.sort(key=lambda l: l[1], reverse=True)
    h = blocks[0][1]  # height (0-based) of the tallest column
    image = np.zeros(((h+1)*100, w*100), np.uint8)
    for block in blocks:
        n, i, j = block
        i = h - i  # flip: row 0 is the bottom of the board
        digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
        digit = cv.resize(digit, (100, 100))
        image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
    # Pad vertically so the canvas is always (number of blocks) rows tall.
    size = (len(state) - 1)*100
    padded = np.zeros((size, w*100), np.uint8)
    padded[size - (h+1)*100 : size, :] = image
    h = len(state) - 1
    # Draw a 10px white frame around the board.
    bg = np.zeros((h*100 + 40, w*100 + 40), np.uint8)
    bg[20: h*100 + 20, 20: w*100 + 20] = padded
    bg[0:10, :] = 255
    bg[h*100 + 30 : h*100 + 40, :] = 255
    bg[:, 0:10] = 255
    bg[h*100 + 30 : h*100 + 40, :] = 255
    bg[:,w*100 + 30 : w*100 + 40] = 255
    # Centre the framed board on a square canvas, then save inverted.
    w, h = (w*100 + 40, h*100 + 40)
    l = max(w, h)
    adjust = np.zeros((l, l), np.uint8)
    d_w = (l - w) // 2
    d_h = (l - h) // 2
    adjust[d_h: d_h + h, d_w: d_w + w] = bg
    cv.imwrite("./images/temp/" + str(file_path) + ".jpg", ~adjust)
--- FILE SEPARATOR ---
from aima3.search import *
from utils import *
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# Implementation of the Blocks World problem on top of AIMA's Problem class.
class BlocksWorld(Problem):
    """AIMA search problem for the blocks world.

    A state is a tuple ``((block, row, col), ..., board_width)``: one
    triple per block plus the number of board columns.  An action is the
    (block, row, col) triple the moved block ends up in.
    """

    def __init__(self, initial, goal):
        super().__init__(initial, goal)

    def get_blocks_number(self):
        """Return the length of the initial state tuple.

        NOTE(review): this counts the trailing board-width entry too —
        confirm callers expect that.
        """
        return len(self.initial)

    def _columns(self, state):
        """Map each occupied column index to its topmost (block, row, col)."""
        columns = {}
        for n, i, j in state[0:-1]:
            if j not in columns or i > columns[j][1]:
                columns[j] = (n, i, j)
        return columns

    def actions(self, state):
        """Return every legal move: each top-of-column block may go on
        top of any other occupied column or onto an empty column."""
        size = state[-1]
        columns = self._columns(state)
        actions = []
        for n, i, j in list(columns.values()):
            for col in range(size):
                if col == j:
                    continue
                if col in columns:
                    actions.append((n, columns[col][1] + 1, col))
                else:
                    actions.append((n, 0, col))
        return actions

    def result(self, state, actions):
        """Return the state obtained by moving the block named in
        *actions* (a (block, row, col) triple) to its new position."""
        blocks = [*state[0:-1]]
        size = state[-1]
        to_delete = ()
        for block in blocks:
            if block[0] == actions[0]:
                to_delete = block
        blocks.remove(to_delete)
        blocks.append(actions)
        blocks.append(size)
        return tuple(blocks)

    def goal_test(self, state):
        """True when every block is at its goal (row, col); the board
        width entry is deliberately ignored."""
        op_1 = sorted(state[0:-1], key=lambda l: l[0])
        op_2 = sorted(self.goal[0:-1], key=lambda l: l[0])
        return str(op_1) == str(op_2)

    def get_movable(self, state):
        """Return the topmost (movable) block of each column."""
        return list(self._columns(state).values())

    def misplaced_blocks(self, node):
        """Heuristic: number of blocks not at their goal (row, col)."""
        target = sorted(self.goal[0:-1], key=lambda l: l[0])
        value = 0
        for n, i, j in node.state[0:-1]:
            if target[n-1][1:3] != (i, j):
                value += 1
        # if block not in self.get_movable(node.state):
        #     value += 1
        return value

    def depth(self, node):
        """Cost function used for UCS: the node's search-tree depth."""
        return node.depth

    def solution(self, actions, output=True):
        """Print (and optionally draw) the action sequence leading from
        the initial state to the goal.

        Bug fix: the original guard ``if len(actions) is None`` could
        never be true (len returns an int); a missing solution is now
        detected with ``actions is None`` before len is ever called.
        """
        if actions is None:
            return
        state = self.initial
        successor = None
        n = 1
        print("Lunghezza soluzione: " + str(len(actions)))
        for action in actions:
            print(action)
            successor = self.result(state, action)
            if output:
                # Show the before/after pair of states side by side.
                figue_1 = self.draw_state(state)
                figue_2 = self.draw_state(successor)
                _, axarr = plt.subplots(1, 2)
                axarr[0].imshow(figue_1, cmap=plt.cm.binary)
                axarr[0].set_xticks([])
                axarr[0].set_yticks([])
                axarr[0].set_xlabel(f"\nStato {n}")
                axarr[1].imshow(figue_2, cmap=plt.cm.binary)
                axarr[1].set_xticks([])
                axarr[1].set_yticks([])
                axarr[1].set_xlabel(f"\nStato {n+1}")
                figManager = plt.get_current_fig_manager()
                figManager.full_screen_toggle()
                plt.show()
            state = successor
            n += 1

    def draw_state(self, state):
        """Render *state* as a numpy image (digit tiles on black)."""
        blocks = [*state[0:-1]]
        w = state[-1]
        blocks.sort(key=lambda l: l[1], reverse=True)
        h = blocks[0][1]
        image = np.zeros(((h+1)*100, w*100), np.uint8)
        for block in blocks:
            n, i, j = block
            i = h - i  # row 0 is the bottom of the board
            digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
            digit = cv.resize(digit, (100, 100))
            image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
        size = (len(state) - 1)*100
        adjust = np.zeros((size, w*100), np.uint8)
        adjust[size - (h+1)*100 : size, :] = image
        return adjust
|
{
"imported_by": [],
"imports": [
"/load_state.py",
"/utils.py",
"/blocks_world.py"
]
}
|
DiegoArcelli/BlocksWorld
|
/main.py
|
from PIL import Image, ImageTk
from load_state import prepare_image
from utils import draw_state
from blocks_world import BlocksWorld
from search_algs import *
import argparse
from inspect import getfullargspec
# Command-line script entry point for the program.
if __name__ == "__main__":
    # Map CLI names to the search functions provided by search_algs.
    search_algs = {
        "astar": a_star,
        "ucs": ucs,
        "rbfs": rbfs,
        "bfs": graph_bfs,
        "dfs": graph_dfs,
        "ids": ids
    }
    parser = argparse.ArgumentParser(description="Blocks World")
    parser.add_argument("--initial", "-i", type=str, default=None, required=True, help="The image representing the initial state")
    parser.add_argument("--goal", "-g", type=str, default=None, required=True, help="The image representing the goal state")
    parser.add_argument("--algorithm", "-a", type=str, default=None, required=True, help="The search algorithm used")
    parser.add_argument("--debug", "-d", default=False, required=False, action='store_true', help="Shows the steps of the image processing")
    parser.add_argument("--output", "-o", default=False, required=False, action='store_true', help="The solution is printed graphically")
    args = vars(parser.parse_args())
    initial_state_path = args["initial"]
    goal_state_path = args["goal"]
    search_alg = args["algorithm"]
    debug = args["debug"]
    output = args["output"]
    # Detect the two board states from the input images.
    initial_state = prepare_image(initial_state_path, debug)
    goal_state = prepare_image(goal_state_path, debug)
    print(initial_state)
    print(goal_state)
    # Evaluation functions for the informed algorithms.
    functions = {
        "ucs": lambda n: problem.depth(n),
        "astar": lambda n: problem.misplaced_blocks(n),
        "rbfs": lambda n: problem.misplaced_blocks(n)
    }
    problem = BlocksWorld(initial_state, goal_state)
    # Informed algorithms take (problem, f); uninformed take only (problem).
    if len(getfullargspec(search_algs[search_alg]).args) == 2:
        problem.solution(search_algs[search_alg](problem, functions[search_alg]).solution(), output)
    else:
        problem.solution(search_algs[search_alg](problem).solution(), output)
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import glob
from tensorflow import keras
from math import ceil
deteced = [np.array([]) for x in range(6)]  # images of the detected digits (index = digit - 1)
poisitions = [None for x in range(6)]  # bounding boxes of the digits in the image
debug_mode = False
model = keras.models.load_model("./model/model.h5")  # loads the model trained on the MNIST dataset
# Function that recognises the digit contained in the image
# passed as a parameter.
def predict(image):
    """Classify a grayscale digit crop with the MNIST model.

    The crop is centred on a slightly larger black square, normalised to
    [0, 1] and resized to 28x28 before being fed to the network.
    Returns the predicted class minus one, or -1 for class 0
    (presumably "no digit" — verify against the trained model).
    """
    h, w = image.shape
    l = int(max(image.shape)*1.2)
    n_h = int((l - h)/2)
    n_w = int((l - w)/2)
    # Centre the crop on a square canvas 20% larger than its longest side.
    img = np.zeros((l, l), np.uint8)
    img[n_h : n_h + h, n_w : n_w + w] = image
    img = (img / 255).astype('float64')
    img = cv.resize(img, (28, 28), interpolation = cv.INTER_AREA)
    _in = np.array([img])
    _in = np.expand_dims(_in, -1)  # add the trailing channel dimension
    digit = np.argmax(model.predict(_in))
    if debug_mode:
        print(digit)
        show(img)
    return digit - 1 if digit > 0 else -1
# Display on screen the image passed as a parameter.
def show(img):
    """Render *img* full-screen with matplotlib, hiding the axis ticks."""
    plt.get_current_fig_manager().full_screen_toggle()
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img)
    plt.show()
# First image adjustments, consisting of blur application.
def preprocess(image):
    """Denoise with a median blur then a Gaussian blur, and invert."""
    blurred = cv.GaussianBlur(cv.medianBlur(image, 3), (3, 3), 0)
    return 255 - blurred
def postprocess(image):
    """Median-filter twice, morphologically open, then erode the image."""
    smoothed = cv.medianBlur(cv.medianBlur(image, 5), 5)
    open_kernel = np.ones((3, 3), np.uint8)
    opened = cv.morphologyEx(smoothed, cv.MORPH_OPEN, open_kernel)
    erode_kernel = np.ones((3, 3), np.uint8)
    return cv.erode(opened, erode_kernel, iterations=2)
def get_block_index(image_shape, yx, block_size):
    """Return meshgrid index arrays for the block of half-size
    *block_size* centred at *yx*, clipped to the image bounds."""
    y0, x0 = yx
    rows = np.arange(max(0, y0 - block_size),
                     min(image_shape[0], y0 + block_size))
    cols = np.arange(max(0, x0 - block_size),
                     min(image_shape[1], x0 + block_size))
    return np.meshgrid(rows, cols)
def adaptive_median_threshold(img_in):
    """Binarise a block: pixels less than ``threshold`` above the block
    median become white (255); everything else stays black (0)."""
    threshold = 40
    med = np.median(img_in)
    out = np.zeros_like(img_in)
    mask = (img_in - med) < threshold
    out[mask] = 255
    return out
def block_image_process(image, block_size):
    """Threshold the image tile by tile using the local block median."""
    out_image = np.zeros_like(image)
    for top in range(0, image.shape[0], block_size):
        for left in range(0, image.shape[1], block_size):
            block_idx = get_block_index(image.shape, (top, left), block_size)
            out_image[block_idx] = adaptive_median_threshold(image[block_idx])
    return out_image
def clean(image):
    """Remove small noise contours by painting them white (255) in place."""
    contours, hierarchy = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for contour in contours:
        approx = cv.approxPolyDP(
            contour, 0.001 * cv.arcLength(contour, True), True)
        x, y, w, h = cv.boundingRect(approx)
        # NOTE(review): shape[::-1] yields (width, height) but search_noise
        # unpacks (i_h, i_w) — the two axes look swapped; the area check is
        # unaffected, the box check compares swapped axes — confirm intent.
        if search_noise(contour, approx, image.shape[::-1]):
            cv.drawContours(image, [approx], 0, 255, -1)
    return image
def search_noise(contour, approx, image_size):
    """Return True when the contour is small enough (both in area and in
    bounding-box size, relative to the whole image) to count as noise."""
    i_h, i_w = image_size
    _, _, w, h = cv.boundingRect(approx)
    if cv.contourArea(contour) >= i_w * i_h / 1000:
        return False
    return w < i_w / 50 and h < i_h / 50
def find_digits(image, org_image, org):
    """Classify each outermost contour of *image* and record the digits.

    Fills the module-level ``deteced`` (crop taken from *org*) and
    ``poisitions`` (bounding box) lists, indexed by the predicted digit.
    """
    contours, hierarchy = cv.findContours(image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    i = 0
    for contour in contours:
        approx = cv.approxPolyDP(contour, 0.001 * cv.arcLength(contour, True), True)
        x, y, w, h = cv.boundingRect(approx)
        # Only contours without a parent (outermost) are candidate digits.
        if hierarchy[0][i][3] == -1:
            prev = predict(org_image[y:y+h, x:x+w])
            if prev != -1:
                deteced[prev] = org[y:y+h, x:x+w]
                poisitions[prev] = (x, y, x + w, y + h)
        i += 1
# Function that locates the box containing the blocks and isolates the digits.
def find_box(image):
    """Find the board contour and return ``(digit_mask, bounding_box)``.

    The mask is a full-size image in which the digit strokes inside the
    board are white (255) and everything else is black.
    """
    o_h, o_w = image.shape[0:2]
    contours, hierarchy = cv.findContours(
        image, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    # The largest contour is the image border; the second largest is the box.
    contours.sort(reverse=True, key=lambda c: cv.contourArea(c))
    contour = contours[1]
    approx = cv.approxPolyDP(
        contour, 0.001 * cv.arcLength(contour, True), True)
    x, y, w, h = cv.boundingRect(approx)
    box = (x, y, x + w, y + h)
    img = image[y:y+h, x:x+w]
    sub = img.copy()
    # Pad the crop with a 25px white border so border-touching shapes close.
    bg = ~np.zeros((h + 50, w + 50), np.uint8)
    bg[25: 25 + h, 25: 25 + w] = img
    img = bg
    i = 0
    i_h, i_w = img.shape[0:2]
    tot = np.zeros(shape=(i_h, i_w))
    if debug_mode:
        print(image)
    contours, hierarchy = cv.findContours(img, cv.RETR_TREE, cv.CHAIN_APPROX_NONE)
    for contour in contours:
        approx = cv.approxPolyDP(
            contour, 0.001 * cv.arcLength(contour, True), True)
        # Fill first-level children white and their holes black.
        if hierarchy[0][i][3] == 0:
            cv.drawContours(tot, [approx], 0, 255, -1)
        if hierarchy[0][i][3] == 1:
            cv.drawContours(tot, [approx], 0, 0, -1)
        i += 1
    tot = tot[25: 25 + h, 25: 25 + w]  # drop the padding again
    kernel = np.ones((5, 5), np.uint8)
    tot = cv.dilate(tot, kernel, iterations=3)
    # Promote to uint32 so the sum below cannot overflow uint8.
    tot = tot.astype('uint32')
    sub = sub.astype('uint32')
    # Pixels that are black in both layers become white in the result.
    res = sub + tot
    res = np.where(res == 0, 255, 0)
    result = np.zeros((o_h, o_w), np.uint8)
    result[y:y+h, x:x+w] = res
    if debug_mode:
        show(result)
    return (result, box)
def get_block_borders(dims, image):
    """Grow the digit bounding box *dims* outwards until the block's
    white (255) border is hit in each of the four directions.

    *image* is expected to be a binary image with white block borders;
    the walks do not bounds-check, so a missing border would run off
    the array — presumably guaranteed by find_box; verify.
    """
    x_i, y_i, x_f, y_f = dims
    kernel = np.ones((5, 5), np.uint8)
    image = cv.erode(image, kernel, iterations=1)
    y_m = (y_f + y_i) // 2
    x_m = (x_f + x_i) // 2
    # Walk left from the box edge until a white pixel is reached.
    t = x_i - 1
    while image[y_m, t] != 255:
        t-=1
    x_i = t
    # Walk right.
    t = x_f + 1
    while image[y_m, t] != 255:
        t+=1
    x_f = t
    # Walk up.
    t = y_i - 1
    while image[t, x_m] != 255:
        t-=1
    y_i = t
    # Walk down.
    t = y_f + 1
    while image[t, x_m] != 255:
        t+=1
    y_f = t
    return (x_i, y_i, x_f, y_f)
def process_image_file(filename):
    """Run the full detection pipeline on the image at *filename*.

    Resets the module-level detection state, thresholds the image,
    locates the board and the digits, and expands each digit's bounding
    box to its block borders.  Returns the board bounding box.
    """
    global deteced, poisitions, explored, debug_mode
    block_size = 50
    deteced = [np.array([]) for x in range(6)]
    poisitions = [None for x in range(6)]
    explored = []
    image_in = cv.cvtColor(cv.imread(filename), cv.COLOR_BGR2GRAY)
    if debug_mode:
        show(image_in)
    image_in_pre = preprocess(image_in)
    image_out = block_image_process(image_in_pre, block_size)
    image_out = postprocess(image_out)
    image_out = clean(image_out)
    if debug_mode:
        show(image_out)
    digits, box = find_box(image_out)
    find_digits(digits, ~image_out, image_in)
    # Expand every detected digit's box to the surrounding block border.
    for i in range(6):
        if deteced[i].size > 0:
            image = deteced[i]
            x, y, w, h = get_block_borders(poisitions[i], ~image_out)
            poisitions[i] = (x, y, w, h)
            cv.rectangle(image_in, (x, y), (w, h), 255, 2)
    if debug_mode:
        show(image_in)
    return box
def check_intersection(values):
    """Return True when the first interval strictly straddles the
    midpoint of the second interval.

    *values* is (v1_start, v1_end, v2_start, v2_end).
    """
    v1_i, v1_f, v2_i, v2_f = values
    mid = (v2_i + v2_f) // 2
    return v1_i < mid < v1_f
def create_state(poisitions, box):
    """Build the state tuple from the digit bounding boxes.

    The state is ``((block, row, col), ..., board_width)``: one triple
    per detected block plus the estimated number of board columns.

    NOTE(review): ``mean_points`` is appended only for non-None entries
    but is later indexed with the block number (``e-1``); if a digit
    lower than the highest detected one is missing, the indices go out
    of sync — confirm all six digits are always detected.
    """
    cols = [[] for x in range(6)]
    mean_points = []
    for i in range(6):
        if poisitions[i] is not None:
            x1_i, y1_i, x1_f, y1_f = poisitions[i]
            mean_points.append(((x1_f + x1_i) // 2, ((y1_f + y1_i) // 2)))
            # Group block i+1 with every block overlapping it horizontally.
            c = [i+1]
            for j in range(6):
                if poisitions[j] is not None and j != i:
                    x2_i, y2_i, x2_f, y2_f = poisitions[j]
                    if check_intersection((x1_i, x1_f, x2_i, x2_f)):
                        c.append(j+1)
            c.sort()
            cols[i] = tuple([*c])
        else:
            cols[i] = ()
    # Deduplicate the per-block column tuples into one tuple per column.
    temp_cols = list(set(tuple(cols)))
    if () in temp_cols:
        temp_cols.remove(())
    cols = []
    for t_col in temp_cols:
        col = list(t_col)
        # Order each column bottom-up (largest y coordinate first).
        col.sort(reverse=True, key=lambda e: mean_points[e-1][1])
        cols.append(tuple(col))
    # Order the columns left to right by their bottom block's x position.
    cols.sort(key=lambda e: mean_points[e[0]-1][0])
    bottoms = [col[0] for col in cols]
    # Estimate the gaps (in block widths) before, between and after columns.
    distances = []
    xb_i, _, xb_f, _ = box
    x_i, _, x_f, _ = poisitions[bottoms[0]-1]
    dist = abs(x_i - xb_i)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    for i in range(len(bottoms)-1):
        x1_i, _, x1_f, _ = poisitions[bottoms[i]-1]
        x2_i, _, _, _ = poisitions[bottoms[i+1]-1]
        dist = abs(x2_i - x1_f)
        dist = dist / (x1_f - x1_i)
        distances.append(dist)
    x_i, _, x_f, _ = poisitions[bottoms[-1]-1]
    dist = abs(xb_f - x_f)
    dist = dist / (x_f - x_i)
    distances.append(dist)
    # Round every gap to the nearest whole number of block widths.
    for i in range(len(distances)):
        dist = distances[i]
        if dist - int(dist) >= 0.5:
            distances[i] = int(dist) + 1
        else:
            distances[i] = int(dist)
    n = sum(distances) + len(cols)
    # Lay the columns out at their estimated grid positions.
    i = distances[0]
    state = []
    pos = 1
    for col in cols:
        j = 0
        for block in col:
            state.append((block, j, i))
            j += 1
        i += distances[pos] + 1
        pos += 1
    state.append(n)
    return tuple(state)
def prepare_image(file_path, debug):
    """Process the image at *file_path* and return the detected state
    tuple; *debug* toggles the module-level debug visualisation."""
    global debug_mode
    debug_mode = bool(debug)
    box = process_image_file(file_path)
    return create_state(poisitions, box)
--- FILE SEPARATOR ---
import heapq
import functools
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
class PriorityQueue:
    """A queue in which the minimum (or maximum) element, as determined
    by ``f`` and ``order``, is returned first.

    If order is 'min', the item with minimum f(x) is popped first; if
    'max', the item with maximum f(x).  Also supports dict-like lookup
    keyed on the stored item.

    Heap entries are (priority, counter, item) triples: the
    monotonically increasing counter breaks priority ties so items are
    never compared with ``<`` (the original (priority, item) pairs
    raised TypeError for non-comparable items with equal priority).
    """

    def __init__(self, order='min', f=lambda x: x):
        self.heap = []
        self._count = 0  # tie-breaker: insertion order
        if order == 'min':
            self.f = f
        elif order == 'max':  # now item with max f(x)
            self.f = lambda x: -f(x)  # will be popped first
        else:
            raise ValueError("Order must be either 'min' or 'max'.")

    def append(self, item):
        """Insert item at its correct position."""
        heapq.heappush(self.heap, (self.f(item), self._count, item))
        self._count += 1

    def extend(self, items):
        """Insert each item in items at its correct position."""
        for item in items:
            self.append(item)

    def pop(self):
        """Pop and return the item (with min or max f(x) value)
        depending on the order."""
        if self.heap:
            return heapq.heappop(self.heap)[2]
        raise Exception('Trying to pop from empty PriorityQueue.')

    def __len__(self):
        """Return current capacity of PriorityQueue."""
        return len(self.heap)

    def __contains__(self, key):
        """Return True if the key is in PriorityQueue."""
        return any(item == key for _, _, item in self.heap)

    def __getitem__(self, key):
        """Returns the first value associated with key in PriorityQueue.
        Raises KeyError if key is not present."""
        for value, _, item in self.heap:
            if item == key:
                return value
        raise KeyError(str(key) + " is not in the priority queue")

    def __delitem__(self, key):
        """Delete the first occurrence of key."""
        try:
            del self.heap[[item == key for _, _, item in self.heap].index(True)]
        except ValueError:
            raise KeyError(str(key) + " is not in the priority queue")
        heapq.heapify(self.heap)

    def get_item(self, key):
        """Returns the first node associated with key in PriorityQueue.
        Raises KeyError if key is not present."""
        for _, _, item in self.heap:
            if item == key:
                return item
        raise KeyError(str(key) + " is not in the priority queue")
def is_in(elt, seq):
    """Like ``elt in seq``, but compares with identity (`is`), not `==`."""
    for candidate in seq:
        if candidate is elt:
            return True
    return False
def memoize(fn, slot=None, maxsize=32):
    """Memoize fn: remember the computed value for any argument list.

    If *slot* is given, the result is cached on that attribute of the
    first argument; otherwise functools.lru_cache caches by arguments.
    """
    if not slot:
        return functools.lru_cache(maxsize=maxsize)(fn)

    def memoized_fn(obj, *args):
        if not hasattr(obj, slot):
            setattr(obj, slot, fn(obj, *args))
        return getattr(obj, slot)
    return memoized_fn
def draw_state(state, file_path):
    """Render *state* as an image and write it to ./images/temp/<file_path>.jpg.

    *state* is a tuple of ``(digit, row, col)`` blocks followed by the
    table width as its last element.  Each block is drawn from a digit
    image onto a white-framed board padded to a square canvas.
    """
    blocks = [*state[0:-1]]
    w = state[-1]
    # The tallest stack determines the board height.
    blocks.sort(key=lambda l: l[1], reverse=True)
    h = blocks[0][1]
    image = np.zeros(((h+1)*100, w*100), np.uint8)
    for block in blocks:
        n, i, j = block
        # Row 0 is the bottom of the board, so flip vertically.
        i = h - i
        digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
        digit = cv.resize(digit, (100, 100))
        # Invert here; the final ~adjust write below inverts back.
        image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
    size = (len(state) - 1)*100
    padded = np.zeros((size, w*100), np.uint8)
    padded[size - (h+1)*100 : size, :] = image
    h = len(state) - 1
    bg = np.zeros((h*100 + 40, w*100 + 40), np.uint8)
    bg[20: h*100 + 20, 20: w*100 + 20] = padded
    # Draw the white frame edges: top, bottom, left, right.
    bg[0:10, :] = 255
    bg[h*100 + 30 : h*100 + 40, :] = 255
    bg[:, 0:10] = 255
    # NOTE(review): this line repeats the bottom edge above; it looks like
    # it was meant to be part of the right edge on the next line — confirm.
    bg[h*100 + 30 : h*100 + 40, :] = 255
    bg[:,w*100 + 30 : w*100 + 40] = 255
    w, h = (w*100 + 40, h*100 + 40)
    # Pad to a square so every rendered state shares one aspect ratio.
    l = max(w, h)
    adjust = np.zeros((l, l), np.uint8)
    d_w = (l - w) // 2
    d_h = (l - h) // 2
    adjust[d_h: d_h + h, d_w: d_w + w] = bg
    cv.imwrite("./images/temp/" + str(file_path) + ".jpg", ~adjust)
--- FILE SEPARATOR ---
from aima3.search import *
from utils import *
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# File containing the AIMA-based implementation of the problem.
class BlocksWorld(Problem):
    """AIMA-style formulation of the blocks-world planning problem.

    A state is a tuple of ``(digit, row, col)`` block descriptors
    followed by the table width as its last element; row 0 is the table
    level.  An action is the target ``(digit, row, col)`` placement of
    a movable block.
    """

    def __init__(self, initial, goal):
        super().__init__(initial, goal)

    def get_blocks_number(self):
        """Return ``len(self.initial)`` (the block tuples plus the width slot)."""
        return len(self.initial)

    def _column_tops(self, state):
        """Return {column: topmost block} for every occupied column."""
        columns = {}
        for block in state[0:-1]:
            n, i, j = block
            if j not in columns or i > columns[j][1]:
                columns[j] = (n, i, j)
        return columns

    def actions(self, state):
        """Return every legal placement of a currently movable block."""
        size = state[-1]
        columns = self._column_tops(state)
        actions = []
        for n, i, j in columns.values():
            for col in range(size):
                if col == j:
                    continue
                if col in columns:
                    # Stack on top of that column's current top block.
                    actions.append((n, columns[col][1] + 1, col))
                else:
                    # Empty column: place directly on the table.
                    actions.append((n, 0, col))
        return actions

    def result(self, state, actions):
        """Return the state reached by applying the placement *actions*.

        *actions* is a single ``(digit, row, col)`` tuple replacing the
        moved block's old descriptor.
        """
        blocks = [*state[0:-1]]
        size = state[-1]
        to_delete = ()
        for block in blocks:
            if block[0] == actions[0]:
                to_delete = block
        blocks.remove(to_delete)
        blocks.append(actions)
        blocks.append(size)
        return tuple(blocks)

    def goal_test(self, state):
        """True when every block sits at its goal coordinates (width ignored)."""
        current = [*state[0:-1]]
        target = [*self.goal[0:-1]]
        # Sort by digit so the descriptors can be compared pairwise.
        current.sort(key=lambda l: l[0])
        target.sort(key=lambda l: l[0])
        return str(current) == str(target)

    def get_movable(self, state):
        """Return the blocks that can be moved (the top of each column)."""
        return list(self._column_tops(state).values())

    def misplaced_blocks(self, node):
        """Heuristic: number of blocks not at their goal ``(row, col)``."""
        blocks = [*node.state[0:-1]]
        target = [*self.goal[0:-1]]
        target.sort(key=lambda l: l[0])
        value = 0
        for n, i, j in blocks:
            # target is sorted by digit, so block n lives at index n-1.
            if target[n-1][1:3] != (i, j):
                value += 1
        return value

    def depth(self, node):
        """Path-cost surrogate: the node's depth in the search tree."""
        return node.depth

    def solution(self, actions, output=True):
        """Print the action sequence and optionally plot each transition.

        Bug fix: the original guard was ``len(actions) is None`` — always
        False, and a TypeError for ``actions=None``; guard on *actions*
        itself instead.
        """
        if actions is None:
            return
        state = self.initial
        successor = None
        n = 1
        print("Lunghezza soluzione: " + str(len(actions)))
        for action in actions:
            print(action)
            successor = self.result(state, action)
            if output:
                figue_1 = self.draw_state(state)
                figue_2 = self.draw_state(successor)
                _, axarr = plt.subplots(1, 2)
                axarr[0].imshow(figue_1, cmap=plt.cm.binary)
                axarr[0].set_xticks([])
                axarr[0].set_yticks([])
                axarr[0].set_xlabel(f"\nStato {n}")
                axarr[1].imshow(figue_2, cmap=plt.cm.binary)
                axarr[1].set_xticks([])
                axarr[1].set_yticks([])
                axarr[1].set_xlabel(f"\nStato {n+1}")
                figManager = plt.get_current_fig_manager()
                figManager.full_screen_toggle()
                plt.show()
            state = successor
            n += 1

    def draw_state(self, state):
        """Render *state* as a grayscale numpy image (digits dark-on-light)."""
        blocks = [*state[0:-1]]
        w = state[-1]
        blocks.sort(key=lambda l: l[1], reverse=True)
        h = blocks[0][1]
        image = np.zeros(((h+1)*100, w*100), np.uint8)
        for n, i, j in blocks:
            # Row 0 is the bottom of the board, so flip vertically.
            i = h - i
            digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
            digit = cv.resize(digit, (100, 100))
            image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
        size = (len(state) - 1)*100
        adjust = np.zeros((size, w*100), np.uint8)
        adjust[size - (h+1)*100 : size, :] = image
        return adjust
|
{
"imported_by": [],
"imports": [
"/load_state.py",
"/utils.py",
"/blocks_world.py"
]
}
|
DiegoArcelli/BlocksWorld
|
/search_algs.py
|
from aima3.search import *
from utils import *
from collections import deque
from blocks_world import BlocksWorld
import sys
# File containing the implementations of the search algorithms.
node_expanded = 0  # number of nodes expanded during the search
max_node = 0  # maximum number of nodes in the frontier during the search
f_dim = 0  # size of the frontier at a given moment
total_node = 0  # running sum of frontier sizes (used for the average)
def init_param():
    """Reset all module-level search-statistics counters to zero."""
    global node_expanded, total_node, max_node, f_dim
    node_expanded = max_node = total_node = f_dim = 0
def print_param():
    """Report the statistics gathered during the last search run."""
    average_frontier = int(total_node / node_expanded)
    print(f"Nodi espansi: {node_expanded}")
    print(f"Max dimensione della frontiera: {max_node}")
    print(f"Dim media della frontiera: {average_frontier}")
# def get_item(queue, key):
# """Returns the first node associated with key in PriorityQueue.
# Raises KeyError if key is not present."""
# for _, item in queue.heap:
# if item == key:
# return item
# raise KeyError(str(key) + " is not in the priority queue")
def show_solution(name_algo, node):
    """Print the solution carried by *node*, labelled with *name_algo*.

    *node* may be a search Node (its ``solution()`` is printed), a plain
    string message (printed verbatim), or anything else (treated as
    "no solution").

    Bug fix: the original tested ``type(Node) == str`` — the Node class,
    never the argument — so the string branch was unreachable; it also
    used a bare ``except``.
    """
    try:
        print(name_algo + ":", node.solution())
    except AttributeError:
        if isinstance(node, str):
            print(name_algo + ":", node)
        else:
            print(name_algo + ":", "No solution found")
# Graph Breadth First Search
def graph_bfs(problem):
    """Breadth-first graph search; return the goal Node or None.

    Updates the module-level statistics counters and prints them via
    ``print_param()`` when a goal is reached.
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    frontier = deque([Node(problem.initial)])
    f_dim += 1
    explored = set()
    while frontier:
        node_expanded += 1
        total_node += f_dim
        # FIFO pop: the oldest (shallowest) node is expanded first.
        node = frontier.popleft()
        f_dim -= 1
        explored.add(node.state)
        if problem.goal_test(node.state):
            # print(node_expanded)
            print_param()
            return node
        for child_node in node.expand(problem):
            if child_node.state not in explored and child_node not in frontier:
                f_dim += 1
                max_node = f_dim if f_dim > max_node else max_node
                frontier.append(child_node)
# Graph Depth First Search
def graph_dfs(problem):
    """Depth-first graph search; return the goal Node or None."""
    global node_expanded, total_node, max_node, f_dim
    init_param()
    frontier = deque([Node(problem.initial)])
    f_dim += 1
    explored = set()
    while frontier:
        total_node += f_dim
        # LIFO pop: the deque is used as a stack here.
        node = frontier.pop()
        node_expanded += 1
        f_dim -= 1
        if problem.goal_test(node.state):
            print_param()
            return node
        explored.add(node.state)
        for child_node in node.expand(problem):
            if child_node.state not in explored and child_node not in frontier:
                f_dim += 1
                max_node = f_dim if f_dim > max_node else max_node
                frontier.append(child_node)
# Uniform Cost Search
def ucs(problem, f):
    """Best-first graph search ordered by the evaluation function *f*.

    Returns the goal Node, or None when the frontier empties.
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    if problem.goal_test(problem.initial):
        return Node(problem.initial)
    # Cache f per node on the node's 'f' slot.
    f = memoize(f, 'f')
    # NOTE(review): this increment plus the per-pop one below makes the
    # first expansion count twice — confirm whether that is intended.
    node_expanded += 1
    frontier = PriorityQueue('min', f)
    frontier.append(Node(problem.initial))
    f_dim += 1
    explored = set()
    while frontier:
        total_node += f_dim
        node_expanded += 1
        node = frontier.pop()
        f_dim -= 1
        # print(node, f(node))
        if problem.goal_test(node.state):
            print_param()
            return node
        explored.add(node.state)
        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                f_dim += 1
                frontier.append(child)
                max_node = f_dim if f_dim > max_node else max_node
            elif child in frontier:
                # Keep the cheaper of the two paths to the same state.
                next_node = frontier.get_item(child)
                if f(child) < f(next_node):
                    del frontier[next_node]
                    frontier.append(child)
# Depth Limited Search
def dls(problem, limit):
    """Depth-limited DFS; return the goal Node, 'cutoff', or None."""
    def recursive_dls(problem, node, limit):
        global node_expanded, total_node, max_node, f_dim
        node_expanded += 1
        total_node += f_dim
        if problem.goal_test(node.state):
            return node
        elif limit == 0:
            # Depth budget exhausted on this branch.
            return 'cutoff'
        cutoff_occurred = False
        for child_node in node.expand(problem):
            f_dim += 1
            max_node = f_dim if f_dim > max_node else max_node
            result = recursive_dls(problem, child_node, limit-1)
            f_dim -= 1
            if result == 'cutoff':
                cutoff_occurred = True
            elif result is not None:
                return result
        # 'cutoff' means deeper solutions may still exist; None means none do.
        return 'cutoff' if cutoff_occurred else None
    return recursive_dls(problem, Node(problem.initial), limit)
# Iterative Deepening Search
def ids(problem):
    """Iterative deepening: run dls with growing depth until no cutoff."""
    global node_expanded, total_node, max_node, f_dim
    init_param()
    prevexp = 0
    for depth in range(sys.maxsize):
        f_dim += 1
        result = dls(problem, depth)
        # Nodes expanded by this iteration alone (debug output).
        print(node_expanded - prevexp)
        prevexp = node_expanded
        f_dim = 0
        if result != 'cutoff':
            print_param()
            return result
    return None
# A*
def a_star(problem: BlocksWorld, h=None):
    """A*: uniform-cost search ordered by f(n) = g(n) + h(n)."""
    global node_expanded
    heuristic = memoize(h or problem.h)
    return ucs(problem, lambda n: problem.depth(n) + heuristic(n))
# Recursive Best First Search
def rbfs(problem, h):
    """Recursive best-first search (linear-space best-first, AIMA).

    Returns the goal Node or None.
    """
    global node_expanded, total_node, max_node, f_dim
    init_param()
    # Cache h, g and f on per-node attribute slots.
    h = memoize(h or problem.h, 'h')
    g = memoize(lambda n: problem.depth(n), 'g')
    f = memoize(lambda n: g(n) + h(n), 'f')
    def rbfs_search(problem, node, f_limit=np.inf):
        global node_expanded, total_node, max_node, f_dim
        node_expanded += 1
        if problem.goal_test(node.state):
            print_param()
            return node, 0
        successors = [*node.expand(problem)]
        f_dim += len(successors)
        total_node += f_dim
        max_node = f_dim if f_dim > max_node else max_node
        if len(successors) == 0:
            return None, np.inf
        for child in successors:
            # A child may not look cheaper than its parent (pathmax).
            child.f = max(f(child), node.f)
        while True:
            successors.sort(key=lambda x: x.f)
            best = successors[0]
            if best.f > f_limit:
                f_dim -= len(successors)
                return None, best.f
            # Second-best f bounds how far the best branch may explore.
            alt = successors[1].f if len(successors) > 1 else np.inf
            # important: best.f must be overwritten with the backed-up value
            result, best.f = rbfs_search(problem, best, min(f_limit, alt))
            # return result
            if result is not None:
                f_dim -= len(successors)
                return result, best.f
    node = Node(problem.initial)
    f(node)
    f_dim += 1
    return rbfs_search(problem, node)[0]
|
from aima3.search import *
from utils import *
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# File containing the AIMA-based implementation of the problem.
class BlocksWorld(Problem):
    """AIMA-style formulation of the blocks-world planning problem.

    A state is a tuple of ``(digit, row, col)`` block descriptors
    followed by the table width as its last element; row 0 is the table
    level.  An action is the target ``(digit, row, col)`` placement of
    a movable block.
    """

    def __init__(self, initial, goal):
        super().__init__(initial, goal)

    def get_blocks_number(self):
        """Return ``len(self.initial)`` (the block tuples plus the width slot)."""
        return len(self.initial)

    def _column_tops(self, state):
        """Return {column: topmost block} for every occupied column."""
        columns = {}
        for block in state[0:-1]:
            n, i, j = block
            if j not in columns or i > columns[j][1]:
                columns[j] = (n, i, j)
        return columns

    def actions(self, state):
        """Return every legal placement of a currently movable block."""
        size = state[-1]
        columns = self._column_tops(state)
        actions = []
        for n, i, j in columns.values():
            for col in range(size):
                if col == j:
                    continue
                if col in columns:
                    # Stack on top of that column's current top block.
                    actions.append((n, columns[col][1] + 1, col))
                else:
                    # Empty column: place directly on the table.
                    actions.append((n, 0, col))
        return actions

    def result(self, state, actions):
        """Return the state reached by applying the placement *actions*.

        *actions* is a single ``(digit, row, col)`` tuple replacing the
        moved block's old descriptor.
        """
        blocks = [*state[0:-1]]
        size = state[-1]
        to_delete = ()
        for block in blocks:
            if block[0] == actions[0]:
                to_delete = block
        blocks.remove(to_delete)
        blocks.append(actions)
        blocks.append(size)
        return tuple(blocks)

    def goal_test(self, state):
        """True when every block sits at its goal coordinates (width ignored)."""
        current = [*state[0:-1]]
        target = [*self.goal[0:-1]]
        # Sort by digit so the descriptors can be compared pairwise.
        current.sort(key=lambda l: l[0])
        target.sort(key=lambda l: l[0])
        return str(current) == str(target)

    def get_movable(self, state):
        """Return the blocks that can be moved (the top of each column)."""
        return list(self._column_tops(state).values())

    def misplaced_blocks(self, node):
        """Heuristic: number of blocks not at their goal ``(row, col)``."""
        blocks = [*node.state[0:-1]]
        target = [*self.goal[0:-1]]
        target.sort(key=lambda l: l[0])
        value = 0
        for n, i, j in blocks:
            # target is sorted by digit, so block n lives at index n-1.
            if target[n-1][1:3] != (i, j):
                value += 1
        return value

    def depth(self, node):
        """Path-cost surrogate: the node's depth in the search tree."""
        return node.depth

    def solution(self, actions, output=True):
        """Print the action sequence and optionally plot each transition.

        Bug fix: the original guard was ``len(actions) is None`` — always
        False, and a TypeError for ``actions=None``; guard on *actions*
        itself instead.
        """
        if actions is None:
            return
        state = self.initial
        successor = None
        n = 1
        print("Lunghezza soluzione: " + str(len(actions)))
        for action in actions:
            print(action)
            successor = self.result(state, action)
            if output:
                figue_1 = self.draw_state(state)
                figue_2 = self.draw_state(successor)
                _, axarr = plt.subplots(1, 2)
                axarr[0].imshow(figue_1, cmap=plt.cm.binary)
                axarr[0].set_xticks([])
                axarr[0].set_yticks([])
                axarr[0].set_xlabel(f"\nStato {n}")
                axarr[1].imshow(figue_2, cmap=plt.cm.binary)
                axarr[1].set_xticks([])
                axarr[1].set_yticks([])
                axarr[1].set_xlabel(f"\nStato {n+1}")
                figManager = plt.get_current_fig_manager()
                figManager.full_screen_toggle()
                plt.show()
            state = successor
            n += 1

    def draw_state(self, state):
        """Render *state* as a grayscale numpy image (digits dark-on-light)."""
        blocks = [*state[0:-1]]
        w = state[-1]
        blocks.sort(key=lambda l: l[1], reverse=True)
        h = blocks[0][1]
        image = np.zeros(((h+1)*100, w*100), np.uint8)
        for n, i, j in blocks:
            # Row 0 is the bottom of the board, so flip vertically.
            i = h - i
            digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
            digit = cv.resize(digit, (100, 100))
            image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
        size = (len(state) - 1)*100
        adjust = np.zeros((size, w*100), np.uint8)
        adjust[size - (h+1)*100 : size, :] = image
        return adjust
|
{
"imported_by": [],
"imports": [
"/blocks_world.py"
]
}
|
viaacode/status
|
/src/viaastatus/server/wsgi.py
|
from flask import Flask, abort, Response, send_file, request, flash, session, render_template
from flask import url_for, redirect
from viaastatus.prtg import api
from viaastatus.decorators import cacher, templated
from os import environ
import logging
from configparser import ConfigParser
import re
import hmac
from hashlib import sha256
from functools import wraps, partial
import argparse
import itertools
import werkzeug.contrib.cache as workzeug_cache
from viaastatus.server.response import Responses
import requests
# Honour the VERBOSITY env var (e.g. "debug", "info") for all loggers.
log_level = logging._nameToLevel[environ.get('VERBOSITY', 'debug').upper()]
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
logging.getLogger().setLevel(log_level)
def normalize(txt):
    """Lower-case *txt* and reduce it to a dash-separated slug.

    Spaces become dashes, parenthesised/bracketed segments and any
    trailing dash+digits suffix are dropped, and runs of dashes are
    collapsed to a single dash.
    """
    slug = txt.replace(' ', '-').lower()
    slug = re.sub('-{2,}', '-', slug)
    slug = re.sub(r'\([^)]*\)', '', slug)
    slug = re.sub(r'\[[^)]*\]', '', slug)
    slug = re.sub('-[0-9]*$', '', slug)
    return re.sub('-{2,}', '-', slug)
def create_app():
    """Application factory: build and configure the Flask app.

    Reads CONFIG_FILE (default ``config.ini``) for app, PRTG, optional
    login and alias sections, sets up caching and token/login security,
    registers all routes and returns the configured app.
    """
    app = Flask(__name__)
    config = ConfigParser()
    config.read(environ.get('CONFIG_FILE', 'config.ini'))
    app_config = config['app']
    # A non-positive timeout disables caching altogether.
    cache_timeout = int(app_config.get('cache_timeout', 30))
    if cache_timeout > 0:
        cache_ = workzeug_cache.SimpleCache(default_timeout=cache_timeout)
    else:
        cache_ = workzeug_cache.NullCache()
    cache = cacher(cache_)()
    cache_other = cacher(cache_, timeout=cache_timeout, key='other/%s')()
    app.secret_key = app_config['secret_key']
    salt = app_config['salt']

    @cache_other
    def get_sensors(prtg_) -> dict:
        """Map normalized sensor names to PRTG object ids (cached)."""
        sensors = {}
        cols = 'objid,name,device'
        ippattern = re.compile(r'[\d\.]+')
        for sensor in prtg_.table(content='sensors',
                                  filter_type=['http', 'ftp', 'httptransaction'],
                                  filter_active=-1,
                                  columns=cols)['sensors']:
            parentname = sensor['device']
            sensor_name = sensor['name']
            if sensor_name.startswith('HTTP'):
                # filter out IPs
                if ippattern.fullmatch(parentname):
                    continue
                sensor_name = parentname + ' - ' + sensor_name
            sensor_name = normalize(sensor_name)
            if sensor_name in sensors:
                # First sensor registered under a normalized name wins.
                logger.warning("Sensor '%s' is conflicting (current id: %d, requested to set to: %d), ignored",
                               sensor_name,
                               sensors[sensor_name],
                               sensor['objid'])
                continue
            sensors[sensor_name] = int(sensor['objid'])
        return sensors

    def _token(*args, **kwargs):
        """Calculates the token
        """
        params = str([args, kwargs])
        return hmac.new(salt.encode('utf-8'), params.encode('utf-8'), sha256).hexdigest()[2:10]

    def secured_by_login(func):
        """
        Decorator to define routes secured_by_login
        """
        @wraps(func)
        def _(*args, **kwargs):
            if not login_settings:
                logger.info('Login requested but refused since no login data in config')
                abort(404)
            if not session.get('authenticated'):
                return _login()
            return func(*args, **kwargs)
        return _

    def secured_by_token(func):
        """
        Decorator to define routes secured_by_token.
        """
        @wraps(func)
        def _(*args, **kwargs):
            # A logged-in session bypasses the token check.
            check_token = 'authenticated' not in session
            if 'ignore_token' in kwargs:
                check_token = not kwargs['ignore_token']
                del kwargs['ignore_token']
            if check_token:
                token = request.args.get('token')
                expected_token = _token(*args, **kwargs)
                if token != expected_token:
                    logger.warning("Wrong token '%s' for %s, expected: '%s'", token, func.__name__, expected_token)
                    abort(401)
            return func(*args, **kwargs)
        # Expose the token function so /urls can pre-compute tokenized links.
        _._secured_by_token = _token
        return _

    prtg_conf = config['prtg']
    _requests = requests.Session()
    if 'certificate' in prtg_conf:
        # Client certificate for mutual TLS against the PRTG API.
        _requests.cert = (prtg_conf['certificate'], prtg_conf['private_key'])
    prtg = api.API.from_credentials(prtg_conf['host'], prtg_conf['username'], prtg_conf['password'], _requests)
    login_settings = None
    if config.has_section('login'):
        login_settings = dict(config['login'])

    class Choices:
        """Enumerates valid values for each route placeholder."""
        @staticmethod
        def sensor():
            return list(get_sensors(prtg).keys())
        @staticmethod
        def type_():
            return {'json', 'png', 'txt', 'html'}
        @staticmethod
        def ttype():
            return {'json', 'txt', 'html'}

    @app.route('/login', methods=['GET'])
    @templated('login.html')
    def _login():
        pass

    @app.route('/urls', methods=['GET'])
    @secured_by_login
    @templated('urls.html')
    def _urls():
        """List every public route with pre-tokenized example URLs."""
        context = {}
        rules = [rule
                 for rule in application.url_map.iter_rules()
                 if rule.is_leaf
                 and rule.endpoint != 'static'
                 and not rule.endpoint.startswith('_')]
        method_types = {}
        for i in range(len(rules)):
            rule = rules[i]
            rules[i] = rules[i].__dict__
            kargs = [argname for argname in rule.arguments if hasattr(Choices, argname)]
            vargs = [getattr(Choices, argname)() for argname in kargs]
            methods = []
            # Cartesian product over all placeholder value sets.
            for params in itertools.product(*vargs):
                params = dict(zip(kargs, params))
                url = url_for(rule.endpoint, **params)
                view_func = app.view_functions[rule.endpoint]
                if hasattr(view_func, '_secured_by_token'):
                    url += '?token=%s' % (view_func._secured_by_token(**params))
                methods.append({
                    "name": rule.endpoint,
                    "params": params,
                    "url": url,
                })
            method_types[rule.endpoint] = methods
        context['method_types'] = method_types
        return context

    @app.route('/login', methods=['POST'])
    def _do_login():
        if not login_settings:
            logger.info('Login requested but refused since no login data in config')
            abort(404)
        if request.form['password'] != login_settings['password'] or \
                request.form['username'] != login_settings['username']:
            flash('Invalid credentials!')
        else:
            session['authenticated'] = True
        return redirect('/urls')

    @app.route('/', methods=['GET'])
    @cache
    @templated('oldstatus.html')
    def index_():
        pass

    @app.route('/sensors.<ttype>')
    @cache
    @secured_by_token
    def sensors_(ttype):
        """Return the list of known sensor names as json/txt/html."""
        if ttype not in Choices.ttype():
            abort(404)
        return getattr(Responses, ttype)(Choices.sensor())

    @app.route('/status/<sensor>.<type_>', methods=['GET'])
    @cache
    @secured_by_token
    def status_(sensor, type_):
        """
        :param str sensor: Name of the sensor
        :param str type_: Response type
        :return:
        """
        if type_ not in Choices.type_():
            abort(404)
        try:
            sensors = get_sensors(prtg)
            if sensor not in sensors:
                abort(404)
            sensor_id = sensors[sensor]
            status = prtg.getsensordetails(id=sensor_id)['sensordata']
        except Exception as e:
            # For images, fail soft with the "unknown" indicator.
            if type_ == 'png':
                return Responses.status(None)
            raise e
        if type_ == 'png':
            # statusid 3/4 = up-ish, 7..12 = paused-ish (unknown), rest = down.
            if int(status['statusid']) in [3, 4]:
                status = True
            elif int(status['statusid']) in [7, 8, 9, 10, 12]:
                status = None
            else:
                status = False
            return Responses.status(status)
        if type_ == 'txt':
            status = status['statustext']
        elif type_ == 'html':
            status_msg = '''
            <dl>
                <dt>%s</dt>
                <dd><a href="%s/sensor.htm?id=%d">%s</a></dd>
            </dl>
            '''
            # NOTE(review): the argument order looks suspicious — the <dt>
            # receives prtg._host and the link host receives the sensor name;
            # confirm the intended order is (sensor, prtg._host, sensor_id, text).
            status = status_msg % (prtg._host, sensor, sensor_id, status['statustext'])
        return getattr(Responses, type_)(status)

    @app.route('/status', methods=['GET'])
    @templated('statuspage.html')
    def status_page():
        if not config.has_section('aliases'):
            abort(404)
        aliases = {url: fwd.split(':')[1] for url, fwd in config['aliases'].items()}
        return dict(aliases=aliases)

    # add aliases
    if config.has_section('aliases'):
        for url, target in config['aliases'].items():
            # Alias format: "<endpoint>:<arg>:<arg>..." mapped onto *url*.
            target = target.split(':')
            name = target.pop(0)
            func = app.view_functions[name]
            kwargs = dict(ignore_token=True)
            func = partial(func, *target, **kwargs)
            func.__name__ = url
            app.route(url)(func)
    return app
# WSGI entry point used by application servers (e.g. gunicorn/uwsgi).
application = create_app()
if __name__ == '__main__':
    # Stand-alone development server with command-line overrides.
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true',
                        help='run in debug mode')
    parser.add_argument('--host',
                        help='hostname or ip to serve app')
    parser.add_argument('--port', type=int, default=1111,
                        help='port used by the server')
    args = parser.parse_args()
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    application.run(host=args.host, port=args.port, debug=args.debug)
|
import os
from flask import jsonify, Response
import flask
class FileResponse(Response):
    """Flask response whose body is the contents of a file on disk."""
    default_mimetype = 'application/octet-stream'
    def __init__(self, filename, **kwargs):
        # Relative paths are resolved against the current app's root.
        if not os.path.isabs(filename):
            filename = os.path.join(flask.current_app.root_path, filename)
        with open(filename, 'rb') as f:
            contents = f.read()
        response = contents
        super().__init__(response, **kwargs)
class StatusResponse(FileResponse):
    """PNG response showing a status indicator: ok / nok / unknown."""
    default_mimetype = 'image/png'

    def __init__(self, status, **kwargs):
        # Map True/False/anything-else onto the three static images.
        if status is True:
            label = 'ok'
        elif status is False:
            label = 'nok'
        else:
            label = 'unk'
        super().__init__('static/status-%s.png' % (label,), **kwargs)
class Responses:
    """Factory helpers turning plain values into Flask responses."""

    @staticmethod
    def json(obj):
        """Serialize *obj* as an application/json response."""
        return jsonify(obj)

    @staticmethod
    def html(obj):
        """Wrap *obj* in a minimal HTML document."""
        return Response('<html><body>%s</body></html>' % (obj,), content_type='text/html')

    @staticmethod
    def txt(obj):
        """Plain-text response; non-string iterables are joined by newlines.

        Fix: use ``isinstance`` instead of the ``type(obj) is not str``
        anti-pattern, so str subclasses are treated as strings too.
        """
        if not isinstance(obj, str):
            obj = '\n'.join(obj)
        return Response(obj, content_type='text/plain')

    @staticmethod
    def status(status_):
        """Status-indicator PNG response."""
        return StatusResponse(status_)
--- FILE SEPARATOR ---
from functools import wraps, partial
from flask import request, render_template
def cached(key='view/%s', cache=None, **extra_cache_kwargs):
    """Decorator factory caching a view's response under its request path.

    *key* is a format string receiving ``request.path``; *cache* is any
    werkzeug-style cache object exposing ``get``/``set``.  Extra keyword
    arguments are forwarded to ``cache.set`` (e.g. ``timeout``).
    """
    def decorator(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            cache_key = key % request.path
            rv = cache.get(cache_key)
            if rv is not None:
                return rv
            rv = f(*args, **kwargs)
            cache.set(cache_key, rv, **extra_cache_kwargs)
            return rv
        return decorated
    return decorator
def cacher(cache, **kwargs):
    """Return the ``cached`` decorator factory pre-bound to *cache*
    (plus any default keyword arguments)."""
    kwargs['cache'] = cache
    return partial(cached, **kwargs)
def templated(template=None):
    """Decorator: render the view's dict return value with *template*.

    Falls back to ``<endpoint>.html`` when no template name is given.
    ``None`` results render with an empty context; non-dict results pass
    through untouched.
    """
    def decorator(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            template_name = template
            if template_name is None:
                # Derive the template path from the endpoint name.
                template_name = request.endpoint \
                    .replace('.', '/') + '.html'
            ctx = f(*args, **kwargs)
            if ctx is None:
                ctx = {}
            elif not isinstance(ctx, dict):
                return ctx
            return render_template(template_name, **ctx)
        return decorated
    return decorator
|
{
"imported_by": [],
"imports": [
"/src/viaastatus/server/response.py",
"/src/viaastatus/decorators.py"
]
}
|
digital-sustainability/swiss-procurement-classifier
|
/runIterations.py
|
from learn import ModelTrainer
from collection import Collection
import pandas as pd
import logging
import traceback
import os
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# === THESIS ===
# Suppliers grouped by sector; each group is evaluated separately.
anbieter_config = {
    'Construction': [
        'Alpiq AG',
        'KIBAG',
        'Egli AG',
    ],
    'IT': [
        'Swisscom',
        'ELCA Informatik AG',
        'Unisys',
    ],
    'Other': [
        'Kummler + Matter AG',
        'Thermo Fisher Scientific (Schweiz) AG',
        'AXA Versicherung AG',
    ],
    'Diverse': [
        'Siemens AG',
        'ABB',
        'Basler & Hofmann West AG',
    ]
}
# === TESTING ===
#anbieter = 'Marti AG' #456
#anbieter = 'Axpo AG' #40
#anbieter = 'Hewlett-Packard' #90
#anbieter = 'BG Ingénieurs Conseils' SA #116
#anbieter = 'Pricewaterhousecoopers' #42
#anbieter = 'Helbling Beratung + Bauplanung AG' #20
#anbieter = 'Ofrex SA' #52
#anbieter = 'PENTAG Informatik AG' #10
#anbieter = 'Wicki Forst AG' #12
#anbieter = 'T-Systems Schweiz' #18
#anbieter = 'Bafilco AG' #20
#anbieter = '4Video-Production GmbH' #3
#anbieter = 'Widmer Ingenieure AG' #6
#anbieter = 'hmb partners AG' #2
#anbieter = 'Planmeca' #4
#anbieter = 'K & M Installationen AG' #4
# Columns fetched for every tender/bid pair (SQL SELECT list).
select = (
    "ausschreibung.meldungsnummer, "
    "anbieter.institution as anbieter_institution, "
    "auftraggeber.beschaffungsstelle_plz, "
    "ausschreibung.gatt_wto, "
    "ausschreibung.sprache, "
    "ausschreibung.auftragsart, "
    "ausschreibung.auftragsart_art, "
    "ausschreibung.lose, "
    "ausschreibung.teilangebote, "
    "ausschreibung.varianten, "
    "ausschreibung.bietergemeinschaft, "
    "cpv_dokument.cpv_nummer as ausschreibung_cpv"
)
# Feature columns used for training, in priority order.
attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
#attributes = ['auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'ausschreibung_cpv', 'gatt_wto','teilangebote', 'sprache']
#attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
# attributes = [
#     [ 'ausschreibung_cpv', 'auftragsart_art' ],
#     [ 'ausschreibung_cpv', 'beschaffungsstelle_plz' ],
#     [ 'ausschreibung_cpv', 'auftragsart' ],
#     [ 'ausschreibung_cpv', 'gatt_wto' ],
#     [ 'ausschreibung_cpv', 'lose' ],
#     [ 'ausschreibung_cpv', 'teilangebote' ],
#     [ 'ausschreibung_cpv', 'varianten' ],
#     [ 'ausschreibung_cpv', 'sprache' ]
# ]
# Hyper-parameters shared by all iteration runs.
config = {
    # ratio that the positive and negative responses have to each other
    'positive_to_negative_ratio': 0.5,
    # Percentage of training set that is used for testing (Recommendation of at least 25%)
    'test_size': 0.25,
    'runs': 100,
    #'enabled_algorithms': ['random_forest'],
    'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'],
    'random_forest': {
        # Tune Random Forest Parameter
        'n_estimators': 100,
        'max_features': 'sqrt',
        'max_depth': None,
        'min_samples_split': 4
    },
    'decision_tree': {
        'max_depth': 30,
        'max_features': 'sqrt',
        'min_samples_split': 4
    },
    'gradient_boost': {
        'n_estimators': 100,
        'learning_rate': 0.1,
        'max_depth': 30,
        'min_samples_split': 4,
        'max_features': 'sqrt'
    }
}
class IterationRunner():
    """Drives repeated ModelTrainer runs over suppliers and attribute sets.

    Every run's output is appended to a Collection and persisted to a
    JSON file (DB_FILE env var, default dbs/auto.json) after each run.
    """

    def __init__(self, anbieter_config, select, attributes, config):
        self.anbieter_config = anbieter_config
        self.select = select
        self.attributes = attributes
        self.config = config
        self.trainer = ModelTrainer(select, '', config, attributes)
        self.collection = Collection()

    def run(self):
        """One run per growing attribute prefix, per supplier."""
        for label, suppliers in self.anbieter_config.items():
            logger.info(label)
            for supplier in suppliers:
                for prefix_len in range(1, len(self.attributes) + 1):
                    self.singleRun(supplier, self.attributes[:prefix_len], label)
                self.trainer.resetSQLData()

    def runAttributesEachOne(self):
        """One run per single attribute, per supplier."""
        for label, suppliers in self.anbieter_config.items():
            logger.info(label)
            for supplier in suppliers:
                for attribute in self.attributes:
                    self.singleRun(supplier, [attribute], label)
                self.trainer.resetSQLData()

    def runAttributesList(self):
        """One run per pre-built attribute list, per supplier."""
        for label, suppliers in self.anbieter_config.items():
            logger.info(label)
            for supplier in suppliers:
                for attribute_list in self.attributes:
                    self.singleRun(supplier, attribute_list, label)
                self.trainer.resetSQLData()

    def runSimpleAttributeList(self):
        """A single run with the full attribute list, per supplier."""
        for label, suppliers in self.anbieter_config.items():
            logger.info(label)
            for supplier in suppliers:
                self.singleRun(supplier, self.attributes, label)
                self.trainer.resetSQLData()

    def singleRun(self, anbieter, att_list, label):
        """Train once for *anbieter* with *att_list* and persist the result."""
        logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list))
        try:
            self.trainer.attributes = att_list
            self.trainer.anbieter = anbieter
            output = self.trainer.run()
            output['label'] = label
            self.collection.append(output)
            filename = os.getenv('DB_FILE', 'dbs/auto.json')
            self.collection.to_file(filename)
        except Exception as e:
            traceback.print_exc()
            print(e)
        print('one it done')
# Module-level runner so interactive sessions can reuse it.
runner = IterationRunner(anbieter_config, select, attributes, config)
if __name__ == '__main__':
    # Execute all iteration strategies back to back.
    # runner.collection.import_file('dbs/auto.json')
    runner.run()
    runner.runAttributesEachOne()
    runner.runAttributesList()
    # label, anbieters = next(iter(runner.anbieter_config.items()))
    # print(label)
|
import pandas as pd
import numpy as np
import math
import re
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef
from sklearn import tree
from db import connection, engine
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ModelTrainer():
def __init__(self, select, anbieter, config, attributes=None):
    """Store the run configuration on the trainer.

    Fix: ``attributes`` previously defaulted to a mutable ``[]`` shared
    across every instance; default to a fresh list instead (callers that
    pass their own list are unaffected).
    """
    self.anbieter = anbieter
    self.select = select
    self.attributes = [] if attributes is None else attributes
    self.config = config
def run(self):
    """Query, prepare and train; return the collected result dict.

    The returned dict also carries the duplicate statistics from
    ``prepare_data()`` under the 'duplicates' key.
    """
    self.queryData()
    prepared_positives, prepared_negatives, duplicates = self.prepare_data()
    result = self.trainAllModels(prepared_positives, prepared_negatives)
    result['duplicates'] = duplicates.to_dict()
    return result
def resetSQLData(self):
    """Drop the cached query results so the next run re-queries the DB.

    Fix: the original wrapped both ``del`` statements in a single bare
    ``try/except`` — if ``positives`` was already missing, ``negatives``
    was silently left stale; it also swallowed every exception type.
    Delete each attribute independently and only ignore AttributeError.
    """
    try:
        del self.positives
    except AttributeError:
        pass
    try:
        del self.negatives
    except AttributeError:
        pass
def trainAllModels(self, positives, negatives):
    """Train every enabled algorithm; return a result dict with metadata."""
    result = {
        'attributes': self.attributes,
        'anbieter': self.anbieter,
        'timestamp': datetime.now().isoformat()
    }
    samples = self.createSamples(positives, negatives)
    # Merge per-algorithm results into the metadata dict.
    result = {**result, **self.trainAllAlgorithms(samples)}
    return result
def createSamples(self, positives, negatives):
    """Build `runs` shuffled DataFrames of all positives plus sampled negatives.

    Each run uses its index as the random seed, so samples are reproducible.
    """
    negative_sample_size = math.ceil(len(positives) * (self.config['positive_to_negative_ratio'] + 1))
    samples = []
    for runIndex in range(self.config['runs']):
        negative_sample = negatives.sample(negative_sample_size, random_state=runIndex)
        # NOTE(review): DataFrame.append is removed in recent pandas;
        # pd.concat is the modern equivalent — confirm the pinned version.
        sample = positives.append(negative_sample, ignore_index=True)
        sample.reset_index(drop=True, inplace=True)
        sample.fillna(0, inplace=True)
        sample = shuffle(sample, random_state=runIndex)
        samples.append(sample)
    return samples
def trainAllAlgorithms(self, samples):
    """Train/evaluate each enabled algorithm on *samples*; return results.

    For every algorithm the result carries its configured metrics, the
    per-run evaluation data and aggregated metadata.
    """
    result = {}
    for algorithm in self.config['enabled_algorithms']:
        # Build a classifier factory parameterized by the run's seed.
        if algorithm == 'random_forest':
            n_estimators = self.config[algorithm]['n_estimators']
            max_depth = self.config[algorithm]['max_depth']
            max_features = self.config[algorithm]['max_features']
            min_samples_split = self.config[algorithm]['min_samples_split']
            classifier = lambda randomState: RandomForestClassifier(
                n_estimators=n_estimators,
                max_depth=max_depth,
                max_features=max_features,
                min_samples_split=min_samples_split,
                random_state=randomState,
                n_jobs=-1
            )
        elif algorithm == 'gradient_boost':
            n_estimators = self.config[algorithm]['n_estimators']
            max_depth = self.config[algorithm]['max_depth']
            max_features = self.config[algorithm]['max_features']
            learning_rate = self.config[algorithm]['learning_rate']
            classifier = lambda randomState: GradientBoostingClassifier(
                n_estimators=n_estimators,
                max_depth=max_depth,
                max_features=max_features,
                learning_rate=learning_rate,
                random_state=randomState
            )
        elif algorithm == 'decision_tree':
            max_depth = self.config[algorithm]['max_depth']
            max_features = self.config[algorithm]['max_features']
            # NOTE(review): unlike the other branches, this one ignores both
            # the configured min_samples_split and the randomState argument —
            # confirm whether that is intentional.
            classifier = lambda randomState: DecisionTreeClassifier(
                max_depth=max_depth,
                max_features=max_features
            )
        else:
            raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm))
        result[algorithm] = {}
        x_tests, y_tests = self.trainModel(samples, classifier, algorithm)
        result[algorithm]['metrics'] = self.config[algorithm]
        evaluation_dataframe = pd.concat([self.__getConfusionMatices(y_tests), self.__getAccuracies(y_tests)], axis=1, sort=False)
        result[algorithm]['data'] = evaluation_dataframe.to_dict()
        result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe)
    return result
def trainModel(self, samples, get_classifier, algorithm):
    """Train and evaluate one estimator per sample run.

    Args:
        samples: list of DataFrames, each a full run sample with a 'Y'
            label column.
        get_classifier: factory mapping a random state (the run index) to
            a fresh, unfitted estimator.
        algorithm: algorithm name; used for logging and for
            algorithm-specific debug output.

    Returns:
        (x_tests, y_tests): per-run feature test frames and per-run
        result frames (true label, prediction, correctness, run index).
    """
    x_tests = []
    y_tests = []
    for runIndex, sample in enumerate(samples):
        classifier = get_classifier(runIndex)
        # run index doubles as the split seed so every run is reproducible
        train, test = train_test_split(sample, random_state=runIndex)
        if 'skip_cross_val' not in self.config or not self.config['skip_cross_val']:
            # Compute cross validation (5 random splits of the training set)
            scores = self.__cross_val_score(classifier, train, cv=5)
            print(scores)
            print('Avg. CV Score | {} Run {}: {:.2f}'.format(algorithm, runIndex, round(sum(scores)/len(scores), 4)))
        # Select all feature attributes (everything but the label)
        x_test = test.drop(['Y'], axis=1)
        x_train = train.drop(['Y'], axis=1)
        # Only select the response result attributes
        y_test = test[['Y']].copy()
        y_train = train[['Y']]
        # Train the model on training sets (fit returns the estimator itself)
        classifier = classifier.fit(x_train, y_train['Y'])
        # print the max_depths of all classifiers in a Random Forest
        if algorithm == 'random_forest':
            print('Random Forest Depts:', [self.dt_max_depth(t.tree_) for t in classifier.estimators_])
        # Create a file displaying the tree (first run only)
        if 'draw_tree' in self.config and self.config['draw_tree'] and algorithm == 'decision_tree' and runIndex == 0:
            # NOTE(review): `tree` is presumably sklearn.tree imported at
            # module level — confirm against the file header (outside view).
            tree.export_graphviz(classifier, out_file='tree.dot', feature_names=x_train.columns)
        # Predict on the test sets
        prediction = classifier.predict(x_test)
        # Add run number to df
        y_test['run'] = runIndex
        x_test['run'] = runIndex
        # add prediction to df
        y_test['prediction'] = prediction
        # add result of run to df
        y_test['correct'] = y_test['prediction'] == y_test['Y']
        # add run to run arrays
        x_tests.append(x_test)
        y_tests.append(y_test)
    return x_tests, y_tests
def queryData(self):
    """Fetch (and cache) the positive and negative SQL result sets.

    The queries only run on the first call; subsequent calls return the
    cached frames until resetSQLData() clears them.
    """
    cached = hasattr(self, 'positives') and hasattr(self, 'negatives')
    if not cached:
        self.positives = self.__runSql(True)
        self.negatives = self.__runSql(False)
        logger.info('sql done')
    return self.positives, self.negatives
def __runSql(self, response):
    """Run the bidder query and return the result as a DataFrame.

    Args:
        response: True selects rows won by self.anbieter, False selects
            rows won by every other bidder.
    """
    # '=' keeps the configured bidder, '!=' inverts the selection
    resp = '=' if response else '!='
    query = """SELECT {} from beruecksichtigteanbieter_zuschlag
    JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer
    JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id
    JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id
    JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id
    JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id
    JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer
    WHERE anbieter.institution {} "{}"
    ORDER BY ausschreibung.meldungsnummer;
    """.format(self.select, resp, self.anbieter)
    return pd.read_sql(query, engine)
def prepareUnfilteredRun(self, positive_sample, negative_samples):
    """Merge the positive sample with each negative sample, keeping all columns.

    Used later to resolve display names, hence no attribute filtering.

    Args:
        positive_sample: DataFrame of positive rows.
        negative_samples: list of negative-sample DataFrames.

    Returns:
        list of merged DataFrames, one per negative sample.
    """
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0
    return [
        pd.concat([positive_sample, negative_sample], ignore_index=True)
        for negative_sample in negative_samples
    ]
def __getAccuracies(self, dfys):
    """Build a per-run metrics frame: accuracy (%), MCC, fn_rate (%).

    Each input frame must carry the true labels in 'Y' and the model
    output in 'prediction'.
    """
    res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate'])
    for dfy in dfys:
        acc = round(accuracy_score(dfy.Y, dfy.prediction), 4)
        mcc = matthews_corrcoef(dfy.Y, dfy.prediction)
        matrix = confusion_matrix(dfy.Y, dfy.prediction)
        # fn / (tp + fn): share of actual positives the model missed
        fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4)
        # accuracy and fn_rate are scaled *100 for better % readability
        row = [acc * 100, mcc, fnr * 100]
        res.loc[len(res)] = row
    return res
def __getConfusionMatices(self, dfys):
    """Collect per-run confusion-matrix counts as a tn/tp/fp/fn frame.

    sklearn's layout is::

        [[tn, fp],
         [fn, tp]]
    """
    res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn'])
    for dfy in dfys:
        m = confusion_matrix(dfy.Y, dfy.prediction)
        res.loc[len(res)] = [m[0][0], m[1][1], m[0][1], m[1][0]]
    return res
def __getIterationMetadata(self, df):
    """Aggregate per-run evaluation rows into summary statistics.

    For each of accuracy / MCC / fn_rate the mean, median, min, max and
    the 25%/75% quantiles are reported, plus the mean sample size
    (sum of the four confusion-matrix cells).
    """
    res = {}
    # key prefix -> source column; insertion order fixes the key order
    for prefix, column in (('acc', 'accuracy'), ('mcc', 'MCC'), ('fn_rate', 'fn_rate')):
        series = df[column]
        res[prefix + '_mean'] = series.mean()
        res[prefix + '_median'] = series.median()
        res[prefix + '_min'] = series.min()
        res[prefix + '_max'] = series.max()
        res[prefix + '_quantile_25'] = series.quantile(q=.25)
        res[prefix + '_quantile_75'] = series.quantile(q=.75)
    res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean()
    return res
def __cross_val_score(self, clf, sample, cv):
    """Score `clf` on `cv` random train/test splits of `sample`.

    NOTE(review): despite the name this is Monte-Carlo style validation
    (repeated random splits seeded by the split index), not true k-fold —
    the test sets of different splits may overlap.

    Args:
        clf: estimator supporting fit/predict.
        sample: DataFrame containing the feature columns plus 'Y'.
        cv: number of random splits to evaluate.

    Returns:
        list of accuracy scores, one per split.
    """
    cross_val_scores = []
    for validation_run_index in range(cv):
        train, test = train_test_split(sample, random_state=validation_run_index)
        # Select all feature attributes (drop only the label)
        xtest = test.drop(['Y'], axis=1)
        xtrain = train.drop(['Y'], axis=1)
        # Only select the response result attributes
        ytest = test[['Y']]
        ytrain = train[['Y']]
        # fit() returns the (mutated) estimator, so the same object is
        # re-fit on every split
        clf = clf.fit(xtrain, ytrain['Y'])
        prediction = clf.predict(xtest)
        cross_val_scores.append(accuracy_score(ytest, prediction))
    return cross_val_scores
def prepare_data(self):
    """Label and preprocess the cached positive/negative result sets.

    Keeps only 'meldungsnummer' plus the configured attributes, labels
    the rows (Y=1 positive, Y=0 negative) and runs either the injected
    cleanData hook or the default preprocess_data pipeline.

    Returns:
        (positives, negatives, duplicates): the preprocessed frames and
        any rows that still shared a 'meldungsnummer' after encoding
        (always empty for the cleanData path, which tracks no duplicates).
    """
    filter_attributes = ['meldungsnummer'] + self.attributes
    # filter only specified attributes
    positives = self.positives[filter_attributes].copy()
    negatives = self.negatives[filter_attributes].copy()
    positives['Y'] = 1
    negatives['Y'] = 0
    # pd.concat replaces DataFrame.append (removed in pandas 2.0)
    merged = pd.concat([positives, negatives], ignore_index=True)
    if hasattr(self, 'cleanData'):
        positives = self.cleanData(positives, self.attributes)
        negatives = self.cleanData(negatives, self.attributes)
        # BUG FIX: this branch previously left `duplicates` unbound and
        # crashed on return; the hook tracks no duplicates, so report none.
        duplicates = pd.DataFrame()
    else:
        merged, duplicates = self.preprocess_data(merged, self.attributes)
        positives = merged[merged['Y'] == 1]
        negatives = merged[merged['Y'] == 0]
    return positives, negatives, duplicates
def preprocess_data(self, df, filters):
    """Encode the raw query result into a purely numeric feature frame.

    CPV codes are expanded into their hierarchy levels (division / group /
    class / category) and one-hot encoded per meldungsnummer; yes/no
    columns are mapped to 0/1; categorical columns are one-hot encoded
    with a NaN indicator; postcodes are reduced to district/area prefixes
    and one-hot encoded as well.

    Args:
        df: merged positive+negative frame (must contain 'meldungsnummer').
        filters: attribute names that are present and should be encoded.

    Returns:
        (df, duplicates): the encoded frame without 'meldungsnummer', and
        the rows that still shared a 'meldungsnummer' after deduplication.
    """
    df = df.copy()
    # drop duplicates before starting to preprocess
    df = df.drop_duplicates()
    if 'ausschreibung_cpv' in filters:
        # derive the four CPV hierarchy levels from the 8-digit CPV code
        split = {
            'division': lambda x: math.floor(x/1000000),
            'group': lambda x: math.floor(x/100000),
            'class': lambda x: math.floor(x/10000),
            'category': lambda x: math.floor(x/1000)
        }
        for key, applyFun in split.items():
            df['cpv_' + key ] = df['ausschreibung_cpv'].apply(applyFun)
        tmpdf = {}
        for key in split.keys():
            key = 'cpv_' + key
            # one row per meldungsnummer: max() ORs the dummy flags of all
            # CPV rows belonging to the same notice
            tmpdf[key] = df[['meldungsnummer']].join(pd.get_dummies(df[key], prefix=key)).groupby('meldungsnummer').max()
        encoded_df = pd.concat([tmpdf['cpv_'+ key] for key in split.keys()], axis=1)
        # drop the helper level columns and the raw code, then re-join the
        # aggregated one-hot columns per meldungsnummer
        df = df.drop(['cpv_' + key for key, fun in split.items()], axis=1)
        df = df.drop(['ausschreibung_cpv'], axis=1)
        df = df.drop_duplicates()
        df = df.join(encoded_df, on='meldungsnummer')
    if 'gatt_wto' in filters:
        df[['gatt_wto']] = df[['gatt_wto']].applymap(ModelTrainer.unifyYesNo)
    if 'anzahl_angebote' in filters:
        df[['anzahl_angebote']] = df[['anzahl_angebote']].applymap(ModelTrainer.tonumeric)
    if 'teilangebote' in filters:
        df[['teilangebote']] = df[['teilangebote']].applymap(ModelTrainer.unifyYesNo)
    if 'lose' in filters:
        # 'lose' may legitimately be a count, hence the int-or-yes/no mapper
        df[['lose']] = df[['lose']].applymap(ModelTrainer.unifyYesNoOrInt)
    if 'varianten' in filters:
        df[['varianten']] = df[['varianten']].applymap(ModelTrainer.unifyYesNo)
    if 'auftragsart_art' in filters:
        auftrags_art_df = pd.get_dummies(df['auftragsart_art'], prefix='aftrgsrt', dummy_na=True)
        df = pd.concat([df,auftrags_art_df],axis=1).drop(['auftragsart_art'], axis=1)
    if 'sprache' in filters:
        sprache_df = pd.get_dummies(df['sprache'], prefix='lang', dummy_na=True)
        df = pd.concat([df,sprache_df],axis=1).drop(['sprache'], axis=1)
    if 'auftragsart' in filters:
        auftragsart_df = pd.get_dummies(df['auftragsart'], prefix='auftr', dummy_na=True)
        df = pd.concat([df,auftragsart_df],axis=1).drop(['auftragsart'], axis=1)
    if 'beschaffungsstelle_plz' in filters:
        # normalize the postcode to a 4-digit int (or NaN), then encode the
        # full code plus its 1- and 2-digit prefixes as one-hot columns
        df['beschaffungsstelle_plz'] = df['beschaffungsstelle_plz'].apply(ModelTrainer.transformToSingleInt)
        split = {
            'district': lambda x: math.floor(x/1000) if not math.isnan(x) else x,
            'area': lambda x: math.floor(x/100) if not math.isnan(x) else x,
        }
        prefix = 'b_plz_'
        for key, applyFun in split.items():
            df[prefix + key] = df['beschaffungsstelle_plz'].apply(applyFun)
        df.rename(columns={'beschaffungsstelle_plz': prefix + 'ganz'}, inplace=True)
        for key in ['ganz'] + list(split.keys()):
            key = prefix + key
            df = pd.concat([df, pd.get_dummies(df[key], prefix=key, dummy_na=True)], axis=1).drop(key, axis=1)
    df.drop_duplicates(inplace=True)
    if any(df.duplicated(['meldungsnummer'])):
        logger.warning("duplicated meldungsnummer")
    duplicates = df[df.duplicated(['meldungsnummer'])]
    df = df.drop(['meldungsnummer'], axis=1)
    return df, duplicates
def dt_max_depth(self, tree):
    """Return the depth (in node levels) of a fitted sklearn tree_ object.

    Args:
        tree: sklearn Tree structure exposing children_left and
            children_right arrays.

    Returns:
        int: levels from root to the deepest leaf (a lone root counts 1).
    """
    left = tree.children_left
    right = tree.children_right

    def depth_below(node):
        # internal nodes have differing child ids; leaves carry the same
        # sentinel in both arrays
        if left[node] == right[node]:
            return 1
        return 1 + max(depth_below(left[node]), depth_below(right[node]))

    return depth_below(0)
# @param val: a value to be casted to numeric
# @return a value that has been casted to an integer. Returns 0 if cast was not possible
def tonumeric(val):
try:
return int(val)
except:
return 0
# @param val: a string value to be categorised
# @return uniffied gatt_wto resulting in either "Yes", "No" or "?"
@staticmethod
def unifyYesNo(val):
switcher = {
'Ja': 1,
'Sì': 1,
'Oui': 1,
'YES': 1,
'Nein': 0,
'Nei': 0,
'Non': 0,
'NO': 0,
}
return switcher.get(val, 0)
@staticmethod
def unifyYesNoOrInt(val):
try:
return int(val)
except ValueError:
return ModelTrainer.unifyYesNo(val)
@staticmethod
def transformToSingleInt(plz):
try:
result = int(plz)
except ValueError:
try:
result = int(re.search(r"\d{4}", plz).group())
except AttributeError:
return np.nan
return result if result >= 1000 and result <= 9999 else np.nan
--- FILE SEPARATOR ---
import json
import pandas as pd
import warnings
class Collection():
    """Accumulates per-bidder training results and exports them as JSON.

    Each stored item is the dict produced by one ModelTrainer run:
    per-algorithm 'metadata'/'data' plus 'anbieter', 'label' and
    'attributes' keys.
    """

    # algorithms every stored iteration is expected to contain
    algorithms = ['gradient_boost', 'decision_tree', 'random_forest']

    def __init__(self):
        self.list = []

    def append(self, item):
        """Add one iteration-result dict."""
        self.list.append(item)

    def __iter__(self):
        return iter(self.list)

    def get_all_as_df(self, algorithm):
        """Return one metadata row per iteration for a single algorithm.

        NOTE(review): the bare except returns None on *any* failure (bad
        algorithm key, malformed entry) and only emits a warning.
        """
        try:
            tmp = []
            for iteration in self.list:
                tmp.append(iteration[algorithm]['metadata'])
            return pd.DataFrame(tmp, index=[iteration['anbieter'] for iteration in self.list])
        except:
            warnings.warn('Select an algorithm: "random_forest", "gradient_boost" or "decision_tree"')

    def df_row_per_algorithm(self):
        """Flatten the collection: one DataFrame row per (iteration, algorithm).

        NOTE(review): `output` aliases the stored metadata dict, so the
        extra keys added below are written back into self.list as well.
        """
        tmp = []
        for iteration in self.list:
            for algorithm in self.algorithms:
                output = iteration[algorithm]['metadata']
                evaluation_dataframe = pd.DataFrame.from_dict(iteration[algorithm]['data'])
                # std deviations are not part of the stored metadata yet
                output['acc_std'] = evaluation_dataframe['accuracy'].std()
                # MCC is stored in [-1, 1]; scale to percent before the std
                evaluation_dataframe['MCC'] = evaluation_dataframe['MCC']*100
                output['mcc_std'] = evaluation_dataframe['MCC'].std()
                output['fn_std'] = evaluation_dataframe['fn_rate'].std()
                output['anbieter'] = iteration['anbieter']
                output['label'] = iteration['label']
                output['algorithm'] = algorithm
                output['attributes'] = ",".join(iteration['attributes'])
                tmp.append(output)
        return pd.DataFrame(tmp)

    def to_json(self, **kwargs):
        """Serialize the collection via json.dumps (kwargs passed through)."""
        return json.dumps(self.list, **kwargs)

    def to_file(self, filename):
        """Write the collection to a pretty-printed, key-sorted JSON file."""
        with open(filename, 'w') as fp:
            json.dump(self.list, fp, indent=4, sort_keys=True)

    def import_file(self, filename, force=False):
        """Load a previously exported JSON file; refuses to clobber unless force."""
        if len(self.list) and not force:
            warnings.warn("Loaded Collection, pls add force=True")
        else:
            with open(filename, 'r') as fp:
                self.list = json.load(fp)
|
{
"imported_by": [],
"imports": [
"/learn.py",
"/collection.py"
]
}
|
digital-sustainability/swiss-procurement-classifier
|
/runOldIterations.py
|
from train import ModelTrainer
from collection import Collection
import pandas as pd
import logging
import traceback
import os
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# === THESIS ===
# Bidders under study, grouped by sector; the group key becomes the
# 'label' column in the exported results.
anbieter_config = {
    'Construction': [
        'Alpiq AG',
        'Swisscom',
        'Kummler + Matter AG',
        'Siemens AG'
    ],
    'IT': [
        'G. Baumgartner AG',
        'ELCA Informatik AG',
        'Thermo Fisher Scientific (Schweiz) AG',
        'Arnold AG',
    ],
    'Other': [
        'Riget AG',
        'isolutions AG',
        'CSI Consulting AG',
        'Aebi & Co. AG Maschinenfabrik',
    ],
    'Divers': [
        'DB Schenker AG',
        'IT-Logix AG',
        'AVS Syteme AG',
        'Sajet SA'
    ]
}
# === TESTING ===
#anbieter = 'Marti AG' #456
#anbieter = 'Axpo AG' #40
#anbieter = 'Hewlett-Packard' #90
#anbieter = 'BG Ingénieurs Conseils SA' #116
#anbieter = 'Pricewaterhousecoopers' #42
#anbieter = 'Helbling Beratung + Bauplanung AG' #20
#anbieter = 'Ofrex SA' #52
#anbieter = 'PENTAG Informatik AG' #10
#anbieter = 'Wicki Forst AG' #12
#anbieter = 'T-Systems Schweiz' #18
#anbieter = 'Bafilco AG' #20
#anbieter = '4Video-Production GmbH' #3
#anbieter = 'Widmer Ingenieure AG' #6
#anbieter = 'hmb partners AG' #2
#anbieter = 'Planmeca' #4
#anbieter = 'K & M Installationen AG' #4
# Column list for the bidder side of the self-join in ModelTrainer.__runSql
select_anbieter = (
    "anbieter.anbieter_id, "
    "anbieter.institution as anbieter_institution, "
    "cpv_dokument.cpv_nummer as anbieter_cpv, "
    "ausschreibung.meldungsnummer"
)
# anbieter_CPV are all the CPVs the Anbieter ever won a procurement for. So all the CPVs they are interested in.
# Column list for the tender side of the self-join
select_ausschreibung = (
    "anbieter.anbieter_id, "
    "auftraggeber.institution as beschaffungsstelle_institution, "
    "auftraggeber.beschaffungsstelle_plz, "
    "ausschreibung.gatt_wto, "
    "ausschreibung.sprache, "
    "ausschreibung.auftragsart_art, "
    "ausschreibung.lose, "
    "ausschreibung.teilangebote, "
    "ausschreibung.varianten, "
    "ausschreibung.projekt_id, "
    # "ausschreibung.titel, "
    "ausschreibung.bietergemeinschaft, "
    "cpv_dokument.cpv_nummer as ausschreibung_cpv, "
    "ausschreibung.meldungsnummer as meldungsnummer2"
)
# Feature columns the iteration runner sweeps over (prefixes / singletons)
attributes = ['ausschreibung_cpv', 'auftragsart_art','beschaffungsstelle_plz','gatt_wto','lose','teilangebote', 'varianten','sprache']
# attributes = ['auftragsart_art']
config = {
    # ratio that the positive and negative responses have to each other
    'positive_to_negative_ratio': 0.5,
    # Percentage of training set that is used for testing (Recommendation of at least 25%)
    'test_size': 0.25,
    # number of negative-resampling runs per (anbieter, attribute) pair
    'runs': 100,
    #'enabled_algorithms': ['random_forest'],
    'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'],
    'random_forest': {
        # Tune Random Forest Parameter
        'n_estimators': 100,
        'max_features': 'sqrt',
        'max_depth': None,
        'min_samples_split': 2
    },
    'decision_tree': {
        'max_depth': 15,
        'max_features': 'sqrt'
    },
    'gradient_boost': {
        'n_estimators': 100,
        'learning_rate': 0.1,
        'max_depth': 15,
        'max_features': 'sqrt'
    }
}
# Prepare Attributes
def cleanData(df, filters):
    """Encode the selected attribute columns of *df* and return the frame.

    Yes/no columns become 0/1 via the ModelTrainer helpers; categorical
    columns are one-hot encoded (with a NaN indicator column) and the
    original column is dropped.
    """
    def one_hot(frame, column, prefix):
        # encode one categorical column and drop the original
        dummies = pd.get_dummies(frame[column], prefix=prefix, dummy_na=True)
        return pd.concat([frame, dummies], axis=1).drop([column], axis=1)

    if 'gatt_wto' in filters:
        df[['gatt_wto']] = df[['gatt_wto']].applymap(ModelTrainer.unifyYesNo)
    if 'anzahl_angebote' in filters:
        df[['anzahl_angebote']] = df[['anzahl_angebote']].applymap(ModelTrainer.tonumeric)
    if 'teilangebote' in filters:
        df[['teilangebote']] = df[['teilangebote']].applymap(ModelTrainer.unifyYesNo)
    if 'lose' in filters:
        df[['lose']] = df[['lose']].applymap(ModelTrainer.unifyYesNo)
    if 'varianten' in filters:
        df[['varianten']] = df[['varianten']].applymap(ModelTrainer.unifyYesNo)
    if 'auftragsart_art' in filters:
        df = one_hot(df, 'auftragsart_art', 'aftrgsrt')
    if 'sprache' in filters:
        df = one_hot(df, 'sprache', 'lang')
    if 'auftragsart' in filters:
        df = one_hot(df, 'auftragsart', 'auftr')
    if 'beschaffungsstelle_plz' in filters:
        df = one_hot(df, 'beschaffungsstelle_plz', 'beschaffung_plz')
    return df
class IterationRunner():
    """Drives ModelTrainer over all configured bidders and attribute sets,
    collecting the results into a Collection that is persisted after every
    single run."""

    def __init__(self, anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData):
        self.anbieter_config = anbieter_config
        self.select_anbieter = select_anbieter
        self.select_ausschreibung = select_ausschreibung
        self.attributes = attributes
        self.config = config
        self.cleanData = cleanData
        # the bidder name is set per run in singleRun(), hence '' here
        self.trainer = ModelTrainer(select_anbieter, select_ausschreibung, '', config, cleanData, attributes)
        self.collection = Collection()

    def run(self):
        """Train every bidder on growing attribute prefixes.

        NOTE(review): range(len-1) means the full attribute list is never
        used here — presumably runSimpleAttributeList covers that case.
        """
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr_id in range(len(self.attributes)-1):
                    att_list = self.attributes[:attr_id+1]
                    self.singleRun(anbieter, att_list, label)
                # drop the cached SQL so the next bidder gets fresh data
                self.trainer.resetSQLData()

    def runAttributesEachOne(self):
        """Train every bidder once per single attribute."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                for attr in self.attributes:
                    att_list = [attr]
                    self.singleRun(anbieter, att_list, label)
                self.trainer.resetSQLData()

    def runSimpleAttributeList(self):
        """Train every bidder once with the full attribute list."""
        for label, anbieters in self.anbieter_config.items():
            logger.info(label)
            for anbieter in anbieters:
                self.singleRun(anbieter, self.attributes, label)
                self.trainer.resetSQLData()

    def singleRun(self, anbieter, att_list, label):
        """Run the trainer once and persist the collection; errors are
        logged and swallowed so a long sweep survives a single failure."""
        logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list))
        try:
            self.trainer.attributes = att_list
            self.trainer.anbieter = anbieter
            output = self.trainer.run()
            output['label'] = label
            self.collection.append(output)
            # checkpoint after every run so partial sweeps are not lost
            filename = os.getenv('DB_FILE', 'dbs/auto.json')
            self.collection.to_file(filename)
        except Exception as e:
            traceback.print_exc()
            print(e)
        print('one it done')
# module-level instance so interactive sessions (notebooks) can reuse it
runner = IterationRunner(anbieter_config, select_anbieter, select_ausschreibung, attributes, config, cleanData)

if __name__ == '__main__':
    # runner.collection.import_file('dbs/auto.json')
    runner.run()
    runner.runAttributesEachOne()
    # label, anbieters = next(iter(runner.anbieter_config.items()))
    # print(label)
|
import pandas as pd
import math
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, matthews_corrcoef
from db import connection, engine
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ModelTrainer():
    """Training pipeline: queries positive/negative awards via a self-join
    SQL statement, draws one negative sample per run, cleans the data and
    evaluates the configured sklearn models.
    """

    def __init__(self, select_anbieter, select_ausschreibung, anbieter, config, cleanData, attributes=[]):
        # NOTE(review): mutable default for `attributes` — safe only while
        # callers rebind self.attributes wholesale and never mutate it.
        self.anbieter = anbieter
        self.select_anbieter = select_anbieter
        self.select_ausschreibung = select_ausschreibung
        self.attributes = attributes
        self.config = config
        self.cleanData = cleanData

    def run(self):
        """Full pipeline for the current bidder: sample, clean, train, evaluate."""
        positive_sample, negative_samples = self.createSamples()
        positive_and_negative_samples = self.prepareForRun(
            positive_sample,
            negative_samples
        )
        # most certainly used to resolve the naming functions like getFalseProjectTitle
        merged_samples_for_names = self.prepareUnfilteredRun(
            positive_sample,
            negative_samples
        )
        result = self.trainSpecifiedModels(positive_and_negative_samples)
        return result
        # xTests, yTests = self.trainModel(positive_and_negative_samples)

    def resetSQLData(self):
        """Drop the cached SQL result frames (no-op if nothing is cached)."""
        try:
            del self.positives
            del self.negatives
        except:
            pass

    def createSamples(self):
        """Label the cached positives and draw one negative sample per run.

        NOTE(review): the sample size multiplies by (ratio + 1) — confirm
        whether the '+ 1' is intentional headroom for later deduplication.
        """
        if not hasattr(self, 'positives') or not hasattr(self, 'negatives'):
            self.queryData()
        negative_samples = []
        negative_sample_size = math.ceil(len(self.positives) * (self.config['positive_to_negative_ratio'] + 1))
        for count in range(self.config['runs']):
            # the run counter seeds each draw, keeping runs reproducible
            negative_samples.append(self.negatives.sample(negative_sample_size, random_state=count))
        self.positives['Y'] = 1
        for negative_sample in negative_samples:
            negative_sample['Y']=0
        return (self.positives, negative_samples)

    def queryData(self):
        """Execute both SQL queries and cache the resulting frames."""
        self.positives = self.__runSql(True)
        self.negatives = self.__runSql(False)
        logger.info('sql done')
        return self.positives, self.negatives

    def __runSql(self, response):
        """Run the bidder/tender self-join query as a DataFrame.

        Args:
            response: True selects awards won by self.anbieter, False the
                complement (all other bidders).
        """
        resp = '='
        if (not response):
            resp = '!='
        query = """SELECT * FROM (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag
        INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer)
        INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id)
        INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id)
        INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id)
        INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id)
        INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = zuschlag.meldungsnummer)
        WHERE anbieter.institution {} "{}" ) anbieter
        JOIN (SELECT {} from ((((((beruecksichtigteanbieter_zuschlag
        INNER JOIN zuschlag ON zuschlag.meldungsnummer = beruecksichtigteanbieter_zuschlag.meldungsnummer)
        INNER JOIN anbieter ON beruecksichtigteanbieter_zuschlag.anbieter_id = anbieter.anbieter_id)
        INNER JOIN projekt ON zuschlag.projekt_id = projekt.projekt_id)
        INNER JOIN auftraggeber ON projekt.auftraggeber_id = auftraggeber.auftraggeber_id)
        INNER JOIN ausschreibung ON projekt.projekt_id = ausschreibung.projekt_id)
        INNER JOIN cpv_dokument ON cpv_dokument.meldungsnummer = ausschreibung.meldungsnummer)
        WHERE anbieter.institution {} "{}"
        ) ausschreibung ON ausschreibung.meldungsnummer2 = anbieter.meldungsnummer
        ORDER BY ausschreibung.meldungsnummer2;
        """.format(self.select_anbieter, resp, self.anbieter, self.select_ausschreibung, resp, self.anbieter)
        return pd.read_sql(query, engine)

    def prepareForRun(self, positive_sample, negative_samples):
        """Merge positives with each negative sample and clean the result."""
        # What attributes the model will be trained by
        filters = ['Y', 'projekt_id'] + self.attributes
        positive_and_negative_samples = []
        for negative_sample in negative_samples:
            # Merge positive and negative df into one, only use selected attributes
            merged_samples = positive_sample.append(negative_sample, ignore_index=True)[filters].copy()
            # Clean the data of all selected attributes
            cleaned_merged_samples = self.cleanData(merged_samples, self.attributes)
            positive_and_negative_samples.append(cleaned_merged_samples)
        return positive_and_negative_samples

    def prepareUnfilteredRun(self, positive_sample, negative_samples):
        """Merge positives with each negative sample, keeping all columns."""
        merged_samples_for_names = []
        for negative_sample in negative_samples:
            # Merge positive and negative df into one
            merged_samples_for_names.append(positive_sample.append(negative_sample, ignore_index=True).copy())
        return merged_samples_for_names

    def trainSpecifiedModels(self, positive_and_negative_samples):
        """Train every enabled algorithm and aggregate the evaluation.

        NOTE(review): unlike the other algorithms the decision tree is
        built without random_state — with max_features='sqrt' its runs
        are not reproducible; confirm whether that is intended.
        """
        result = {}
        for algorithm in self.config['enabled_algorithms']:
            if algorithm == 'random_forest':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                min_samples_split = self.config[algorithm]['min_samples_split']
                # the lambda closes over the locals above; safe because it
                # is consumed before the next iteration rebinds them
                classifier = lambda randomState: RandomForestClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    min_samples_split=min_samples_split,
                    random_state=randomState,
                    n_jobs=-1
                )
            elif algorithm == 'gradient_boost':
                n_estimators = self.config[algorithm]['n_estimators']
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                learning_rate = self.config[algorithm]['learning_rate']
                classifier = lambda randomState: GradientBoostingClassifier(
                    n_estimators=n_estimators,
                    max_depth=max_depth,
                    max_features=max_features,
                    learning_rate=learning_rate,
                    random_state=randomState
                )
            elif algorithm == 'decision_tree':
                max_depth = self.config[algorithm]['max_depth']
                max_features = self.config[algorithm]['max_features']
                classifier = lambda randomState: DecisionTreeClassifier(
                    max_depth=max_depth,
                    max_features=max_features
                )
            else:
                raise Exception('enabled algorithm: {} doesn\'t exist.'.format(algorithm))
            result[algorithm] = {}
            xTests, yTests = self.trainModel(positive_and_negative_samples, classifier, algorithm)
            result['attributes'] = self.attributes
            result['anbieter'] = self.anbieter
            result['timestamp'] = datetime.now().isoformat()
            #result[algorithm]['xTests'] = xTests
            #result[algorithm]['yTests'] = yTests
            result[algorithm]['metrics'] = self.config[algorithm]
            evaluation_dataframe =pd.concat([self.__getConfusionMatices(yTests), self.__getAccuracies(yTests)], axis=1, sort=False)
            result[algorithm]['data'] = evaluation_dataframe.to_dict()
            result[algorithm]['metadata'] = self.__getIterationMetadata(evaluation_dataframe)
        return result

    def trainModel(self, positive_and_negative_samples, classifier, algorithm):
        """Fit and evaluate one estimator per run sample.

        Returns (xTests, yTests): per-run feature test frames and result
        frames (label, prediction, correctness, run index).
        """
        xTests = []
        yTests = []
        for idx, df in enumerate(positive_and_negative_samples): # enum to get index
            x_and_y_test, x_and_y_train = self.unique_train_and_test_split(df, random_state=idx)
            # Select all attributes
            xtest = x_and_y_test.drop(['Y'], axis=1)
            xtrain = x_and_y_train.drop(['Y'], axis=1)
            # Only select the response result attributes
            ytest = x_and_y_test['Y']
            ytrain = x_and_y_train['Y']
            # Create the model
            clf = classifier(randomState=idx)
            # Compute cross validation (5 random splits)
            # NOTE(review): validated on the *test* portion only — confirm
            # this is intended rather than cross-validating the train set.
            scores = self.__cross_val_score(clf, xtest, ytest, cv=5)
            print(scores)
            print('Avg. CV Score | {} Run {}: {:.2f}'.format(algorithm, idx, round(sum(scores)/len(scores), 4)))
            # projekt_id only exists to group rows for splitting; it is not
            # a feature
            xtest = xtest.drop(['projekt_id'], axis=1)
            xtrain = xtrain.drop(['projekt_id'], axis=1)
            # Train the model on training sets
            clf = clf.fit(xtrain, ytrain)
            # Predict on the test sets
            prediction = clf.predict(xtest)
            # Convert pandas.series to data frame
            df_ytest = ytest.to_frame()
            # Add run number to df
            df_ytest['run'] = idx
            xtest['run'] = idx
            # add prediction to df
            df_ytest['prediction']= prediction
            # add result of run to df
            df_ytest['correct'] = df_ytest['prediction']==df_ytest['Y']
            # add run to run arrays
            xTests.append(xtest)
            yTests.append(df_ytest)
        return xTests, yTests

    def __getAccuracies(self, dfys):
        """Per-run metrics frame: accuracy (%), MCC, fn_rate (%)."""
        res = pd.DataFrame(columns=['accuracy', 'MCC', 'fn_rate'])
        for dfy in dfys:
            acc = round(accuracy_score(dfy.Y, dfy.prediction), 4)
            # f1 = round(f1_score(dfy.Y, dfy.prediction), 4)
            mcc = matthews_corrcoef(dfy.Y, dfy.prediction)
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            # fn / (tp + fn): share of actual positives missed
            fnr = round(matrix[1][0] / (matrix[1][1] + matrix[1][0]), 4)
            # add row to end of df, *100 for better % readability
            res.loc[len(res)] = [ acc*100, mcc, fnr*100 ]
        return res

    def __getConfusionMatices(self, dfys):
        """Per-run confusion-matrix counts as a tn/tp/fp/fn frame."""
        res = pd.DataFrame(columns=['tn', 'tp', 'fp', 'fn'])
        for dfy in dfys:
            # ConfusionMatrix legende:
            # [tn, fp]
            # [fn, tp]
            matrix = confusion_matrix(dfy.Y, dfy.prediction)
            res.loc[len(res)] = [ matrix[0][0], matrix[1][1], matrix[0][1], matrix[1][0] ]
        # res.loc['sum'] = res.sum() # Summarize each column
        return res

    def __getIterationMetadata(self, df):
        """Aggregate per-run evaluation rows into summary statistics."""
        res = {}
        res['acc_mean'] = df['accuracy'].mean()
        res['acc_median'] = df['accuracy'].median()
        res['acc_min'] = df['accuracy'].min()
        res['acc_max'] = df['accuracy'].max()
        res['acc_quantile_25'] = df['accuracy'].quantile(q=.25)
        res['acc_quantile_75'] = df['accuracy'].quantile(q=.75)
        res['mcc_mean'] = df['MCC'].mean()
        res['mcc_median'] = df['MCC'].median()
        res['mcc_min'] = df['MCC'].min()
        res['mcc_max'] = df['MCC'].max()
        res['mcc_quantile_25'] = df['MCC'].quantile(q=.25)
        res['mcc_quantile_75'] = df['MCC'].quantile(q=.75)
        res['fn_rate_mean'] = df['fn_rate'].mean()
        res['fn_rate_median'] = df['fn_rate'].median()
        res['fn_rate_min'] = df['fn_rate'].min()
        res['fn_rate_max'] = df['fn_rate'].max()
        res['fn_rate_quantile_25'] = df['fn_rate'].quantile(q=.25)
        res['fn_rate_quantile_75'] = df['fn_rate'].quantile(q=.75)
        res['sample_size_mean'] = (df['fp'] + df['fn'] + df['tn'] + df['tp']).mean()
        return res

    def __cross_val_score(self, clf, x_values, y_values, cv):
        """Score `clf` on `cv` random project-grouped splits.

        NOTE(review): Monte-Carlo style validation (repeated random
        splits), not true k-fold; the same estimator object is re-fit on
        every split.
        """
        x_and_y_values = pd.concat([y_values, x_values], axis=1)
        cross_val_scores = []
        for validation_run_index in range(cv):
            x_and_y_test, x_and_y_train = self.unique_train_and_test_split(x_and_y_values, random_state=validation_run_index)
            # Select all feature attributes (drop label and grouping key)
            xtest = x_and_y_test.drop(['projekt_id', 'Y'], axis=1)
            xtrain = x_and_y_train.drop(['projekt_id', 'Y'], axis=1)
            # Only select the response result attributes
            ytest = x_and_y_test['Y']
            ytrain = x_and_y_train['Y']
            clf = clf.fit(xtrain, ytrain)
            prediction = clf.predict(xtest)
            cross_val_scores.append(accuracy_score(ytest, prediction))
        return cross_val_scores

    def unique_train_and_test_split(self, df, random_state):
        """Split `df` so rows of one projekt_id never span both sets.

        Returns (test, train) — note the order.
        """
        run = shuffle(df, random_state=random_state) # run index as random state
        # Get each runs unique projekt_id values
        unique_mn = run.projekt_id.unique()
        # Split the projekt_ids between test and trainings set so there will be no bias in test set
        x_unique_test, x_unique_train = train_test_split(unique_mn, test_size=self.config['test_size'], random_state=random_state)
        # Add the remaining attributes back by projekt_id membership
        x_and_y_test = run[run['projekt_id'].isin(x_unique_test)].copy()
        x_and_y_train = run[run['projekt_id'].isin(x_unique_train)].copy()
        return x_and_y_test, x_and_y_train

    # @param val: a value to be casted to numeric
    # @return a value that has been casted to an integer. Returns 0 if cast was not possible
    def tonumeric(val):
        # NOTE(review): no @staticmethod and no self — only callable as
        # ModelTrainer.tonumeric(val), never on an instance.
        try:
            return int(val)
        except:
            return 0

    # @param val: a string value to be categorised
    # @return unified gatt_wto answer mapped to 1 (yes) / 0 (no or unknown)
    def unifyYesNo(val):
        # NOTE(review): same as tonumeric — class-level call only.
        switcher = {
            'Ja': 1,
            'Sì': 1,
            'Oui': 1,
            'Nein': 0,
            'Nei': 0,
            'Non': 0,
        }
        return switcher.get(val, 0)
--- FILE SEPARATOR ---
import json
import pandas as pd
import warnings
class Collection():
    """Accumulates per-bidder training results and exports them as JSON.

    Each stored item is the dict produced by one ModelTrainer run:
    per-algorithm 'metadata'/'data' plus 'anbieter', 'label' and
    'attributes' keys.
    """

    # algorithms every stored iteration is expected to contain
    algorithms = ['gradient_boost', 'decision_tree', 'random_forest']

    def __init__(self):
        self.list = []

    def append(self, item):
        """Add one iteration-result dict."""
        self.list.append(item)

    def __iter__(self):
        return iter(self.list)

    def get_all_as_df(self, algorithm):
        """Return one metadata row per iteration for a single algorithm.

        NOTE(review): the bare except returns None on *any* failure (bad
        algorithm key, malformed entry) and only emits a warning.
        """
        try:
            tmp = []
            for iteration in self.list:
                tmp.append(iteration[algorithm]['metadata'])
            return pd.DataFrame(tmp, index=[iteration['anbieter'] for iteration in self.list])
        except:
            warnings.warn('Select an algorithm: "random_forest", "gradient_boost" or "decision_tree"')

    def df_row_per_algorithm(self):
        """Flatten the collection: one DataFrame row per (iteration, algorithm).

        NOTE(review): `output` aliases the stored metadata dict, so the
        extra keys added below are written back into self.list as well.
        """
        tmp = []
        for iteration in self.list:
            for algorithm in self.algorithms:
                output = iteration[algorithm]['metadata']
                evaluation_dataframe = pd.DataFrame.from_dict(iteration[algorithm]['data'])
                # std deviations are not part of the stored metadata yet
                output['acc_std'] = evaluation_dataframe['accuracy'].std()
                # MCC is stored in [-1, 1]; scale to percent before the std
                evaluation_dataframe['MCC'] = evaluation_dataframe['MCC']*100
                output['mcc_std'] = evaluation_dataframe['MCC'].std()
                output['fn_std'] = evaluation_dataframe['fn_rate'].std()
                output['anbieter'] = iteration['anbieter']
                output['label'] = iteration['label']
                output['algorithm'] = algorithm
                output['attributes'] = ",".join(iteration['attributes'])
                tmp.append(output)
        return pd.DataFrame(tmp)

    def to_json(self, **kwargs):
        """Serialize the collection via json.dumps (kwargs passed through)."""
        return json.dumps(self.list, **kwargs)

    def to_file(self, filename):
        """Write the collection to a pretty-printed, key-sorted JSON file."""
        with open(filename, 'w') as fp:
            json.dump(self.list, fp, indent=4, sort_keys=True)

    def import_file(self, filename, force=False):
        """Load a previously exported JSON file; refuses to clobber unless force."""
        if len(self.list) and not force:
            warnings.warn("Loaded Collection, pls add force=True")
        else:
            with open(filename, 'r') as fp:
                self.list = json.load(fp)
|
{
"imported_by": [],
"imports": [
"/train.py",
"/collection.py"
]
}
|
badgerlordy/smash-bros-reader
|
/smash_reader/smash.py
|
from datetime import datetime
import json
from logger import log_exception
import numpy as np
import os
from PIL import Image, ImageTk
import platform
from queue import Queue, Empty
import requests
import smash_game
import smash_utility as ut
import smash_watcher
from sys import argv, excepthook
import time
import tkinter as tk
# NOTE(review): this rebinds the name imported from sys in *this* module
# only — it does not install the handler globally; `sys.excepthook =
# log_exception` is presumably what was intended. TODO confirm.
excepthook = log_exception
TITLE = 'SmashBet Screen Watcher'
# master switch for _print() debug output; toggled via the Debug menu
output = False
def _print(*args, **kwargs):
    """print() wrapper that tags GUI messages; muted while `output` is False."""
    if not output:
        return
    print('<GUI>', *args, **kwargs)
# directory this module lives in; used to locate bundled resources
BASE_DIR = os.path.realpath(os.path.dirname(__file__))
# theme palette: background shades and foreground accent colours
BG = ['#282C34', '#383D48']
FG = ['#9098A6', '#9DA5B4', '#ABB3BF', '#E06C75', '#61AFEF', '#56B6C2', '#98C379']
def config_grids(widget, rows=[], columns=[]):
    """Configure grid row/column stretch weights on a Tk *widget*.

    ``rows`` / ``columns`` are sequences of integer weights applied to grid
    indices 0..len-1.  The mutable defaults are never mutated here, and are
    kept for interface compatibility.  (The original built throwaway list
    comprehensions purely for their side effects; plain loops are clearer.)
    """
    for index, weight in enumerate(rows):
        widget.rowconfigure(index, weight=weight)
    for index, weight in enumerate(columns):
        widget.columnconfigure(index, weight=weight)
class Menubar(tk.Menu):
    """Application menu bar: File (restart/quit) and Debug (console clearing,
    per-module output toggles, template capture, debug hooks)."""
    def __init__(self, master):
        super().__init__(master)
        self.master = master
        self.file_menu = tk.Menu(self, tearoff=0)
        # self.file_menu.add_command(label='Load State', command=self.load_state)
        # self.file_menu.add_command(label='Save State', command=self.save_state)
        # self.file_menu.add_separator()
        self.file_menu.add_command(label='Restart', command=self.master.restart)
        self.file_menu.add_command(label='Quit', command=self.master.quit)
        self.debug_menu = tk.Menu(self, tearoff=0)
        self.debug_menu.add_command(label='Clear console', command=ut.clear_console)
        # Submenu toggling each module's `output` print flag; entry indices
        # (0..2) must match the toggle_output() calls below.
        self.output_menu = tk.Menu(self, tearoff=0)
        self.output_menu.add_command(
            label='Silence watcher', command=lambda: self.toggle_output(smash_watcher, 'watcher', 0)
        )
        self.output_menu.add_command(
            label='Silence game', command=lambda: self.toggle_output(smash_game, 'game', 1)
        )
        self.output_menu.add_command(
            label='Silence utility', command=lambda: self.toggle_output(ut, 'utility', 2)
        )
        self.debug_menu.add_cascade(label='Outputs', menu=self.output_menu)
        self.debug_menu.add_separator()
        self.debug_menu.add_command(label='Print game data', command=lambda: print(self.master.watcher.game.serialize(images_bool=False)))
        self.debug_menu.add_separator()
        self.debug_menu.add_command(label='Capture cards_id template', command=ut.capture_cards_id)
        self.debug_menu.add_command(label='Character name debugging', command=self.master.character_name_debugging)
        self.debug_menu.add_command(label='Click spectate', command=self.master.click_spectate)
        self.add_cascade(label='File', menu=self.file_menu)
        self.add_cascade(label='Debug', menu=self.debug_menu)
    def toggle_output(self, module, name, index):
        """Flip *module*.output and relabel entry *index* of the Outputs menu."""
        if module.output:
            self.output_menu.entryconfig(index, label=f'Unsilence {name}')
        else:
            self.output_menu.entryconfig(index, label=f'Silence {name}')
        module.output = not module.output
    def load_state(self):
        """Load a previously saved game state dict, or None if no file exists.
        Currently unused (menu entries are commented out above)."""
        path = os.path.join(BASE_DIR, 'game_state.json')
        if os.path.isfile(path):
            with open(path, 'r') as infile:
                return json.load(infile)
        else:
            return None
    def save_state(self):
        """Persist the current game dict next to this file.
        Currently unused (menu entries are commented out above)."""
        game = self.master.game
        if game:
            path = os.path.join(BASE_DIR, 'game_state.json')
            with open(path, 'w+') as outfile:
                json.dump(game, outfile)
class PlayerFrame(tk.Frame):
    """Frame rendering one player's number, character, GSP, and name bitmap.

    *player_info* is a serialized Player dict (keys: number, character_name,
    gsp, player_name_image).
    """
    def __init__(self, master, player_info, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        self.info = player_info
        config_grids(self, rows=[1, 1], columns=[1, 1])
        self.player_number_label = tk.Label(self, text=f'Player {self.info["number"]}', bg=self['background'])
        self.player_number_label.grid(row=0, column=0, sticky='nsw', padx=10)
        self.character_name_label = tk.Label(
            self, text=f'Character: {self.info["character_name"].title()}', bg=self['background']
        )
        self.character_name_label.grid(row=0, column=1, sticky='nsw', padx=10)
        self.gsp_label = tk.Label(self, text=f'GSP: {self.info["gsp"]}', bg=self['background'])
        self.gsp_label.grid(row=1, column=0, sticky='nsw', padx=10)
        # The player name arrives as a raw b&w pixel array; render it as a bitmap.
        arr = np.array(self.info['player_name_image'])
        try:
            img = Image.fromarray(arr.astype('uint8'))
            img = img.resize((200, 30), Image.NEAREST)
            img = img.convert('1').tobitmap()
            bitmap = ImageTk.BitmapImage(data=img)
            self.player_name_label = tk.Label(self, image=bitmap, bg=self.master['background'])
            # Keep a reference so Tk's image isn't garbage collected.
            self.player_name_label.image = bitmap
            self.player_name_label.grid(row=1, column=1, sticky='nw', padx=10)
        except TypeError:
            _print(arr)
            _print('Image data corrupted')
            try:
                ut.dump_image_data(arr)
                _print('Image data successfully dumped')
            except Exception:
                # Fix: was a bare `except:` which would also swallow
                # SystemExit/KeyboardInterrupt; the best-effort dump behavior
                # is otherwise preserved.
                _print('Failed to dump image data')
class TeamFrame(tk.Frame):
    """Frame showing one team: placement, total GSP, and one PlayerFrame per
    member.  *team_info* is a serialized Team dict."""
    def __init__(self, master, team_info, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        self.info = team_info
        self.build_player_frames()
    def build_player_frames(self):
        """Lay out the GSP/placement labels and a PlayerFrame per player."""
        # Pastel per-team card colors (lighter than ut.COLORS['CARDS']).
        COLORS = {
            'RED': (252, 208, 197),
            'BLUE': (163, 220, 248),
            'YELLOW': (246, 237, 166),
            'GREEN': (160, 235, 186)
        }
        if self.info['placement']:
            # NOTE(review): this label is created but never gridded — confirm
            # whether it should be placed in the layout.
            self.placement_label = tk.Label(
                self, bg=self['background'], fg=BG[0], text=f'{self.info["placement"]} place'
            )
        self.info['players'].sort(key=lambda player: player['number'])
        player_frames = []
        player_len = len(self.info['players'])
        self.gsp_label = tk.Label(self, bg=self['background'], fg=BG[0], text=f'Team GSP: {self.info["gsp_total"]}')
        self.gsp_label.grid(row=0, column=1, columnspan=player_len, sticky='nsw')
        config_grids(self, rows=[1]*(player_len+1), columns=[1, 1])
        # Row 0 (the GSP label row) gets no vertical stretch.
        config_grids(self, rows=[0])
        for i, player in enumerate(self.info['players']):
            hex_color = ut.rgb_to_hex(COLORS[self.info['color']])
            player_frames.append(PlayerFrame(self, player, bg=hex_color))
            player_frames[i].grid(row=i+1, column=0, columnspan=2, sticky='nsew', padx=10, pady=(0, 10))
class GameFrame(tk.Frame):
    """Main panel displaying the current game's info header and team frames."""
    def __init__(self, master, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        # StringVars backing the header labels; set via display_info().
        self.game_number = tk.StringVar()
        self.game_mode = tk.StringVar()
        self.game_map = tk.StringVar()
        self.game_duration = tk.StringVar()
        config_grids(self, rows=[0, 1], columns=[1])
        self.info_frame = tk.Frame(self, bg=BG[0])
        config_grids(self.info_frame, rows=[1, 1], columns=[1, 1])
        self.info_frame.grid(row=0, column=0, sticky='nsew')
        self.game_mode_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_mode)
        self.game_mode_label.grid(row=0, column=0, sticky='nsew')
        self.game_map_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_map)
        self.game_map_label.grid(row=0, column=1, sticky='nsew')
        self.game_number_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_number)
        self.game_number_label.grid(row=1, column=0, sticky='nsew')
        self.game_duration_label = tk.Label(self.info_frame, bg=BG[0], fg=FG[0], textvariable=self.game_duration)
        self.game_duration_label.grid(row=1, column=1, sticky='nsew')
    def display_info(self):
        """Pull the watcher's current game, refresh the header, rebuild teams."""
        self.master.game = self.master.watcher.game.serialize()
        game = self.master.game
        self.game_number.set(f'Game #{game["number"]}')
        self.game_map.set(f'Map: {game["map"]}')
        self.game_mode.set(f'Mode: {game["mode"]}')
        if game['start_time']:
            self.game_duration.set(
                f'Game began {time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(game["start_time"]))}'
            )
        elif game['duration']:
            self.game_duration.set(f'Game completed in {game["duration"]} seconds')
        self.build_team_frames(game)
    def build_team_frames(self, game):
        """Destroy and rebuild one TeamFrame per team, ordered by color."""
        color_order = ['RED', 'BLUE', 'YELLOW', 'GREEN']
        if hasattr(self, 'teams_frame'):
            self.teams_frame.destroy()
        self.teams_frame = tk.Frame(self, bg=BG[1])
        self.teams_frame.grid(row=1, column=0, sticky='nsew')
        team_len = len(game['teams'])
        config_grids(self.teams_frame, rows=[1]*team_len, columns=[1])
        game['teams'].sort(key=lambda team: color_order.index(team['color']))
        team_frames = []
        for team_index, team in enumerate(game['teams']):
            hex_color = ut.rgb_to_hex(ut.COLORS['CARDS'][team['color']])
            team_frames.append(TeamFrame(self.teams_frame, team, bg=hex_color))
            team_frames[team_index].grid(row=team_index, column=0, sticky='nsew', pady=(0, 10))
class WatcherFrame(tk.Frame):
    """Start/stop button plus status line for the background Watcher thread."""
    def __init__(self, master, *args, **kwargs):
        super().__init__(master, *args, **kwargs)
        self.master = master
        config_grids(self, rows=[0, 0], columns=[1])
        self.toggle_watcher_button = tk.Button(
            self, bg=FG[1], fg=BG[1], bd=0, text='Start watcher', command=self.toggle_watcher
        )
        self.toggle_watcher_button.grid(row=0, column=0, sticky='ew', pady=(0, 5))
        self.watcher_status = tk.Label(self, text='Watcher stopped', bg=BG[0], fg=FG[3])
        self.watcher_status.grid(row=1, column=0, sticky='ew')
    def toggle_watcher(self):
        """Stop the watcher when running, otherwise build and start a new one."""
        # Fix: Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # supported spelling with identical behavior.
        if self.master.watcher.is_alive():  # STOP
            self.master.watcher_queue.put('quit')
            self.master.watcher.join()
            self.toggle_watcher_button.config(text='Start watcher')
            self.watcher_status.config(text='Watcher stopped', fg=FG[3])
        else:  # START
            # A Thread can only be started once; build a fresh Watcher.
            self.master.watcher = smash_watcher.Watcher(self.master.watcher_queue, self.master.queue)
            self.master.watcher.start()
            self.toggle_watcher_button.config(text='Stop watcher')
            self.watcher_status.config(fg=FG[6])
            # Reset the game panel for the new session.
            self.master.game_frame.destroy()
            self.master.game_frame = GameFrame(self.master, bg=BG[1])
            self.master.game_frame.grid(row=1, column=0, sticky='nsew', padx=10, pady=10)
class Window(tk.Frame):
    """Top-level application frame: owns the Watcher thread, the queues, and
    the GameFrame/WatcherFrame panels; polls the GUI queue every 100 ms."""
    def __init__(self, master, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.master = master
        self.watcher = None
        self.cont = True                 # loop() keeps polling while True
        self.queue = Queue()             # watcher -> GUI messages
        self.watcher_queue = Queue()     # GUI -> watcher commands ('quit')
        self.character_name_debugging_enabled = False
        self.watcher = smash_watcher.Watcher(self.watcher_queue, self.queue)
        self.watcher.daemon = True
        self.game = None                 # last serialized game dict
        self.restart_flag = False        # set by restart(); checked by run_gui()
        self.pack(fill=tk.BOTH, expand=True)
        self.master.title(TITLE)
        config_grids(self, rows=[0, 1], columns=[1])
        self.game_frame = GameFrame(self, bg=BG[1])
        self.game_frame.grid(row=1, column=0, sticky='nsew', padx=10, pady=10)
        self.watcher_frame = WatcherFrame(self, bg=BG[0])
        self.watcher_frame.grid(row=0, column=0, sticky='nsew', padx=10, pady=10)
        self.menubar = Menubar(self)
        self.master.config(menu=self.menubar)
        self.loop()
    def loop(self):
        """Poll the watcher queue, then reschedule via Tk's event loop."""
        if self.cont:
            self.check_queue()
            self.master.after(100, self.loop)
    def check_queue(self):
        """Drain one message: 'update' refreshes the game panel; a mapping
        with a 'status' key updates the watcher status label."""
        try:
            item = self.queue.get(block=False)
            if item == 'update':
                self.game_frame.display_info()
            # NOTE(review): when item is the string 'update' this is a
            # substring test (False); status messages appear to be dicts —
            # confirm the queue protocol.
            if 'status' in item:
                self.watcher_frame.watcher_status.config(text=item['status'])
        except Empty:
            pass
    def quit(self):
        """Stop the poll loop and tear down the Tk root."""
        self.cont = False
        self.master.destroy()
    def restart(self):
        """Quit, flagging run_gui() to relaunch the script afterwards."""
        self.quit()
        self.restart_flag = True
    def character_name_debugging(self):
        """Toggle character-name debug mode: lock the watcher on phase 1 and
        flip the smash_game module flag."""
        if not self.character_name_debugging_enabled:
            self.watcher.lock(1)
            smash_game.character_name_debugging_enabled = True
        else:
            self.watcher.unlock()
            smash_game.character_name_debugging_enabled = False
        self.character_name_debugging_enabled = not self.character_name_debugging_enabled
    def click_spectate(self):
        """Debug hook: mark the current game cancelled with reason 'DEBUG'."""
        self.watcher.game.cancelled = 'DEBUG'
def run_gui():
    """Build the Tk root window, optionally auto-start the watcher, and block
    in the main loop; on exit stop the watcher and honor a requested restart."""
    root = tk.Tk()
    root.geometry('540x550')
    window = Window(root, bg=BG[0])
    if ut.SETTINGS['AUTO_START_WATCHER'].lower() == 'true':
        window.watcher_frame.toggle_watcher()
    root.mainloop()
    # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if window.watcher.is_alive():
        window.watcher_queue.put('quit')
        window.watcher.join()
    if window.restart_flag:
        # Relaunch this script with the platform-appropriate invocation.
        system = platform.system()
        if system == 'Windows':
            os.system(__file__)
        if system == 'Linux':
            os.system('python3 ' + __file__)
def headless():
    """Run the watcher without a GUI until the user types stop/exit/quit."""
    queue = Queue()
    watcher_queue = Queue()
    watcher = smash_watcher.Watcher(watcher_queue, queue)
    watcher.start()
    _input = ''
    while _input not in ['stop', 'exit', 'quit']:
        _input = input('>: ')
    # Fix: the original called put()/join() on an undefined `key_capture`
    # name here, raising NameError on shutdown; only the watcher needs
    # stopping.
    watcher_queue.put('quit')
    watcher.join()
if __name__ == '__main__':
    # Banner with app title and a launch timestamp.
    print(f'\n\n{"*" * 40} {TITLE} {"*" * 40}')
    print(f'<<<{datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d %H:%M:%S")}>>>')
    # NOTE(review): len(argv) is always >= 1 (argv[0] is the script path),
    # so this outer guard is always true.
    if len(argv):
        if '-nogui' in argv:
            headless()
        else:
            run_gui()
|
from datetime import datetime
import os
from sys import __excepthook__
from time import time
from traceback import format_exception
# Directory of this module; error.log is written alongside it.
BASE_DIR = os.path.realpath(os.path.dirname(__file__))
def log_exception(type, value, tb):
    """Excepthook replacement: prepend the formatted traceback (newest first)
    to error.log, then delegate to the default ``sys.__excepthook__``."""
    traceback_lines = format_exception(type, value, tb)
    log_path = os.path.join(BASE_DIR, 'error.log')
    previous = '\n'
    if os.path.isfile(log_path):
        with open(log_path, 'r') as logfile:
            previous += logfile.read()
    stamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S')
    entry = f'[{stamp}]\n{("".join(traceback_lines))}'
    with open(log_path, 'w+') as logfile:
        logfile.write(entry + previous)
    __excepthook__(type, value, tb)
|
{
"imported_by": [],
"imports": [
"/smash_reader/logger.py"
]
}
|
badgerlordy/smash-bros-reader
|
/smash_reader/smash_game.py
|
import copy
import difflib
import json
from logger import log_exception
import numpy as np
import os
from PIL import Image
import re
import smash_utility as ut
import sys
import threading
import time
sys.excepthook = log_exception
character_name_debugging_enabled = False
output = True
def _print(*args, **kwargs):
if output:
args = list(args)
args.insert(0, '<Game>')
print(*args, **kwargs)
# Pixel width of one player card on the lobby screen.
CARD_WIDTH = 398
# Horizontal pixel spacing between stock icons in-game.
STOCK_SPACING = 26
# Known fighter names, lower-cased for fuzzy matching against OCR output.
with open('fighter_list.json', 'r') as infile:
    CHARACTER_NAMES = json.load(infile)
CHARACTER_NAMES = [name.lower() for name in CHARACTER_NAMES]
BASE_DIR = os.path.realpath(os.path.dirname(__file__))
# Frequent OCR misreads mapped to the intended fighter name.
CHARACTER_NAME_FIXES = {
    'lemmy': 'lenny',
    'lemmv': 'lenny'
}
# OCR misreads inside map names, replaced verbatim.
MAP_NAME_FIXES = {
    'Figure-S': 'Figure-8',
    'HiII': 'Hill'
}
class ImageProcessor(threading.Thread):
    """Placeholder worker thread for off-loading image processing.

    Fix: the original __init__ omitted super().__init__(), leaving the
    Thread machinery uninitialized so start() would raise RuntimeError;
    initialize the base class so instances are usable.
    """
    def __init__(self):
        super().__init__()
class Player:
    """One lobby card's worth of player data, read via template matching/OCR."""
    def __init__(self):
        self.player_name_image = []    # b&w numpy array of the name region
        self.character_name = ''
        self.number = 0                # in-lobby player number (P1..)
        self.gsp = 0                   # Global Smash Power as read from the card
        self.stock_template_image = []
        self.stock_count = 0
    def serialize(self, images_bool=True):
        """Return a JSON-friendly dict copy; image arrays become nested lists,
        or None when *images_bool* is false."""
        _copy = copy.copy(self)
        img = _copy.player_name_image.tolist()
        for i, row in enumerate(img):
            img[i] = [int(bool(pixel)) for pixel in img[i]]
        if not images_bool:
            _copy.player_name_image = None
            _copy.stock_template_image = None
        else:
            if len(_copy.player_name_image):
                _copy.player_name_image = _copy.player_name_image.tolist()
            if len(_copy.stock_template_image):
                _copy.stock_template_image = _copy.stock_template_image.tolist()
        return _copy.__dict__
    def read_card(self, card):
        """Extract all fields from one cropped lobby card image."""
        self.get_character_name(card)
        self.crop_player_name(card)
        self.read_number(card)
        self.read_gsp(card)
    # @ut.time_this
    def get_character_name(self, card):
        """Identify the character: template match first; fall back to OCR with
        fuzzy matching; persist unreadable crops as new templates."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['CHARACTER_NAME'])
        pils = ut.stencil(crop)
        pil = pils[-1]
        template_name, sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES'])
        if sim > 95:
            # Template names look like '<character>-<n>'; keep the name part.
            self.character_name = re.match('(.+)(-\d*)', template_name).group(1)
        else:
            name_as_read = ut.read_image(pil).lower()
            if name_as_read in CHARACTER_NAME_FIXES:
                name_as_read = CHARACTER_NAME_FIXES[name_as_read]
            name = difflib.get_close_matches(name_as_read, CHARACTER_NAMES, n=1)
            if len(name):
                name = name[0]
                if character_name_debugging_enabled:
                    # Dump near-miss crops for building better templates.
                    _template_name, _sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES_DUMP'])
                    if _sim < 99:
                        num = 1
                        for _name in ut.TEMPLATES['CHARACTER_NAMES_DUMP']:
                            _print(name, _name)
                            if name in _name:
                                num += 1
                        filename = f'{name}-{num}.png'
                        path = os.path.join(BASE_DIR, 'templates', 'character_names_dump', filename)
                        pil.save(path)
                self.character_name = name
            else:
                # '...' marks an unreadable name; the Game cancels on it.
                self.character_name = '...'
                template, sim = ut.find_most_similar(pil, ut.TEMPLATES['CHARACTER_NAMES'], thresh=95)
                if sim >= 95:
                    self.character_name = template.split('-')[0]
                else:
                    # Save the crop as a numbered 'unreadable' template.
                    template, sim = ut.find_most_similar(pil, ut.TEMPLATES['UNREADABLE'], thresh=95)
                    if sim < 95:
                        nums = list(ut.TEMPLATES['UNREADABLE'].keys())
                        if len(nums):
                            nums.sort(key=lambda num: int(num), reverse=True)
                            num = int(nums[0]) + 1
                        else:
                            num = 1
                        filename = f'{num}.png'
                        ut.TEMPLATES['UNREADABLE'][num] = pil
                        pil.save(os.path.join(ut.TEMPLATES_DIR, 'unreadable', filename))
            _print(f'{name_as_read.rjust(30)} --> {self.character_name}')
            if False:
                for i, img in enumerate(pils):
                    img.save(f'misc/character_names/{self.character_name}-{i}.png')
    # @ut.time_this
    def crop_player_name(self, card):
        """Store a thresholded b&w array of the player-name region."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['NAME'])
        img, self.player_name_image = ut.convert_to_bw(crop, 120, False)
    # @ut.time_this
    def read_number(self, card):
        """Template-match the P<number> badge to get the player number."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['NUMBER'])
        # crop.save(f'{time.time()}.png')
        templates = {t:ut.TEMPLATES['LOBBY'][t] for t in ut.TEMPLATES['LOBBY'] if re.match('P\d+', t)}
        template_name, sim = ut.find_most_similar(crop, templates)
        num = int(os.path.splitext(template_name)[0].split('P')[1])
        # pil, arr = convert_to_bw(crop, 1, False)
        # num = read_image(pil, 'player_number')[-1]
        # self.number = int(num)
        self.number = num
    # @ut.time_this
    def read_gsp(self, card):
        """OCR the GSP figure (digits + commas) into an int."""
        crop = card.crop(ut.COORDS['LOBBY']['PLAYER']['GSP'])
        text = ut.read_image(crop, 'gsp')
        self.gsp = int(text.replace(',', ''))
class Team:
    """A colored team of players with an aggregate GSP and final placement."""

    def __init__(self, color):
        self.color = color          # team color name, e.g. 'RED'
        self.players = []           # Player instances on this team
        self.gsp_total = 0          # running sum of member GSP values
        self.placement = ''         # e.g. '1st' once results are read

    def serialize(self, images_bool=True):
        """Return a plain-dict view with players serialized recursively."""
        shallow = copy.copy(self)
        shallow.players = [member.serialize(images_bool) for member in self.players]
        return shallow.__dict__

    def add_player(self, player):
        """Register *player* and fold their GSP into the team total."""
        self.players.append(player)
        self.gsp_total += player.gsp
class Game:
    """Aggregated state for one online match, filled in screen-by-screen.

    The Watcher feeds it screenshots; the read_* methods mutate fields such
    as mode/map/teams/winning_color as the match progresses.
    """
    def __init__(self, num=1):
        self.number = num            # sequential game number for this session
        self.mode = ''               # 'Stock' / 'Time' / 'Stamina' as OCR'd
        self.map = ''
        self.team_mode = False       # True for 2 teams with > 2 total players
        self.teams = []              # Team instances
        self.player_count = 0
        self.winning_color = ''
        self.start_time = 0          # epoch seconds at fight start
        self.duration = 0
        self.cancelled = ''          # reason string when the game is abandoned
        self.colors_changed = False
    def serialize(self, images_bool=True):
        """Return a JSON-friendly dict copy with teams serialized recursively."""
        teams = [team.serialize(images_bool) for team in self.teams]
        _copy = copy.copy(self)
        _copy.teams = teams
        return _copy.__dict__
    def load(self, data):
        """Restore fields from a dict produced by serialize()."""
        self.__dict__.update(data)
    def read_card_screen(self, card_screen):
        """Read mode/map plus every player card from the lobby card screen."""
        self.read_basic_info(card_screen)
        self.read_cards(card_screen)
    @ut.time_this
    def read_basic_info(self, screen):
        """OCR the 'mode / map' banner and apply known map-name OCR fixes."""
        crop = screen.crop(ut.COORDS['LOBBY']['GAME_INFO'])
        text = ut.read_image(crop)
        splits = text.split(' / ')
        self.mode = splits[0]
        self.map = splits[1]
        for map_str in MAP_NAME_FIXES:
            if map_str in self.map:
                # Fix: str.replace returns a new string; the original discarded
                # the result, so the map-name fixes never took effect.
                self.map = self.map.replace(map_str, MAP_NAME_FIXES[map_str])
    @ut.time_this
    def read_cards(self, screen):
        """Locate player cards by scanning a marker pixel row, read each card,
        and group players into colored teams.

        Cancels the game (and presses 'b' to back out) on an unreadable
        character name, or on duplicate characters in free-for-all.
        """
        # screen.save('screen.png')
        id_slice = screen.crop(ut.COORDS['LOBBY']['CARDS_SLICE_IDS'])
        pil, cv = ut.convert_to_bw(id_slice, threshold=220, inv=False)
        # pil.save('slice.png')
        color_slice = screen.crop(ut.COORDS['LOBBY']['CARDS_SLICE_COLORS'])
        id_arr = np.asarray(pil)
        color_arr = np.asarray(color_slice)
        players = []
        skip = 0
        id_pixels = [p for row in id_arr for p in row]
        color_pixels = [p for row in color_arr for p in row]
        players = []
        for i, id_pixel in enumerate(id_pixels):
            if skip:
                skip -= 1
            elif id_pixel == 255:
                # A white marker pixel anchors a card; crop it and the team
                # color sample a few pixels to the left.
                card_boundary = (i - 62, 375, i + 341, 913)
                crop = screen.crop(card_boundary)
                color = ut.match_color(arr=color_pixels[i - 5], mode='CARDS')[0]
                player = Player()
                player.read_card(crop)
                if player.character_name == '...':
                    _print('GAME CANCELLED DUE TO UNREADABLE CHARACTER NAME')
                    self.cancelled = 'UNREADABLE_CHARACTER_NAME'
                    ut.send_command('b')
                else:
                    players.append(player.character_name)
                    self.player_count += 1
                    team = next((t for t in self.teams if t.color == color), None)
                    if not team:
                        team = Team(color)
                        self.teams.append(team)
                    team.add_player(player)
                # Skip past the remainder of this card's pixels.
                skip = 340
        if len(self.teams) == 2 and self.player_count > 2:
            self.team_mode = True
        elif len(set(players)) < len(players):
            # Duplicate characters are only disqualifying in free-for-all.
            _print('GAME CANCELLED DUE TO DUPLICATE CHARACTER IN FFA')
            self.cancelled = 'DUPLICATE_CHARACTER'
            ut.send_command('b')
    def read_start_screen(self, screen):
        """On the match-start screen, re-check colors (FFA only) and dispatch
        per-mode setup; the passed *screen* is replaced by a fresh capture."""
        time.sleep(1)
        screen = ut.capture_screen()
        if not self.team_mode and not self.cancelled:
            self.colors_changed = self.fix_colors(screen)
        if self.mode == 'Stock':
            # self.get_stock_templates(screen)
            pass
        elif self.mode == 'Time':
            pass
        elif self.mode == 'Stamina':
            pass
        else:
            _print(f'unknown mode: {self.mode}')
    # @ut.time_this
    def get_stock_templates(self, screen):
        """Count stock icons per player by sliding right from each HUD edge.

        NOTE(review): the per-player counts are computed but never stored —
        confirm this is intentionally unfinished.
        """
        stocks = []
        for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
            stock_template_coords = list(ut.COORDS['GAME']['PLAYER']['STOCK_TEMPLATE'])
            stock_template_coords[0] = edge - stock_template_coords[0]
            stock_template_coords[2] = edge - stock_template_coords[2]
            template = screen.crop(stock_template_coords)
            player_stock_count = 1
            while True:
                stock_template_coords[0] += STOCK_SPACING
                stock_template_coords[2] += STOCK_SPACING
                crop = screen.crop(stock_template_coords)
                sim = ut.avg_sim(crop, template)
                if sim > 95:
                    player_stock_count += 1
                else:
                    break
    def fix_colors(self, screen):
        """Rebuild self.teams from the in-game HUD colors.

        Returns True (after swapping in the rebuilt teams) when any lobby
        color disagrees with the in-game one, else False.
        """
        info = self.get_character_details_game(screen)
        players = [player for team in self.teams for player in team.players]
        _players = copy.copy(players)
        _teams = []
        _print('Fixing colors:')
        for i, character_info in enumerate(info):
            name, color = character_info
            player = next((p for p in players if p.character_name == name), None)
            team = Team(color)
            team.add_player(player)
            _teams.append(team)
            _print(f'\t{team.color} - {player.character_name}')
        for team in self.teams:
            color = team.color
            character_name = team.players[0].character_name
            _team = next((t for t in _teams if t.color == color), None)
            if not _team or _team.players[0].character_name != character_name:
                self.teams = _teams
                return True
        return False
    def get_character_templates_lobby(self, screen):
        """Debug helper: save one character-template crop per player to disk."""
        characters = []
        for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
            char_template_coords = list(ut.COORDS['GAME']['PLAYER']['CHARACTER_TEMPLATE'])
            char_template_coords[0] = edge - char_template_coords[0]
            char_template_coords[2] = edge - char_template_coords[2]
            template = screen.crop(char_template_coords)
            template.save(f'{time.time()}.png')
    def get_character_templates_game(self, screen):
        """Debug helper: save one in-game character-template crop per player."""
        characters = []
        for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
            # Fix: 'CHARACTER_TEMPLAT' is not a key in ut.COORDS['GAME']['PLAYER']
            # and raised KeyError; the defined key is 'CHARACTER_TEMPLATE'.
            char_template_coords = list(ut.COORDS['GAME']['PLAYER']['CHARACTER_TEMPLATE'])
            char_template_coords[0] = edge - char_template_coords[0]
            char_template_coords[2] = edge - char_template_coords[2]
            template = screen.crop(char_template_coords)
            template.save(f'{time.time()}.png')
    def get_character_details_game(self, screen):
        """Read (character_name, color) per player from the in-game HUD,
        re-running the pass until at least one name OCRs successfully."""
        info = []
        rerun = True
        while rerun:
            for edge in ut.COORDS['GAME']['PLAYER']['INFO'][self.player_count]:
                color_coords = list(ut.COORDS['GAME']['PLAYER']['COLOR'])
                color_coords[0] = edge - color_coords[0]
                color_coords[2] = edge - color_coords[2]
                color_pixel = screen.crop(color_coords)
                color, _ = ut.match_color(pixel=color_pixel, mode='GAME')
                char_template_coords = list(ut.COORDS['GAME']['PLAYER']['NAME'])
                char_template_coords[0] = edge - char_template_coords[0]
                char_template_coords[2] = edge - char_template_coords[2]
                template = screen.crop(char_template_coords)
                bw, _ = ut.convert_to_bw(template)
                name_as_read = ut.read_image(bw).lower()
                if name_as_read:
                    rerun = False
                    if name_as_read in CHARACTER_NAME_FIXES:
                        name_as_read = CHARACTER_NAME_FIXES[name_as_read]
                    name = difflib.get_close_matches(name_as_read, CHARACTER_NAMES, n=1)
                    if len(name):
                        _print(f'{name_as_read.rjust(30)} --> {name}')
                        info.append((name[0], color))
                    else:
                        # The Pokémon Trainer HUD shows the active Pokémon's name.
                        trainer_names = ['squirtle', 'charizard', 'ivysaur']
                        name = difflib.get_close_matches(name_as_read, trainer_names, n=1)
                        if len(name):
                            info.append(('pokémon trainer', color))
                        else:
                            _print(f'Can\'t read <{name_as_read}>')
                            # template.show()
                            # template.save(f'{time.time()}.png')
                else:
                    _print(f'Can\'t read <{name_as_read}>')
        return info
    def wait_for_go(self):
        """Block until the fight-start splash disappears, then stamp start_time.

        NOTE(review): ut.COORDS['GAME'] has no '' key, so this raises KeyError
        as written — confirm the intended coordinate key before relying on it.
        """
        coords = ut.COORDS['GAME']['']
        template = ut.TEMPLATES['IDS']['FIGHT_START']
        screen = ut.capture_screen()
        crop = screen.crop(coords)
        while ut.avg_sim(crop, template) > 85:
            screen = ut.capture_screen()
            crop = screen.crop(coords)
            time.sleep(0.1)
        self.start_time = time.time()
    def read_end_screen(self, screen):
        """Placeholder: the 'GAME!' end screen is not parsed yet."""
        pass
    def read_results_screen(self, screen):
        """Determine the winning color from the results screen and mark the
        winning team's placement as '1st'."""
        if self.team_mode:
            coords = ut.COORDS['FINAL']['VICTORY_TEAM']
            templates = ut.TEMPLATES['FINAL']
            crop = screen.crop(coords)
            sim_template = ut.find_most_similar(crop, templates)
            color = sim_template[0].split('_')[0]
            self.winning_color = color
            _print(self.winning_color)
        else:
            coords = ut.COORDS['FINAL']
            first_place_pixel = screen.crop(coords['VICTORY_PLAYER'])
            self.winning_color, sim = ut.match_color(pixel=first_place_pixel, mode='RESULTS')
            _print(self.winning_color)
        team = next((t for t in self.teams if t.color == self.winning_color), None)
        team.placement = '1st'
        # print(self.serialize())
|
from datetime import datetime
import os
from sys import __excepthook__
from time import time
from traceback import format_exception
# Directory of this module; error.log is written next to it.
BASE_DIR = os.path.realpath(os.path.dirname(__file__))
def log_exception(type, value, tb):
    """Excepthook replacement: prepend the formatted traceback (newest entry
    first) to error.log, then delegate to the default ``sys.__excepthook__``
    so the crash still prints normally."""
    error = format_exception(type, value, tb)
    filepath = os.path.join(BASE_DIR, 'error.log')
    old_text = '\n'
    if os.path.isfile(filepath):
        with open(filepath, 'r') as logfile:
            old_text += logfile.read()
    timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S')
    line = f'[{timestamp}]\n{("".join(error))}'
    # Newest entry goes on top of whatever was logged before.
    new_text = line + old_text
    with open(filepath, 'w+') as logfile:
        logfile.write(new_text)
    __excepthook__(type, value, tb)
|
{
"imported_by": [],
"imports": [
"/smash_reader/logger.py"
]
}
|
badgerlordy/smash-bros-reader
|
/smash_reader/smash_utility.py
|
import functools
import json
import os
import random
import string
import subprocess
import sys
import time
from datetime import datetime

import cv2
import matplotlib.pyplot as plt
import mss
import numpy as np
import pytesseract
import requests
from PIL import Image, ImageChops, ImageDraw
from skimage.measure import compare_ssim

from logger import log_exception
sys.excepthook = log_exception
output = True
def _print(*args, **kwargs):
if output:
args = list(args)
args.insert(0, '<Utility>')
print(*args, **kwargs)
BASE_DIR = os.path.realpath(os.path.dirname(__file__))
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
# An optional index.txt next to this file overrides which monitor mss captures.
override_path = os.path.join(BASE_DIR, 'index.txt')
if os.path.isfile(override_path):
    with open(override_path, 'r') as infile:
        MONITOR_INDEX = int(infile.read())
else:
    MONITOR_INDEX = 1
# Pixel rectangles (left, top, right, bottom) for every screen region the
# reader crops, keyed by screen phase.  All values assume a 1920x1080 capture.
COORDS = {
    'LOBBY': {
        'BASIC_ID': (145, 32, 321, 70),
        'FLAGS_ID': (394, 291, 1525, 433),
        'CARDS_ID': (671, 152, 1247, 188),
        'GAME_INFO': (302, 217, 1443, 253),
        'CHARACTER_TEMPLATE': (144, 126, 206, 218),
        # One-pixel-tall rows scanned for card markers / team colors.
        'CARDS_SLICE_IDS': (0, 877, 1920, 878),
        'CARDS_SLICE_COLORS': (0, 813, 1920, 814),
        # Regions relative to a single cropped player card.
        'PLAYER': {
            'TEAM_COLOR': (17, 458, 18, 459),
            'CHARACTER_NAME': (0, 367, 396, 430),
            'NAME': (129, 436, 389, 475),
            'NUMBER': (37, 441, 82, 471),
            'GSP': (131, 490, 384, 526)
        }
    },
    'GAME': {
        'TIMER_PREGAME': (1722, 61, 1798, 89),
        'TIMER_VISIBLE': (1703, 63, 1715, 95),
        'TIMER_MILLI': (
            (1823, 70, 1831, 92),
            (1850, 70, 1858, 92)
        ),
        'TIMER_MINUTE': (1675, 54, 1686, 91),
        'TIMES_UP': (465, 299, 1451, 409),
        'SUDDEN_DEATH': (340, 172, 1602, 345),
        'END_ID': (411, 462, 1481, 522),
        'PLAYER': {
            # Right-edge x positions of each player's HUD block, keyed by
            # player count; the offsets below are subtracted from these edges.
            'INFO': {
                2: (712, 1451),
                3: (457, 1081, 1705),
                4: (491, 899, 1307, 1715)
            },
            'STOCK_TEMPLATE': (223, 1045, 221, 1059),
            'CHARACTER_TEMPLATE': (272, 950, 242, 1020),
            'NAME': (182, 1007, 0, 1025),
            'COLOR': (5, 1003, 4, 1004)
        }
    },
    'FINAL': {
        'ID': (
            (468, 49, 550, 296),
            (204, 388, 286, 635)
        ),
        'ID2': (1825, 0, 1864, 73),
        'VICTORY_TEAM': (745, 870, 833, 978),
        'VICTORY_PLAYER': (125, 168, 126, 169),
        '2ND_PLACE': (525, 982, 526, 983),
        '2ND_PLACE_2_PLAYER': (690, 984, 691, 985),
        '3RD_PLACE': (1072, 1003, 1073, 1004),
        '4TH_PLACE': (1492, 1013, 1493, 1014)
    },
    'MENU': {
        'FAILED_TO_PLAY_REPLAY': (724, 408, 1185, 485),
        'SPECTATE_SELECTED': (979, 458, 1586, 606)
    }
}
# Reference RGB values for each team color, per screen phase (the same team
# color renders differently on lobby cards, the in-game HUD, and the results).
COLORS = {
    'CARDS':{
        'RED': (250, 52, 52),
        'BLUE': (43, 137, 253),
        'YELLOW': (248, 182, 16),
        'GREEN': (35, 179, 73)
    },
    'GAME': {
        'RED': (255, 42, 40),
        'BLUE': (31, 141 ,255),
        'YELLOW': (255, 203, 0),
        'GREEN': (22, 193, 64)
    },
    'RESULTS': {
        'RED': (240, 159, 163),
        'BLUE': (125, 206, 254),
        'YELLOW': (255, 244, 89),
        'GREEN': (141, 212, 114)
    }
}
# Preload every template image under templates/<category>/ into
# TEMPLATES[CATEGORY][basename] as PIL Images (category = folder name upper-cased).
folders = [f for f in os.listdir(TEMPLATES_DIR) if os.path.isdir(os.path.join(TEMPLATES_DIR, f))]
TEMPLATES = {f.upper():{} for f in folders}
for root, dirs, files in os.walk(TEMPLATES_DIR, topdown=False):
    for file in files:
        path = os.path.join(root, file)
        name = os.path.splitext(file)[0]
        _type = os.path.split(root)[1].upper()
        if _type in TEMPLATES:
            TEMPLATES[_type][name] = Image.open(path)
        else:
            TEMPLATES[_type] = {name: Image.open(path)}
def save_settings(settings):
    """Persist *settings* (a flat str->str mapping) to settings.txt as
    KEY=VALUE lines.

    NOTE(review): this writes relative to the current working directory while
    load_settings() reads from BASE_DIR — confirm cwd == BASE_DIR at startup.
    """
    lines = [f'{k}={v}' for k, v in settings.items()]
    # Fix: the original `open(...).write(...)` never closed the handle; a
    # context manager flushes and closes it deterministically.
    with open('settings.txt', 'w+') as outfile:
        outfile.write('\n'.join(lines))
def load_settings():
    """Load settings.txt from BASE_DIR into a dict of str->str.

    On first run (no settings file) a default configuration is created and
    saved, migrating a legacy key.txt API key when present (the key file is
    deleted after migration).
    """
    path = os.path.join(BASE_DIR, 'settings.txt')
    if os.path.isfile(path):
        # Fix: the original open(...).read() calls never closed their handles;
        # context managers close them deterministically.
        with open(path, 'r') as infile:
            lines = infile.read().splitlines()
        settings = {}
        for line in lines:
            k, v = line.split('=')
            settings[k] = v
    else:
        key_path = os.path.join(BASE_DIR, 'key.txt')
        key = ''
        if os.path.isfile(key_path):
            with open(key_path, 'r') as keyfile:
                key = keyfile.read().splitlines()[0]
            os.remove(key_path)
        settings = {
            'API_KEY': key,
            'POST_URL': 'https://www.smashbet.net/reader_post/',
            'AUTO_START_WATCHER': 'true'
        }
        save_settings(settings)
    return settings
# Loaded once at import; other modules read ut.SETTINGS.
SETTINGS = load_settings()
#####################################################################
############################# DECORATORS ############################
#####################################################################
def time_this(func):
    """Decorator: report *func*'s wall-clock duration via _print after each call.

    functools.wraps preserves the wrapped function's __name__/__doc__, so the
    report and any introspection remain meaningful (the original wrapper
    masked them).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        duration = end_time - start_time
        dur_str = '{:.2f}'.format(duration)
        _print(f'function: {func.__name__}() executed in {dur_str} seconds')
        return result
    return wrapper
# Make sure function runs at least as long as the set interval
def pad_time(interval):
    """Decorator factory: sleep after the call so the wrapped function takes
    at least *interval* seconds of wall-clock time; the return value passes
    through unchanged."""
    def outer(func):
        def inner(*args, **kwargs):
            began = time.time()
            result = func(*args, **kwargs)
            remaining = interval - (time.time() - began)
            if remaining > 0:
                time.sleep(remaining)
            return result
        return inner
    return outer
#####################################################################
########################## IMAGE CAPTURING ##########################
#####################################################################
def save_frames(vid_path, framerate=None):
    """Dump every 30th frame of the video at *vid_path* to frame<N>.png files
    in the working directory.

    Sleeps five seconds first so the operator can switch windows.
    *framerate* is currently unused.
    """
    print('saving template in 5 seconds')
    time.sleep(5)
    vid_cap = cv2.VideoCapture(vid_path)
    success = True
    frame_index = 0
    while success:
        vid_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
        success, image = vid_cap.read()
        _print(f'Read frame {frame_index}: ', success)
        cv2.imwrite(f'frame{frame_index}.png', image) # save frame as PNG file
        frame_index += 30
# @time_this
def capture_screen(monitor_index=MONITOR_INDEX):
    """Grab a full RGB PIL screenshot of the given monitor (1-based index).

    Falls back to the last available monitor when the index is out of range.
    """
    with mss.mss() as sct:
        monitor_count = len(sct.monitors)
        if monitor_index > monitor_count:
            monitor_index = monitor_count
        monitor = sct.monitors[monitor_index]
        sct_img = sct.grab(monitor)
        # mss returns BGRA bytes; convert to an RGB PIL image.
        pil_img = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
        return pil_img
def capture_cards_id():
    """Recapture the lobby CARDS_ID template from the live screen, replacing
    both the on-disk PNG and the in-memory TEMPLATES entry."""
    coords = COORDS['LOBBY']['CARDS_ID']
    cap = capture_screen()
    crop = cap.crop(coords)
    if 'CARDS_ID' in TEMPLATES['LOBBY']:
        del TEMPLATES['LOBBY']['CARDS_ID']
    crop.save(os.path.join(TEMPLATES_DIR, 'lobby', 'CARDS_ID.png'))
    TEMPLATES['LOBBY']['CARDS_ID'] = crop
#####################################################################
########################## IMAGE PROCESSING #########################
#####################################################################
def read_image(image, config_type='basic'):
    """OCR *image* with pytesseract using a named tesseract config preset
    ('basic', 'gsp', or 'player_number')."""
    configs = {
        'basic': '--psm 6 --oem 3',
        'gsp': '--psm 8 --oem 3 -c tessedit_char_whitelist=0123456789,',
        'player_number': '--psm 8 --oem 3 -c tessedit_char_whitelist=p1234'
    }
    chosen = configs[config_type]
    return pytesseract.image_to_string(image, config=chosen)
def convert_to_bw(pil_img, threshold=127, inv=True):
    """Threshold *pil_img* to black & white; returns (PIL image, numpy array).

    With *inv* true the threshold is inverted (bright pixels become black).
    If OpenCV cannot convert (e.g. the input is already single-channel),
    the original image and its raw array are returned unchanged.
    """
    cv_img = np.array(pil_img)
    try:
        img_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
        if inv:
            method = cv2.THRESH_BINARY_INV
        else:
            method = cv2.THRESH_BINARY
        thresh, array_bw = cv2.threshold(img_gray, threshold, 255, method)
        pil_bw = Image.fromarray(array_bw)
        return pil_bw, array_bw
    except cv2.error:
        return pil_img, cv_img
def find_most_similar(sample, templates, thresh=0):
    """Return ``[name, similarity]`` for the template most similar to *sample*.

    *templates* maps names to images.  When *thresh* is nonzero, the search
    short-circuits as soon as any template's score exceeds it.
    """
    best = ['', 0]
    for name, template in templates.items():
        score = avg_sim(sample, template)
        if score > best[1]:
            best = [name, score]
        if thresh and score > thresh:
            return best
    return best
def compare_chops(sample, template, true_color=False):
    """Pixel-difference similarity of *sample* vs *template*, scored 0-100.

    Both images are shrunk to 64x64 (and thresholded to b&w unless
    *true_color*), differenced with ImageChops, and scored by the fraction
    of matching pixels.  Returns 0 when the input sizes differ.
    """
    if sample.size == template.size:
        copy1 = sample.resize((64, 64))
        copy2 = template.resize((64, 64))
        if not true_color:
            copy1, arr1 = convert_to_bw(copy1)
            copy2, arr2 = convert_to_bw(copy2)
        diff = ImageChops.difference(copy1, copy2)
        arr = np.asarray(diff)
        total = 0
        different = 0
        for row in arr:
            for pixel in row:
                total += 1
                if isinstance(pixel, (int, np.uint8)):
                    # B&W path: a fully different pixel differences to 255.
                    if pixel == 255:
                        different += 1
                else:
                    # Color path: accumulate fractional per-channel difference.
                    for color in pixel:
                        different += (color / 255)
        sim = ((1 - (different/total)) * 100)
        return sim
    return 0
def compare_skim(sample, template, true_color=False):
    """Structural-similarity (SSIM) score of *sample* vs *template*, 0-100.

    Returns 0 when the input sizes differ.  When not *true_color*, both
    images are converted to grayscale first, falling back to the raw array
    when the image is already single-channel.
    """
    if sample.size == template.size:
        # Fix: the original built two 64x64 copies here that were never used
        # (and resized `sample` twice where `template` was likely meant) —
        # dead code, removed with no behavior change.
        if not true_color:
            try:
                sample = cv2.cvtColor(np.array(sample), cv2.COLOR_BGR2GRAY)
            except cv2.error:
                sample = np.array(sample)
            try:
                template = cv2.cvtColor(np.array(template), cv2.COLOR_BGR2GRAY)
            except cv2.error:
                template = np.array(template)
            # Image is already b&w
        sim, diff = compare_ssim(sample, template, full=True, multichannel=True)
        return sim * 100
    return 0
def area_sim(cap, screen, area):
    """Best similarity between `cap` and the template for (screen, area).

    COORDS may hold one box or a list of candidate boxes; each crop of
    `cap` is scored against the template and the best score is returned.
    """
    template = TEMPLATES[screen][area]
    coords = COORDS[screen][area]
    if not isinstance(coords[0], (list, tuple)):
        coords = [coords]  # normalize single box to a list of boxes
    best = 0
    for box in coords:
        score = avg_sim(cap.crop(box), template)
        if score > best:
            best = score
    return best
def avg_sim(sample, template, true_color=False):
    """Mean of the ImageChops-based and SSIM-based similarity scores."""
    scorers = (compare_chops, compare_skim)
    scores = [scorer(sample, template, true_color) for scorer in scorers]
    return sum(scores) / len(scores)
def match_color(pixel=None, arr=None, mode=None):
    """Find the named color in COLORS[mode] closest to a sampled RGB value.

    Args:
        pixel: optional image/array; its first pixel is used as the sample.
        arr: optional raw RGB sequence, used when `pixel` is falsy.
        mode: required key into the COLORS table.

    Returns:
        (color_name, similarity_percent), or ('', 0) when no usable input.

    BUG FIX: `arr` previously defaulted to a shared mutable list literal
    (`arr=[]`); it now defaults to None and is normalized inside.
    """
    best_match = ('', 0)
    if not mode:
        _print('mode required for color match')
        return best_match
    if arr is None:
        arr = []
    if pixel:
        # flatten and take the first RGB triple
        sample = [rgb for row in np.asarray(pixel) for rgb in row][0]
    elif any(arr):
        sample = arr
    else:
        _print('no sample')
        return best_match
    colors = COLORS[mode]
    for color_name in colors:
        diff = 0
        for sv, tv in zip(sample, colors[color_name]):
            diff += abs(sv - tv)
        # 765 == 3 * 255, the maximum possible summed channel difference
        sim = 100 - ((diff / 765) * 100)
        if sim > best_match[1]:
            best_match = (color_name, sim)
    return best_match
def stencil(crop):
    """Build a stencil mask from `crop` by combining its near-white and
    border-filled near-black thresholded versions.

    Returns [crop, white_pil, black_pil, black_filled, mask_img] -- the
    intermediate images plus the final mask (0 where both thresholded
    images are lit, 255 elsewhere).
    """
    white_pil, white_arr = convert_to_bw(crop, 254, inv=False)
    black_pil, _ = convert_to_bw(crop, 1, inv=False)
    black_filled = black_pil.copy()
    fill_border(black_filled)
    filled_arr = np.array(black_filled)
    mask_rows = []
    for w_row, b_row in zip(white_arr, filled_arr):
        mask_rows.append([0 if int(wp) and int(bp) else 255
                          for wp, bp in zip(w_row, b_row)])
    mask_arr = np.array(mask_rows)
    mask_img = Image.fromarray(mask_arr.astype('uint8'))
    return [crop, white_pil, black_pil, black_filled, mask_img]
def fill_border(img):
    # Flood-fill (to black) every lit pixel found on the image border,
    # mutating `img` in place. The `while True: ... break` wrapper makes
    # a single pass over the snapshot taken at the top.
    while True:
        arr = np.array(img)
        row_count = len(arr)
        for row_i, row in enumerate(arr):
            col_count = len(row)
            for p_i, p in enumerate(row):
                if int(p):
                    # NOTE(review): `row_i == row_count` / `p_i == col_count`
                    # can never be true (indices stop at len - 1), so the
                    # bottom row and right column are never treated as
                    # border -- these look like they were meant to be
                    # `row_count - 1` / `col_count - 1`; confirm intent.
                    if row_i == 0 or row_i == row_count \
                            or p_i == 0 or p_i == col_count:
                        ImageDraw.floodfill(img, (p_i, row_i), 0)
                        continue
        break
def filter_color(image, color):
    """Mask `image` to a +/-10 hue band around the given RGB `color`.

    Returns the masked image as an ndarray (non-matching pixels zeroed).
    """
    swatch = np.uint8([[color]])
    swatch_hsv = cv2.cvtColor(swatch, cv2.COLOR_RGB2HSV)
    hue = swatch_hsv[0][0][0]
    lower = np.array([hue - 10, 50, 50])
    upper = np.array([hue + 10, 360, 360])
    img_arr = np.asarray(image)
    img_hsv = cv2.cvtColor(img_arr, cv2.COLOR_RGB2HSV)
    band_mask = cv2.inRange(img_hsv, lower, upper)
    return cv2.bitwise_and(img_arr, img_arr, mask=band_mask)
def filter_color2(img, coords):
    """Binary mask (list of lists, 255/0) of pixels exactly equal to the
    pixel at `coords` (x, y) in `img`."""
    grid = np.array(img)
    x, y = coords
    target = list(grid[y][x])
    mask = []
    for scanline in grid:
        mask.append([255 if list(px) == target else 0 for px in scanline])
    return mask
def rgb_to_hex(rgb):
    """Format an (r, g, b) tuple as a '#rrggbb' hex color string."""
    r, g, b = rgb
    return f'#{r:02x}{g:02x}{b:02x}'
#####################################################################
################################ MISC ###############################
#####################################################################
def simplify_players(game):
    """Flatten all teams' players into one list.

    For each player: falsy fields are dropped, 'character_name' is
    title-cased, and the team's color is attached. NOTE: mutates the
    player dicts inside `game` in place.
    """
    flattened = []
    for team in game['teams']:
        team_color = team['color']
        for entry in team['players']:
            for field in list(entry.keys()):
                if not entry[field]:
                    del entry[field]
            if 'character_name' in entry:
                entry['character_name'] = entry['character_name'].title()
            entry['color'] = team_color
            flattened.append(entry)
    return flattened
def filter_game_data(game, mode):
    """Reduce a full game dict to the payload for the given reader mode.

    Modes: 1 = full card data, 2 = color-change update, 3 = game start,
    4 = game end, 5 = winner announcement. Always includes 'reader_mode'.

    FIX: in mode 2 the players list was previously rebuilt once per team
    inside a loop (each iteration overwriting the last, identical,
    result); it is now computed once.
    """
    simple_game = {'reader_mode': mode}
    if mode == 1:
        simple_game['players'] = simplify_players(game)
        simple_game['map'] = game['map']
        simple_game['team_mode'] = game['team_mode']
        simple_game['game_mode'] = game['mode']
        simple_game['cancelled'] = game['cancelled']
    if mode == 2:
        if not game['team_mode']:
            simple_game['colors_changed'] = game['colors_changed']
            if game['colors_changed']:
                # Guard keeps the old behavior of omitting 'players'
                # entirely when there are no teams.
                if game['teams']:
                    simple_game['players'] = simplify_players(game)
    if mode == 3:
        simple_game['start_time'] = -1
    if mode == 4:
        simple_game['end_time'] = -1
    if mode == 5:
        simple_game['winning_team'] = game['winning_color']
    return simple_game
def post_data(data=None):
    """POST `data` (wrapped with the API key) to the configured endpoint.

    Returns the `requests` response, or None when the API is unreachable.

    BUG FIX: `data` previously defaulted to a shared mutable dict
    literal (`data={}`); it now defaults to None.
    """
    if data is None:
        data = {}
    key = SETTINGS['API_KEY']
    URL = SETTINGS['POST_URL']
    DATA = {
        'API_KEY': key,
        'data': data
    }
    try:
        r = requests.post(url=URL, json=DATA)
        return r
    except requests.exceptions.ConnectionError:
        print('Unable to reach REST API')
        return None
def dump_image_data(arr):
    """Append a {timestamp: arr} record to img_dump.json (created on
    first use) in BASE_DIR."""
    dump_path = os.path.join(BASE_DIR, 'img_dump.json')
    records = []
    if os.path.isfile(dump_path):
        with open(dump_path, 'r') as infile:
            records = json.load(infile)
    records.append({time.time(): arr})
    with open(dump_path, 'w+') as outfile:
        json.dump(records, outfile)
def clear_console():
    """Best-effort terminal clear: try Windows 'cls' then POSIX 'clear'.

    FIX: replaced two bare `except:` clauses (which swallow even
    KeyboardInterrupt/SystemExit) with `except Exception`, and dropped
    the pointless `none = ...` assignments.
    """
    for command in ('cls', 'clear'):
        try:
            os.system(command)
        except Exception:
            # Ignore platforms/shells where the command fails.
            pass
def save_game_data(game):
    """Append one game record to games.json (read-modify-write, compact
    separators)."""
    records = load_game_data()
    records.append(game)
    with open('games.json', 'w+') as outfile:
        json.dump(records, outfile, separators=(',', ':'))
def load_game_data():
    """Return the saved games list from BASE_DIR/games.json, or [] when
    the file is missing or unparsable."""
    path = os.path.join(BASE_DIR, 'games.json')
    if not os.path.isfile(path):
        return []
    try:
        with open(path, 'r') as infile:
            return json.load(infile)
    except json.decoder.JSONDecodeError:
        return []
def send_command(btn):
    """Log and fire a remote button press by running the matching script
    against the Raspberry Pi's pigpio daemon."""
    _print('PRESS', btn)
    command = f'PIGPIO_ADDR=raspberrypi.local python3 /home/badgerlord/Desktop/{btn}.py'
    os.system(command)
def random_str(l=10):
    """Return a random string of ASCII letters and digits of length `l`.

    FIX: the old docstring claimed special characters were included;
    they never were. NOTE: this uses `random`, so it is NOT suitable for
    passwords or tokens -- use the `secrets` module for those.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(l))
|
from datetime import datetime
import os
from sys import __excepthook__
from time import time
from traceback import format_exception
BASE_DIR = os.path.realpath(os.path.dirname(__file__))


def log_exception(type, value, tb):
    """sys.excepthook replacement: prepend the formatted traceback (with
    a timestamp) to error.log, newest first, then delegate to the
    default excepthook so normal printing still happens."""
    formatted = format_exception(type, value, tb)
    log_path = os.path.join(BASE_DIR, 'error.log')
    previous = '\n'
    if os.path.isfile(log_path):
        with open(log_path, 'r') as logfile:
            previous += logfile.read()
    stamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S')
    entry = f'[{stamp}]\n{("".join(formatted))}'
    with open(log_path, 'w+') as logfile:
        logfile.write(entry + previous)
    __excepthook__(type, value, tb)
|
{
"imported_by": [],
"imports": [
"/smash_reader/logger.py"
]
}
|
badgerlordy/smash-bros-reader
|
/smash_reader/smash_watcher.py
|
import json
from logger import log_exception
import os
from queue import Empty
import re
import requests
import smash_game
import smash_utility as ut
import sys
import threading
import time
sys.excepthook = log_exception
# Module-wide switch to silence watcher console output.
output = True


def _print(*args, **kwargs):
    """Print with a '<Watcher>' prefix; no-op when `output` is False."""
    if output:
        print('<Watcher>', *args, **kwargs)
class Watcher(threading.Thread):
    """Thread that polls screen captures and advances a fixed state
    machine (0 flags -> 1 cards -> 2 pregame -> 3 battle start ->
    4 battle end -> 5/6 results), posting game data to the REST API and
    status updates to the GUI queue at each transition.

    NOTE(review): the source this was recovered from had its indentation
    stripped; block nesting below was reconstructed from context and
    should be confirmed against the original repository.
    """

    def __init__(self, watcher_queue, gui_queue):
        # print('\n')
        super().__init__()
        self.queue = watcher_queue      # inbound control messages ('quit')
        self.gui_queue = gui_queue      # outbound status/update messages
        # (screen, area) keys into ut.TEMPLATES / ut.COORDS identifying
        # each state; the empty tuples are states detected by timer logic
        # instead of template matching.
        self.id_coords = [
            ('LOBBY', 'FLAGS_ID'),
            ('LOBBY', 'CARDS_ID'),
            (),
            (),
            ('GAME', 'END_ID'),
            ('FINAL', 'ID'),
            ('FINAL', 'ID2')
        ]
        self.locked = False
        self.reset()

    # Game finished or cancelled
    def reset(self):
        # Reinitialize all per-game state; no-op while locked.
        if not self.locked:
            self.current_type_index = 0
            self.list_limit = 3
            # Rolling similarity windows, one list per id_coords entry.
            self.sim_lists = [[0] * self.list_limit for _ in range(len(self.id_coords))]
            self.cont = True
            self.current_game_num = len(ut.load_game_data()) + 1
            self.game = smash_game.Game(self.current_game_num)
            self.timer_detected = False
            self.timer_visible = False
            self.timer_running = False
            self.timer_running_templates = (None, None)
            self.timer_sim_hits = 0

    # Starts when watcher is created and loops forever
    def run(self):
        # Main polling loop: one screen capture per iteration (~10 Hz).
        _print('Watching for flags')
        self.gui_queue.put({'status': 'Watching for flag screen'})
        while self.cont:
            timer_vis_sim = 0
            timer_milli_sim = 0
            self.cap = ut.capture_screen()
            # Detect the "failed to play replay" dialog and dismiss it.
            crop = self.cap.crop(ut.COORDS['MENU']['FAILED_TO_PLAY_REPLAY'])
            if ut.avg_sim(crop, ut.TEMPLATES['MENU']['FAILED_TO_PLAY_REPLAY']) >= 95:
                self.game.cancelled = 'REPLAY_FAILED'
                time.sleep(5)
                ut.send_command('a')
            if self.game.cancelled:
                self.reset()
                if not self.locked:
                    self.gui_queue.put('update')
                    self.gui_queue.put({'status': 'Watching for menu screen'})
                    self.watch_for_menu()
                    if not self.locked:
                        self.gui_queue.put({'status': 'Watching for flag screen'})
            # check timer visibility and movement, set class variables
            if self.current_type_index >= 2:
                timer_vis_sim = self.check_timer_visibility()
                timer_milli_sim = 0
                if self.timer_detected:
                    timer_milli_sim = self.check_timer_movement()
            # look for the timer at the beginning
            if self.current_type_index == 2:
                if self.timer_detected:
                    _print(f'timer detected: {timer_vis_sim}')
                    self.read_screen_data()
            # wait for the timer to start moving
            elif self.current_type_index == 3:
                if self.timer_running:
                    _print(f'timer movemement detected: {timer_milli_sim}')
                    self.read_screen_data()
            # check to see if the timer is stopped, or the "GAME" text is
            # detected, or the results screen is detected
            elif self.current_type_index == 4:
                if self.check_screen_basic() > 90:
                    # pass because read_screen_data will be called if True
                    # and the rest of the checks will be skipped
                    pass
                else:
                    # Timer stopped
                    if not self.timer_running:
                        self.read_screen_data()
                    # Results screen detected
                    else:
                        checks = [
                            self.check_screen_basic(index=5, normal=False),
                            self.check_screen_basic(index=6, normal=False)
                        ]
                        if sum(checks) / 2 > 80:
                            # run twice because the match end screen was missed
                            self.read_screen_data()
                            self.read_screen_data()
            # check for current basic template (flags, cards, results)
            else:
                self.check_screen_basic()
            self.check_queue()
            time.sleep(0.1)

    def check_queue(self):
        # Drain one control message; 'quit' stops the main loop.
        if self.queue:
            try:
                item = self.queue.get(block=False)
                if item == 'quit':
                    self.cont = False
            except Empty:
                pass

    def lock(self, index):
        # Pin the state machine at `index` (reset becomes a no-op).
        self.current_type_index = index - 1
        self.read_screen_data()
        self.locked = True

    def unlock(self):
        # Release the pin and restart from a clean state.
        self.locked = False
        self.reset()

    def watch_for_menu(self):
        # Press 'a' repeatedly until either the spectate menu entry or
        # the lobby flag screen is recognized.
        templates = [
            ut.TEMPLATES['MENU']['SPECTATE_SELECTED'],
            ut.TEMPLATES['LOBBY']['FLAGS_ID']
        ]
        while self.cont:
            cap = ut.capture_screen()
            self.check_queue()
            crop = cap.crop(ut.COORDS['MENU']['SPECTATE_SELECTED'])
            if ut.avg_sim(crop, templates[0]) > 95:
                time.sleep(5)
                ut.send_command('a')
                break
            crop = cap.crop(ut.COORDS['LOBBY']['FLAGS_ID'])
            if ut.avg_sim(crop, templates[1]) > 95:
                break
            ut.send_command('a')
            time.sleep(2)

    # @ut.pad_time(0.20)
    def check_screen_basic(self, index=-1, normal=True, screen=None, area=None):
        # Score the current capture against the template for state
        # `index` using a 3-sample rolling average; above 90 the state
        # is considered detected (and, when `normal`, consumed).
        if index == -1:
            index = self.current_type_index
        if not screen and not area:
            screen, area = self.id_coords[index]
        sim = ut.area_sim(self.cap, screen, area)
        l = self.sim_lists[index]
        l.insert(0, sim)
        del l[-1]
        avg = sum(l) / len(l)
        if avg > 90:
            _print(f'Screen type {{{index}}} sim: {avg}')
            if normal:
                l = [0] * self.list_limit
                self.read_screen_data()
        return avg

    def check_timer_visibility(self):
        # Template-match the on-screen game timer; latches
        # `timer_detected` the first time it is seen.
        timer_vis_crop = self.cap.crop(ut.COORDS['GAME']['TIMER_VISIBLE'])
        template = ut.TEMPLATES['GAME']['TIMER_VISIBLE']
        timer_vis_sim = ut.avg_sim(timer_vis_crop, template)
        if timer_vis_sim > 95:
            # _print(f'timer vis sim: {timer_vis_sim}')
            if not self.timer_detected:
                self.timer_detected = True
            self.timer_visible = True
        else:
            self.timer_visible = False
        return timer_vis_sim

    def check_timer_movement(self):
        # Compare the millisecond digits against the previous frame's
        # crops; three consecutive high-similarity hits toggle the
        # running/stopped state.
        timer_sim = 0
        if self.timer_visible:
            coords = ut.COORDS['GAME']['TIMER_MILLI']
            crops = [self.cap.crop(coord) for coord in coords]
            # [crop.show() for crop in crops]
            if all(self.timer_running_templates):
                timer_sim = sum([ut.avg_sim(t, c) for t, c in zip(self.timer_running_templates, crops)]) / 2
                # for i, crop in enumerate(crops):
                #     timer_sim = ut.avg_sim(crop, self.timer_running_templates[i]) / (i + 1)
                if timer_sim > 90:
                    _print(f'timer sim: {timer_sim}')
                    self.timer_sim_hits += 1
                    if self.timer_sim_hits >= 3:
                        if self.timer_running:
                            # self.read_screen_data()
                            self.timer_running = False
                        else:
                            self.timer_running = True
                        self.timer_sim_hits = 0
            self.timer_running_templates = crops
        return timer_sim

    def battle_watcher(self):
        pass

    def filter_and_post(self, game):
        # Send the mode-filtered game payload to the REST API.
        data = {
            'game': ut.filter_game_data(
                game,
                self.current_type_index
            ),
            'mode': self.current_type_index
        }
        ut.post_data(data)

    def read_screen_data(self):
        # Handle the just-detected state, post its data, then advance
        # the state index (wrapping back to 0 after the results).
        qp = lambda: self.filter_and_post(self.game.serialize(images_bool=False))
        # Flags
        if self.current_type_index == 0:
            self.gui_queue.put('update')
            _print('Flags detected')
            self.gui_queue.put({'status': 'Watching for card screen'})
        # Cards
        if self.current_type_index == 1:
            _print('Cards detected')
            self.gui_queue.put({'status': 'Reading cards'})
            time.sleep(1)
            self.cap = ut.capture_screen()
            self.game.read_card_screen(self.cap)
            qp()
            self.gui_queue.put('update')
            self.gui_queue.put({'status': 'Watching for battle pregame'})
        # Pregame
        if self.current_type_index == 2:
            _print('Battle pregame detected')
            self.game.read_start_screen(self.cap)
            qp()
            self.gui_queue.put('update')
            self.gui_queue.put({'status': 'Watching for battle start'})
        # Game started
        if self.current_type_index == 3:
            _print('Battle start detected')
            qp()
            self.gui_queue.put('update')
            self.gui_queue.put({'status': 'Watching for battle end'})
        # Game ended
        if self.current_type_index == 4:
            _print('Battle end detected')
            qp()
            self.gui_queue.put('update')
            self.gui_queue.put({'status': 'Watching for battle results'})
        # Results
        if self.current_type_index == 5:
            _print('Battle results detected')
            self.game.read_results_screen(self.cap)
            qp()
            self.gui_queue.put('update')
            self.gui_queue.put({'status': 'Watching for flag screen'})
            # ut.save_game_data(self.game.serialize())
        if not self.locked:
            self.current_type_index += 1
            if self.current_type_index >= 6:
                self.reset()
            _print(f'Mode changed to {self.current_type_index}')
        # _print(json.dumps(self.game.serialize(), separators=(',', ': ')))
|
from datetime import datetime
import os
from sys import __excepthook__
from time import time
from traceback import format_exception
# Directory containing this module; error.log is written next to it.
BASE_DIR = os.path.realpath(os.path.dirname(__file__))


def log_exception(type, value, tb):
    # sys.excepthook replacement: prepend the formatted traceback (with
    # a timestamp) to error.log so the newest entry is first, then fall
    # through to the default hook so normal console printing happens.
    error = format_exception(type, value, tb)
    filepath = os.path.join(BASE_DIR, 'error.log')
    old_text = '\n'
    if os.path.isfile(filepath):
        with open(filepath, 'r') as logfile:
            old_text += logfile.read()
    timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S')
    line = f'[{timestamp}]\n{("".join(error))}'
    new_text = line + old_text
    with open(filepath, 'w+') as logfile:
        logfile.write(new_text)
    __excepthook__(type, value, tb)
|
{
"imported_by": [],
"imports": [
"/smash_reader/logger.py"
]
}
|
radrumond/hidra
|
/archs/fcn.py
|
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import os
import numpy as np
import tensorflow as tf
from archs.maml import MAML
class Model(MAML):
    # Standard 4-block conv net (Conv -> BN -> ReLU -> MaxPool, x4, plus a
    # dense head) plugged into the MAML meta-learning scaffold. Uses TF1
    # graph mode with AUTO_REUSE variable scopes.

    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        super().__init__(train_lr, meta_lr, image_shape, isMIN, label_size)

    def dense_weights(self):
        # Create (or reuse via AUTO_REUSE) the conv/dense variables.
        # Returns (weights, cells); `cells` stays empty for this arch.
        weights = {}
        cells = {}
        initializer = tf.contrib.layers.xavier_initializer()
        print("Creating/loading Weights")
        divider = 1
        inic = 1        # input channels: 1 for Omniglot, 3 for MiniImageNet
        filters = 64    # conv filters per layer (32 for MiniImageNet)
        finals = 64     # flattened feature size feeding the dense head
        if self.isMIN:
            divider = 2
            inic = 3
            finals = 800
            filters = 32
        with tf.variable_scope('MAML', reuse=tf.AUTO_REUSE):
            weights['c_1'] = tf.get_variable('c_1', shape=(3, 3, inic, filters), initializer=initializer)
            weights['c_2'] = tf.get_variable('c_2', shape=(3, 3, filters, filters), initializer=initializer)
            weights['c_3'] = tf.get_variable('c_3', shape=(3, 3, filters, filters), initializer=initializer)
            weights['c_4'] = tf.get_variable('c_4', shape=(3, 3, filters, filters), initializer=initializer)
            weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant)
            weights['d_1'] = tf.get_variable('d_1w', [finals, self.label_size], initializer=initializer)
            weights['b_1'] = tf.get_variable('d_1b', [self.label_size], initializer=tf.initializers.constant)
            # Dead experiment kept verbatim from upstream (batch-norm
            # statistics as explicit MAML weights):
            """weights['mean'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
            weights['mean1'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance1'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset1'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale1'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
            weights['mean2'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance2'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset2'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale2'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
            weights['mean3'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance3'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset3'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale3'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )"""
        print("Done Creating/loading Weights")
        return weights, cells

    def forward(self, x, weights, training):
        # Run the 4 conv blocks and the dense head; returns logits.
        # NOTE: batch-norm layers use their own (non-MAML) variables via
        # AUTO_REUSE, so they are shared across inner-loop steps.
        conv1 = self.conv_layer(x, weights["c_1"], weights["cb_1"], "conv1")
        conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE)
        conv1 = tf.nn.relu(conv1)
        conv1 = tf.layers.MaxPooling2D(2, 2)(conv1)
        conv2 = self.conv_layer(conv1, weights["c_2"], weights["cb_2"], "conv2")
        conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE)
        conv2 = tf.nn.relu(conv2)
        conv2 = tf.layers.MaxPooling2D(2, 2)(conv2)
        conv3 = self.conv_layer(conv2, weights["c_3"], weights["cb_3"], "conv3")
        conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE)
        conv3 = tf.nn.relu(conv3)
        conv3 = tf.layers.MaxPooling2D(2, 2)(conv3)
        conv4 = self.conv_layer(conv3, weights["c_4"], weights["cb_4"], "conv4")
        conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE)
        conv4 = tf.nn.relu(conv4)
        conv4 = tf.layers.MaxPooling2D(2, 2)(conv4)
        # print(conv4)
        # bn = tf.squeeze(conv4,axis=(1,2))
        bn = tf.layers.Flatten()(conv4)
        # tf.reshape(bn, [3244,234])
        fc1 = self.fc_layer(bn, "dense1", weights["d_1"], weights["b_1"])
        # bn = tf.reshape(bn,[-1,])
        return fc1
|
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import os
import numpy as np
import tensorflow as tf
class MAML:
    """Base Model-Agnostic Meta-Learning scaffold (TF1 graph mode).

    Subclasses provide `dense_weights()` (variable creation) and
    `forward()` (the network); this class builds the inner/outer
    optimization graph (`build`) and handles checkpointing.

    FIXES in this revision (no graph/behavior changes intended):
    - string comparisons used `is` (`mode is 'train'`), which only works
      by CPython interning accident -- replaced with `==`;
    - `self.saver == None` -> `self.saver is None`;
    - loadWeights crashed with TypeError when `step` was an int/float
      (str + non-str concatenation) -- now coerced with `str(step)`.
    """

    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        self.train_lr = train_lr        # inner-loop (per-task) learning rate
        self.meta_lr = meta_lr          # outer-loop (meta) learning rate
        self.image_shape = image_shape
        self.isMIN = isMIN              # True -> MiniImageNet variant
        self.saver = None               # created lazily on first save/load
        self.label_size = label_size
        self.finals = 64                # flattened feature size of the head
        self.maml_n = 1
        if isMIN:
            self.finals = 800

    def build(self, K, meta_batchsz, mode='train'):
        """Build the meta-train or meta-test graph with K inner steps."""
        # Meta batch of tasks
        self.train_xb = tf.placeholder(tf.float32, [None, None, None, None, self.image_shape[-1]])
        self.train_yb = tf.placeholder(tf.float32, [None, None, None])
        self.val_xb = tf.placeholder(tf.float32, [None, None, None, None, self.image_shape[-1]])
        self.val_yb = tf.placeholder(tf.float32, [None, None, None])
        self.label_n = tf.placeholder(tf.int32, 1, name="num_labs")
        # Initialize weights
        self.weights, self.cells = self.dense_weights()
        training = mode == 'train'  # was: `mode is 'train'`

        # Handle one task update (inner loop + per-step validation).
        def meta_task(inputs):
            train_x, train_y, val_x, val_y = inputs
            val_preds, val_losses = [], []
            train_pred = self.forward(train_x, self.weights, training)
            train_loss = tf.losses.softmax_cross_entropy(train_y, train_pred)
            grads = tf.gradients(train_loss, list(self.weights.values()))
            gvs = dict(zip(self.weights.keys(), grads))
            a = [self.weights[key] - self.train_lr * gvs[key] for key in self.weights.keys()]
            # for key in self.weights.keys():
            #     print(key, gvs[key])
            fast_weights = dict(zip(self.weights.keys(), a))
            # Validation after each update
            val_pred = self.forward(val_x, fast_weights, training)
            val_loss = tf.losses.softmax_cross_entropy(val_y, val_pred)
            # record T0 pred and loss for meta-test
            val_preds.append(val_pred)
            val_losses.append(val_loss)
            # continue to build T1-TK steps graph
            for _ in range(1, K):
                # Update weights on train data of task t
                loss = tf.losses.softmax_cross_entropy(train_y, self.forward(train_x, fast_weights, training))
                grads = tf.gradients(loss, list(fast_weights.values()))
                gvs = dict(zip(fast_weights.keys(), grads))
                fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - self.train_lr * gvs[key] for key in fast_weights.keys()]))
                # Evaluate validation data of task t
                val_pred = self.forward(val_x, fast_weights, training)
                val_loss = tf.losses.softmax_cross_entropy(val_y, val_pred)
                val_preds.append(val_pred)
                val_losses.append(val_loss)
            result = [train_pred, train_loss, val_preds, val_losses]
            return result

        out_dtype = [tf.float32, tf.float32, [tf.float32] * K, [tf.float32] * K]
        result = tf.map_fn(meta_task, elems=(self.train_xb, self.train_yb, self.val_xb, self.val_yb),
                           dtype=out_dtype, parallel_iterations=meta_batchsz, name='map_fn')
        train_pred_tasks, train_loss_tasks, val_preds_tasks, val_losses_tasks = result
        if mode == 'train':  # was: `mode is 'train'`
            self.train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
            self.val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)]
            self.val_predictions = val_preds_tasks
            # Meta-update on the last inner step's validation loss, with
            # per-variable gradient-norm clipping.
            optimizer = tf.train.AdamOptimizer(self.meta_lr, name='meta_optim')
            gvs = optimizer.compute_gradients(self.val_losses[-1])
            gvs = [(tf.clip_by_norm(grad, 10), var) for grad, var in gvs]
            self.meta_op = optimizer.apply_gradients(gvs)
        else:
            self.test_train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
            self.test_val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)]
            self.val_predictions = val_preds_tasks
        self.saving_weights = tf.trainable_variables()

    def conv_layer(self, x, W, b, name, strides=1):
        """3x3 SAME convolution with bias (no activation)."""
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            x = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
            x = tf.nn.bias_add(x, b)
            return x

    def fc_layer(self, x, name, weights=None, biases=None):
        """Fully-connected layer: x @ weights + biases (no activation)."""
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            fc = tf.matmul(x, weights)
            fc = tf.nn.bias_add(fc, biases)
            return fc

    def loadWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', model_name='model.ckpt'):
        """Restore variables from `<modeldir><name>/<model_name>-<step>`
        if its `.marker` file exists; otherwise start from scratch."""
        if self.saver is None:
            z = self.saving_weights
            #print("KEYS:", z.keys())
            self.saver = tf.train.Saver(var_list=z, max_to_keep=12)
        saver = self.saver
        # str(step) fixes a TypeError when callers pass a numeric step.
        checkpoint_path = modeldir + f"{name}/" + model_name + "-" + str(step)
        if os.path.isfile(checkpoint_path + ".marker"):
            saver.restore(sess, checkpoint_path)
            print('The checkpoint has been loaded.')
        else:
            print(checkpoint_path + ".marker not found. Starting from scratch.")

    def saveWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', model_name='model.ckpt'):
        """Save variables and drop a `.marker` file so loadWeights can
        detect the checkpoint later."""
        if self.saver is None:
            z = self.saving_weights
            self.saver = tf.train.Saver(var_list=z, max_to_keep=12)
        saver = self.saver
        checkpoint_path = modeldir + f"{name}/" + model_name
        if not os.path.exists(modeldir):
            os.makedirs(modeldir)
        saver.save(sess, checkpoint_path, global_step=step)
        print('The checkpoint has been created.')
        open(checkpoint_path + "-" + str(int(step)) + ".marker", 'a').close()

    def dense_weights(self):
        """Abstract: subclasses create and return (weights, cells)."""
        return

    def forward(self, x, weights, training):
        """Abstract: subclasses run the network and return logits."""
        return
|
{
"imported_by": [
"/main.py"
],
"imports": [
"/archs/maml.py"
]
}
|
radrumond/hidra
|
/main.py
|
## Created by Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
from archs.fcn import Model as mfcn
from archs.hydra import Model as mhyd
from train import *
from test import *
from args import argument_parser, train_kwargs, test_kwargs
import random
# Entry-point script: parse CLI args, pick dataset + architecture, then
# run either the test or the train pipeline.
args = argument_parser().parse_args()
random.seed(args.seed)
t_args = train_kwargs(args)
e_args = test_kwargs(args)
print("########## argument sheet ########################################")
for arg in vars(args):
    print(f"#{arg:>15} : {str(getattr(args, arg))} ")
print("##################################################################")
print("Loading Data...")
# Dataset selection; `shaper` is the (H, W, C) input image shape.
if args.dataset in ["Omniglot", "omniglot", "Omni", "omni"]:
    loader = OmniChar_Gen(args.data_path)
    isMIN = False
    shaper = [28, 28, 1]
elif args.dataset in ["miniimagenet", "MiniImageNet", "mini"]:
    loader = MiniImgNet_Gen(args.data_path)
    isMIN = True
    shaper = [84, 84, 3]
else:
    raise ValueError("INVALID DATA-SET NAME!")
print("Building Model...")
# Two instances of the chosen architecture: `m` carries the training
# graph, `mt` the testing graph (they share checkpoint names).
if args.arch == "fcn" or args.arch == "maml":
    print("SELECTED: MAML")
    m = mfcn(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
    mt = mfcn(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
#elif args.arch == "rnn":
#    m = mrnn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.min_classes)
elif args.arch == "hydra" or args.arch == "hidra":
    print("SELECTED: HIDRA")
    m = mhyd(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
    mt = mhyd(meta_lr=args.meta_step, train_lr=args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
else:
    raise ValueError("INVALID Architecture NAME!")
mode = "train"
if args.test:
    # Evaluation only: build the test graph and run test().
    mode = "test"
    print("Starting Test Step...")
    mt.build(K=args.test_inner_K, meta_batchsz=args.meta_batch, mode=mode)
    test(mt, loader, **e_args)
else:
    # Training, optionally with an interleaved test graph.
    modeltest = None
    if args.testintrain:
        mt.build(K=args.test_inner_K, meta_batchsz=args.meta_batch, mode="test")
        modeltest = mt
    print("Starting Train Step...")
    m.build(K=args.train_inner_K, meta_batchsz=args.meta_batch, mode=mode)
    train(m, modeltest, loader, **t_args)
|
"""
Command-line argument parsing.
"""
import argparse
#from functools import partial
import time
import tensorflow as tf
import json
import os
def boolean_string(s):
    """Strictly parse the strings 'True'/'False'; raise otherwise."""
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
def argument_parser():
    """Build the CLI argument parser for training/testing scripts.

    Side effects: parses sys.argv once to resolve the config path; on a
    fresh run, creates the checkpoint directory and dumps the arguments
    to `<checkpoint>/<name>/<name>.json`; when --config points to an
    existing JSON file, its values are loaded.

    BUG FIX: previously the loaded config was read into a local variable
    and then discarded; it is now applied with `parser.set_defaults`, so
    the caller's subsequent `parse_args()` actually picks it up (CLI
    flags still override).
    """
    file_time = int(time.time())
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--arch', help='name architecture', default="fcn", type=str)
    parser.add_argument('--seed', help='random seed', default=0, type=int)
    parser.add_argument('--name', help='name add-on', type=str, default='Model_config-'+str(file_time))
    parser.add_argument('--dataset', help='data set to evaluate on', type=str, default='Omniglot')
    parser.add_argument('--data_path', help='path to data folder', type=str, default='/home/')
    parser.add_argument('--config', help='json config file', type=str, default=None)
    parser.add_argument('--checkpoint', help='checkpoint directory', default='model_checkpoint')
    parser.add_argument('--test', help='Testing or Not', action='store_true')
    parser.add_argument('--testintrain', help='Testing during train or Not', action='store_true')
    parser.add_argument('--min_classes', help='minimum number of classes for n-way', default=2, type=int)
    parser.add_argument('--max_classes', help='maximum (excluded) number of classes for n-way', default=2, type=int)
    parser.add_argument('--ttrain_shots', help='number of examples per class in meta train', default=5, type=int)
    parser.add_argument('--ttest_shots', help='number of examples per class in meta test', default=15, type=int)
    parser.add_argument('--etrain_shots', help='number of examples per class in meta train', default=5, type=int)
    parser.add_argument('--etest_shots', help='number of examples per class in meta test', default=15, type=int)
    parser.add_argument('--train_inner_K', help='number of inner gradient steps during meta training', default=5, type=int)
    parser.add_argument('--test_inner_K', help='number of inner gradient steps during meta testing', default=5, type=int)
    parser.add_argument('--learning_rate', help='Adam step size for inner training', default=0.4, type=float)
    parser.add_argument('--meta_step', help='meta-training step size', default=0.01, type=float)
    parser.add_argument('--meta_batch', help='meta-training batch size', default=1, type=int)
    parser.add_argument('--meta_iters', help='meta-training iterations', default=70001, type=int)
    parser.add_argument('--eval_iters', help='meta-training iterations', default=2000, type=int)
    parser.add_argument('--step', help='Checkpoint step to load', default=59999, type=float)
    # python main_emb.py --meta_step 0.005 --meta_batch 8 --learning_rate 0.3 --test --checkpoint Model_config-1568818723
    args = vars(parser.parse_args())
    #os.system("mkdir -p " + args['checkpoint'])
    if args['config'] is None:
        args['config'] = f"{args['checkpoint']}/{args['name']}/{args['name']}.json"
        print(args['config'])
        # os.system("mkdir -p " + f"{args['checkpoint']}")
        os.system("mkdir -p " + f"{args['checkpoint']}/{args['name']}")
        with open(args['config'], 'w') as write_file:
            print("Json Dumping...")
            json.dump(args, write_file)
    else:
        with open(args['config'], 'r') as open_file:
            args = json.load(open_file)
        # Apply saved values as new defaults (was previously discarded).
        parser.set_defaults(**args)
    return parser
def train_kwargs(parsed_args):
    """
    Build kwargs for the train() function from the parsed
    command-line arguments.
    """
    a = parsed_args
    return dict(
        min_classes=a.min_classes,
        max_classes=a.max_classes,
        train_shots=a.ttrain_shots,
        test_shots=a.ttest_shots,
        meta_batch=a.meta_batch,
        meta_iters=a.meta_iters,
        test_iters=a.eval_iters,
        train_step=a.step,
        name=a.name,
    )
def test_kwargs(parsed_args):
"""
Build kwargs for the train() function from the parsed
command-line arguments.
"""
return {
'eval_step' : parsed_args.step,
'min_classes': parsed_args.min_classes,
'max_classes': parsed_args.max_classes,
'train_shots': parsed_args.etrain_shots,
'test_shots': parsed_args.etest_shots,
'meta_batch': parsed_args.meta_batch,
'meta_iters': parsed_args.eval_iters,
'name': parsed_args.name,
}
--- FILE SEPARATOR ---
import numpy as np
import os
import cv2
import pickle
class MiniImgNet_Gen:
    # Meta-task generator over the MiniImageNet directory layout
    # (train/val/test folders of per-class 84x84 RGB image directories).
    # NOTE(review): `loadImgDir` and `unison_shuffled_copies` are not
    # defined in the visible portion of this module -- presumably
    # module-level helpers defined elsewhere; confirm.

    def __init__(self, path="/tmp/data/miniimagenet", data_path=None):
        if data_path is None:
            self.path = path
            self.train_paths = ["train/"+x for x in os.listdir(path+"/train")]
            self.test_paths = ["test/"+x for x in os.listdir(path+"/test")]
            self.val_paths = ["val/"+x for x in os.listdir(path+"/val")]
        self.data_path = data_path
        # Lazily-populated per-split caches of loaded class images.
        self.meta_train = None
        self.meta_test = None
        self.meta_val = None

    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        # Infinite generator of meta-batches:
        # yields (meta_train_x, meta_train_y, meta_test_x, meta_test_y),
        # each a list of `mb_size` tasks with one-hot labels.
        print('Loading MiniImagenet data...')
        # Load (and cache) the image arrays for the requested split.
        if training == "train":
            if self.meta_train is None:
                meta_data = []
                for idx, im_class in enumerate(self.train_paths):
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84, 84], rgb=True)))
                self.meta_train = meta_data
            else:
                meta_data = self.meta_train
        elif training == "val":
            if self.meta_val is None:
                meta_data = []
                for idx, im_class in enumerate(self.val_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84, 84], rgb=True)))
                self.meta_val = meta_data
            else:
                meta_data = self.meta_val
        elif training == "test":
            if self.meta_test is None:
                meta_data = []
                for idx, im_class in enumerate(self.test_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84, 84], rgb=True)))
                self.meta_test = meta_data
            else:
                meta_data = self.meta_test
        else:
            raise ValueError("Training needs to be train, val or test")
        print(f'Finished loading MiniImagenet data: {np.array(meta_data).shape}')
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        while True:
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # sample fixed number classes for a meta batch
            nr_classes = np.random.randint(min_class, max_class)
            for mb in range(mb_size):
                # select which classes in the meta batch
                classes = np.random.choice(range(len(meta_data)), nr_classes, replace=False)
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                for label_nr, cl in enumerate(classes):
                    # disjoint train/test example indices per class
                    images = np.random.choice(len(meta_data[cl]), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(meta_data[cl][train_imgs])
                    test_x.append(meta_data[cl][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                # one-hot encode the integer labels
                train_y = np.eye(len(classes))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(classes))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 84, 84, 3])
                test_x = np.reshape(test_x, [-1, 84, 84, 3])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            # print('YIEEEEEEELDING')
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
# Initiates the Omniglot dataset and splits into meta train and meta task
class OmniChar_Gen:
    """Omniglot data generator.

    Loads every character class (background + evaluation alphabets) into
    memory once, splits characters into meta-train / meta-test index sets,
    and yields endless meta-batches of few-shot classification tasks.
    """

    def __init__(self, path="/tmp/data/omniglot", data_path=None, test_idx=None):
        """Load all Omniglot characters and build the train/test split.

        path      -- root directory holding images_background/ and images_evaluation/
        data_path -- unused here; presumably kept for interface parity with
                     MiniImgNet_Gen -- TODO confirm
        test_idx  -- optional explicit list of meta-test character indices;
                     when None, a random 1200/remainder split is drawn
        """
        self.path = path
        self.tasks = ["/images_background/"+x for x in os.listdir(path+"/images_background")]+["/images_evaluation/"+x for x in os.listdir(path+"/images_evaluation")]
        self.lens = {}
        for task in self.tasks:
            self.lens[task] = len(os.listdir(self.path+task))
        self.meta_data = []
        print("Loading Omniglot data")
        for idx, task in enumerate(range(len(self.tasks))):
            if idx % 10 == 0:
                print(f"Loading tasks {idx}/{len(self.tasks)}")
            data = []
            for char in os.listdir(self.path+self.tasks[task]):
                c = []
                for img in os.listdir(self.path+self.tasks[task]+"/"+char):
                    c.append(readImg(self.path+self.tasks[task]+"/"+char+"/"+img))
                data.append(c)
            self.meta_data.append(data)
        # Collapse the per-alphabet nesting into one array of characters.
        self.meta_data = np.concatenate(self.meta_data)
        print("Finished loading data")
        # BUGFIX: was `test_idx==None`; identity comparison is the correct
        # idiom and avoids surprises with array-valued test_idx.
        if test_idx is None:
            self.train_idx = list(range(len(self.meta_data)))
            np.random.shuffle(self.train_idx)
            self.test_idx = self.train_idx[1200:]
            self.train_idx = self.train_idx[:1200]
            print("Test_idx:", self.test_idx)
        else:
            self.test_idx = test_idx
            self.train_idx = list(set(list(range(len(self.meta_data)))) - set(self.test_idx))

    # Builds a generator that samples meta batches from meta training/test data
    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Yield (meta_train_x, meta_train_y, meta_test_x, meta_test_y) forever.

        Each meta-batch contains `mb_size` tasks; every task draws a random
        number of classes in [min_class, max_class) and `train_size`/`test_size`
        shots per class (Omniglot has 20 images per character). Labels are
        one-hot encoded per task.

        Raises ValueError for an unknown `training` split or min_class < 2.
        """
        if training == "train":
            idx = self.train_idx
        elif training == "test":
            idx = self.test_idx
        else:
            raise ValueError("Omniglot only supports train and test for training param")
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        ## We can remove this later and make it dynamic
        while True:
            image_idx = idx.copy()
            np.random.shuffle(image_idx)
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # Roll number of classes in the mb
            nr_classes = np.random.randint(min_class, max_class)
            for task in range(mb_size):
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                # Sample the characters for the task
                chars = np.random.choice(image_idx, nr_classes, False)
                # Sample the shots for each character
                for label_nr, char in enumerate(chars):
                    images = np.random.choice(range(20), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(self.meta_data[char][train_imgs])
                    test_x.append(self.meta_data[char][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                train_y = np.eye(len(chars))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(chars))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 28, 28, 1])
                test_x = np.reshape(test_x, [-1, 28, 28, 1])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
def getOrder(minClass, maxClass, mb_size, number_chars=1200):
    """Draw per-meta-batch class counts until roughly number_chars classes
    are consumed (the total overshoots number_chars - minClass*mb_size).

    Returns a flat list where each drawn count is repeated mb_size times,
    one entry per task in the meta-batch.
    """
    chosen = []
    used = 0
    limit = number_chars - minClass * mb_size
    while used <= limit:
        # Cap the draw so the remaining budget can still be honoured.
        remaining_cap = int((number_chars - used) / mb_size) + 1
        count = np.random.randint(minClass, min(remaining_cap, maxClass))
        chosen.extend([count] * mb_size)
        used += count * mb_size
    return chosen
def readImg(path, size=(28, 28), rgb=False):
    """Read an image file, resize it to `size`, and scale values into [0, 1].

    path -- image file path
    size -- (width, height) target size; FIX: tuple instead of a mutable
            list default (anti-pattern, even though it was never mutated)
    rgb  -- when False return a single-channel (H, W, 1) array; when True
            return (H, W, 3), replicating grayscale inputs across channels

    Raises FileNotFoundError when the file cannot be decoded.
    """
    img = cv2.imread(path)
    if img is None:
        # cv2.imread silently returns None on missing/unreadable files;
        # fail loudly here instead of crashing cryptically in cv2.resize.
        raise FileNotFoundError(f"Could not read image: {path}")
    img = cv2.resize(img, (size[0], size[1])).astype(float)
    if np.max(img) > 1.0:
        img /= 255.
    if not rgb:
        # Keep only one channel for grayscale datasets (e.g. Omniglot).
        return img[:, :, :1]
    if len(img.shape) == 3:
        if img.shape[-1] != 3:
            # Diagnostic prints kept from the original (unexpected channel count).
            print('ASFASFASFAS')
            print(img.shape)
            print(path)
        return img
    # 2-D grayscale image: stack it into three identical channels.
    return np.reshape([img, img, img], [size[0], size[1], 3])
def unison_shuffled_copies(a, b):
    """Shuffle two equal-length arrays with the same random permutation,
    keeping rows of `a` and `b` aligned. Returns the shuffled pair."""
    count = len(a)
    assert count == len(b)
    order = np.random.permutation(count)
    return a[order], b[order]
def loadImgDir(path, size, rgb):
    """Load every image in directory `path` via readImg.

    Returns a list of arrays resized to `size`, grayscale or RGB per `rgb`.
    Order follows os.listdir (filesystem-dependent).
    """
    return [readImg(path + "/" + name, size, rgb) for name in os.listdir(path)]
--- FILE SEPARATOR ---
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import os
import numpy as np
import tensorflow as tf
from archs.maml import MAML
class Model(MAML):
    """MAML model with a standard 4-conv classifier head (fcn variant).

    Builds the 4x(conv + batch-norm + relu + maxpool) feature extractor used
    in the MAML literature, followed by a single dense output layer of
    `label_size` units. Weight shapes switch between the Omniglot
    configuration (1 input channel, 64 filters) and the MiniImagenet one
    (3 channels, 32 filters, 800-dim flattened features) via `isMIN`.
    """

    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        """Forward all hyper-parameters to the MAML base class."""
        super().__init__(train_lr, meta_lr, image_shape, isMIN, label_size)

    def dense_weights(self):
        """Create (or reuse, via AUTO_REUSE) all trainable variables.

        Returns (weights, cells): `weights` maps layer names to tf variables
        (4 conv kernels + biases, 1 dense layer); `cells` is always empty in
        this variant -- the hydra variant populates it.
        """
        weights = {}
        cells = {}
        initializer = tf.contrib.layers.xavier_initializer()
        print("Creating/loading Weights")
        # NOTE(review): `divider` is assigned but never used below -- confirm
        # it is dead configuration left over from another variant.
        divider = 1
        inic = 1        # input channels (1 = grayscale Omniglot)
        filters = 64
        finals = 64     # flattened feature size feeding the dense layer
        if self.isMIN:
            # MiniImagenet: RGB input, fewer filters, larger flattened size.
            divider = 2
            inic = 3
            finals = 800
            filters = 32
        with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE):
            weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer)
            weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer)
            weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer)
            weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer)
            weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant)
            weights['d_1'] = tf.get_variable('d_1w', [finals,self.label_size], initializer = initializer)
            weights['b_1'] = tf.get_variable('d_1b', [self.label_size], initializer=tf.initializers.constant)
            # Dead experiment kept from the original (manual batch-norm stats):
            """weights['mean'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
            weights['mean1'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance1'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset1'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale1'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
            weights['mean2'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance2'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset2'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale2'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
            weights['mean3'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
            weights['variance3'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
            weights['offset3'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
            weights['scale3'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )"""
        print("Done Creating/loading Weights")
        return weights, cells

    def forward(self, x, weights, training):
        """Run the 4-conv feature extractor plus dense head on batch `x`.

        `weights` is the dict from dense_weights(); batch-norm layers are
        shared by name via AUTO_REUSE. Returns the dense-layer logits.
        """
        conv1 = self.conv_layer(x, weights["c_1"], weights["cb_1"], "conv1")
        conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE)
        conv1 = tf.nn.relu(conv1)
        conv1 = tf.layers.MaxPooling2D(2, 2)(conv1)
        conv2 = self.conv_layer(conv1, weights["c_2"], weights["cb_2"], "conv2")
        conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE)
        conv2 = tf.nn.relu(conv2)
        conv2 = tf.layers.MaxPooling2D(2, 2)(conv2)
        conv3 = self.conv_layer(conv2, weights["c_3"], weights["cb_3"], "conv3")
        conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE)
        conv3 = tf.nn.relu(conv3)
        conv3 = tf.layers.MaxPooling2D(2, 2)(conv3)
        conv4 = self.conv_layer(conv3, weights["c_4"], weights["cb_4"], "conv4")
        conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE)
        conv4 = tf.nn.relu(conv4)
        conv4 = tf.layers.MaxPooling2D(2, 2)(conv4)
        # print(conv4)
        # bn = tf.squeeze(conv4,axis=(1,2))
        bn = tf.layers.Flatten()(conv4)
        # tf.reshape(bn, [3244,234])
        fc1 = self.fc_layer(bn, "dense1", weights["d_1"], weights["b_1"])
        # bn = tf.reshape(bn,[-1,])
        return fc1
--- FILE SEPARATOR ---
# ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
# THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
import numpy as np
import tensorflow as tf
from archs.maml2 import MAML
def getBin(l=10):
    """Return binary codes for the values 1..l as lists of 0/1 ints.

    Codes are zero-padded on the left to the smallest n with 2**n >= l.
    NOTE(review): when l is an exact power of two, the code for l itself is
    one bit wider than the others -- behavior preserved as-is.
    """
    width = 1
    capacity = 2
    while capacity < l:
        capacity *= 2
        width += 1
    return [[int(bit) for bit in format(i + 1, 'b').zfill(width)]
            for i in range(l)]
class Model(MAML):
    """MAML model with a 'hydra' head: one single-unit dense layer per
    possible class, concatenated and truncated to the task's label count.

    A separate MASTER scope holds a template output head (`cells`) that the
    training loop copies into the per-label heads (see init_assign /
    final_assign usage in train.py).
    """

    def __init__(self, train_lr, meta_lr, image_shape, isMIN, label_size=2):
        """Forward hyper-parameters to MAML and pick the flattened feature
        size (64 for Omniglot, 800 for MiniImagenet)."""
        super().__init__(train_lr, meta_lr, image_shape, isMIN, label_size)
        self.finals = 64
        if isMIN:
            self.finals = 800

    def getBin(self, l=10):
        """Binary codes for 1..l, zero-padded to n bits with 2**n >= l.

        Duplicate of the module-level getBin; kept for compatibility.
        NOTE(review): for l an exact power of two the last code is one bit
        wider than the rest -- confirm whether callers rely on equal widths.
        """
        x_ = 2
        n = 1
        while x_ < l:
            x_ = x_ * 2
            n += 1
        numbers = []
        for i in range(l):
            num = []
            for j in list('{0:0b}'.format(i + 1).zfill(n)):
                num.append(int(j))
            numbers.append(num)
        return numbers

    def dense_weights(self):
        """Create all trainable variables (AUTO_REUSE makes this idempotent).

        Returns (weights, cells): `weights` holds conv kernels/biases plus
        `self.max_labels` single-unit dense heads; `cells` holds the MASTER
        template head copied into the per-label heads during training.
        """
        weights = {}
        cells = {}
        initializer = tf.contrib.layers.xavier_initializer()
        # NOTE(review): `divider` is assigned but never used -- looks like
        # dead configuration; confirm before removing.
        divider = 1
        inic = 1        # input channels (1 = grayscale Omniglot)
        filters = 64
        self.finals = 64
        if self.isMIN:
            print("\n\n\n\n\n\n\n\n\nIS MIN\n\n\n\n\n\n\n\n\n\n\n")
            divider = 2
            inic = 3
            self.finals = 800
            filters = 32
        with tf.variable_scope('MASTER', reuse= tf.AUTO_REUSE):
            cells['d_1'] = tf.get_variable('MASTER_d_1w', [self.finals,1], initializer = initializer)
            cells['b_1'] = tf.get_variable('MASTER_d_1b', [1], initializer=tf.initializers.constant)
        with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE):
            weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer)
            weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer)
            weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer)
            weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer)
            weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant)
            weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant)
            # One independent single-unit head per potential label.
            for i in range (self.max_labels):
                weights['d_1w'+str(i)] = tf.get_variable('d_1w'+str(i), [self.finals,1], initializer = initializer)
                weights['b_1w'+str(i)] = tf.get_variable('d_1b'+str(i), [1], initializer=tf.initializers.constant)
        return weights, cells

    def forward(self, x, weights, training):
        """4x(conv+bn+relu+maxpool) feature extractor, then the hydra heads.

        Each per-label head produces one logit; they are concatenated and
        sliced to the current task's label count `self.label_n[0]`.
        """
        # with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE):
        conv1 = self.conv_layer(x, weights["c_1"], weights["cb_1"], "conv1")
        conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE)
        conv1 = tf.nn.relu(conv1)
        conv1 = tf.layers.MaxPooling2D(2, 2)(conv1)
        conv2 = self.conv_layer(conv1, weights["c_2"], weights["cb_2"], "conv2")
        conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE)
        conv2 = tf.nn.relu(conv2)
        conv2 = tf.layers.MaxPooling2D(2, 2)(conv2)
        conv3 = self.conv_layer(conv2, weights["c_3"], weights["cb_3"], "conv3")
        conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE)
        conv3 = tf.nn.relu(conv3)
        conv3 = tf.layers.MaxPooling2D(2, 2)(conv3)
        conv4 = self.conv_layer(conv3, weights["c_4"], weights["cb_4"], "conv4")
        conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE)
        conv4 = tf.nn.relu(conv4)
        conv4 = tf.layers.MaxPooling2D(2, 2)(conv4)
        bn = tf.layers.Flatten()(conv4)
        agg = [self.fc_layer(bn, "dense"+str(i), weights["d_1w"+str(i)], weights["b_1w"+str(i)]) for i in range(self.max_labels)]
        fc1 = tf.concat(agg, axis=-1)[:, :self.label_n[0]]
        return fc1
|
{
"imported_by": [],
"imports": [
"/args.py",
"/data_gen/omni_gen.py",
"/archs/fcn.py",
"/archs/hydra.py"
]
}
|
radrumond/hidra
|
/test.py
|
import numpy as np
import tensorflow as tf
from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
def test(m, data_sampler,
         eval_step,
         min_classes,
         max_classes,
         train_shots,
         test_shots,
         meta_batch,
         meta_iters,
         name):
    """Evaluate a saved (meta-)model on meta-test tasks.

    m            -- model graph providing loadWeights and the test tensors
    data_sampler -- OmniChar_Gen / MiniImgNet_Gen instance
    eval_step    -- checkpoint step to restore
    meta_iters   -- number of meta-test batches to evaluate
    name         -- experiment/checkpoint name

    Prints running and final mean/std of losses and per-adaptation-step
    accuracy; returns nothing.
    """
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    losses = []
    temp_yp = []   # per-iteration accuracy for every adaptation step
    aps = []       # NOTE(review): never appended to -- appears unused
    buffer = []    # training-set losses
    lossesB = []
    train_gen = data_sampler.sample_Task(meta_batch, min_classes, max_classes+1, train_shots, test_shots, "test")
    print("TEST MODE")
    m.loadWeights(sess, name, step = str(int(eval_step)), model_name=name+".ckpt")
    for i in range(meta_iters):
        xb1, yb1, xb2, yb2 = next(train_gen)
        # Number of distinct labels actually present in this meta-batch.
        num_l = [len(np.unique(np.argmax(yb1, axis=-1)))]
        if m.maml_n == 2:
            # Hydra variant: re-seed the per-label heads from the master.
            sess.run(m.init_assign, feed_dict={m.label_n: [5]})
        l, vals, ps = sess.run([m.test_train_loss, m.test_val_losses, m.val_predictions], feed_dict={m.train_xb: xb1,
                                                                                                     m.train_yb: yb1,
                                                                                                     m.val_xb: xb2,
                                                                                                     m.val_yb: yb2,
                                                                                                     m.label_n: num_l})
        losses.append(vals)
        lossesB.append(vals)
        buffer.append(l)
        true_vals = np.argmax(yb2, axis=-1)
        # Accuracy after each inner adaptation step.
        all_accs = []
        for pred_epoch in range(len(ps)):
            all_accs.append(np.mean(np.argmax(ps[pred_epoch], axis=-1) == true_vals))
        temp_yp.append(all_accs)
        # if i%1==0:
        if i % 50 == 0:
            print(f"({i}/{meta_iters})")
            print(f"Final: TLoss {np.mean(buffer)}, VLoss {np.mean(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}" )
    print(f"Final: TLoss {np.mean(buffer)}-{np.std(buffer)}, VLoss {np.mean(lossesB,axis=0)}-{np.std(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}-{np.std(temp_yp,axis=0)}" )
|
import numpy as np
import os
import cv2
import pickle
class MiniImgNet_Gen:
    """MiniImagenet data generator.

    Lazily loads the train/val/test class folders into memory (cached per
    split) and yields endless meta-batches of few-shot classification tasks
    of 84x84x3 images.
    """

    def __init__(self, path="/tmp/data/miniimagenet", data_path=None):
        """Record the class-folder listing for each split.

        NOTE(review): when data_path is not None, self.path / *_paths are
        never set, so sample_Task would fail with AttributeError -- confirm
        whether the data_path code path was ever finished.
        """
        if data_path is None:
            self.path = path
            self.train_paths = ["train/"+x for x in os.listdir(path+"/train")]
            self.test_paths = ["test/"+x for x in os.listdir(path+"/test")]
            self.val_paths = ["val/"+x for x in os.listdir(path+"/val")]
        self.data_path = data_path
        # Per-split image caches; filled on first sample_Task call.
        self.meta_train = None
        self.meta_test = None
        self.meta_val = None

    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Yield (meta_train_x, meta_train_y, meta_test_x, meta_test_y) forever.

        Each meta-batch holds `mb_size` tasks; every task draws a random
        number of classes in [min_class, max_class) and train_size/test_size
        shots per class. Labels are one-hot per task.

        Raises ValueError for an unknown split or min_class < 2.
        """
        print('Loading MiniImagenet data...')
        if training == "train":
            if self.meta_train is None:
                meta_data = []
                for idx, im_class in enumerate(self.train_paths):
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84,84], rgb=True)))
                self.meta_train = meta_data
            else:
                meta_data = self.meta_train
        elif training == "val":
            if self.meta_val is None:
                meta_data = []
                for idx, im_class in enumerate(self.val_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84,84], rgb=True)))
                self.meta_val = meta_data
            else:
                meta_data = self.meta_val
        elif training == "test":
            if self.meta_test is None:
                meta_data = []
                for idx, im_class in enumerate(self.test_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84,84], rgb=True)))
                self.meta_test = meta_data
            else:
                meta_data = self.meta_test
        else:
            raise ValueError("Training needs to be train, val or test")
        print(f'Finished loading MiniImagenet data: {np.array(meta_data).shape}')
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        while True:
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # sample fixed number classes for a meta batch
            nr_classes = np.random.randint(min_class, max_class)
            for mb in range(mb_size):
                # select which classes in the meta batch
                classes = np.random.choice(range(len(meta_data)), nr_classes, replace=False)
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                for label_nr, cl in enumerate(classes):
                    # Sample disjoint train/test shots for this class.
                    images = np.random.choice(len(meta_data[cl]), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(meta_data[cl][train_imgs])
                    test_x.append(meta_data[cl][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                train_y = np.eye(len(classes))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(classes))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1,84,84,3])
                test_x = np.reshape(test_x, [-1,84,84,3])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            # print('YIEEEEEEELDING')
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
# Initiates the Omniglot dataset and splits into meta train and meta task
class OmniChar_Gen:
    """Omniglot data generator.

    Loads every character class (background + evaluation alphabets) into
    memory once, splits characters into meta-train / meta-test index sets,
    and yields endless meta-batches of few-shot classification tasks.
    """

    def __init__(self, path="/tmp/data/omniglot", data_path=None, test_idx=None):
        """Load all Omniglot characters and build the train/test split.

        path      -- root directory holding images_background/ and images_evaluation/
        data_path -- unused here; presumably kept for interface parity with
                     MiniImgNet_Gen -- TODO confirm
        test_idx  -- optional explicit list of meta-test character indices;
                     when None, a random 1200/remainder split is drawn
        """
        self.path = path
        self.tasks = ["/images_background/"+x for x in os.listdir(path+"/images_background")]+["/images_evaluation/"+x for x in os.listdir(path+"/images_evaluation")]
        self.lens = {}
        for task in self.tasks:
            self.lens[task] = len(os.listdir(self.path+task))
        self.meta_data = []
        print("Loading Omniglot data")
        for idx, task in enumerate(range(len(self.tasks))):
            if idx % 10 == 0:
                print(f"Loading tasks {idx}/{len(self.tasks)}")
            data = []
            for char in os.listdir(self.path+self.tasks[task]):
                c = []
                for img in os.listdir(self.path+self.tasks[task]+"/"+char):
                    c.append(readImg(self.path+self.tasks[task]+"/"+char+"/"+img))
                data.append(c)
            self.meta_data.append(data)
        # Collapse the per-alphabet nesting into one array of characters.
        self.meta_data = np.concatenate(self.meta_data)
        print("Finished loading data")
        # BUGFIX: was `test_idx==None`; identity comparison is the correct
        # idiom and avoids surprises with array-valued test_idx.
        if test_idx is None:
            self.train_idx = list(range(len(self.meta_data)))
            np.random.shuffle(self.train_idx)
            self.test_idx = self.train_idx[1200:]
            self.train_idx = self.train_idx[:1200]
            print("Test_idx:", self.test_idx)
        else:
            self.test_idx = test_idx
            self.train_idx = list(set(list(range(len(self.meta_data)))) - set(self.test_idx))

    # Builds a generator that samples meta batches from meta training/test data
    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Yield (meta_train_x, meta_train_y, meta_test_x, meta_test_y) forever.

        Each meta-batch contains `mb_size` tasks; every task draws a random
        number of classes in [min_class, max_class) and `train_size`/`test_size`
        shots per class (Omniglot has 20 images per character). Labels are
        one-hot encoded per task.

        Raises ValueError for an unknown `training` split or min_class < 2.
        """
        if training == "train":
            idx = self.train_idx
        elif training == "test":
            idx = self.test_idx
        else:
            raise ValueError("Omniglot only supports train and test for training param")
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        ## We can remove this later and make it dynamic
        while True:
            image_idx = idx.copy()
            np.random.shuffle(image_idx)
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # Roll number of classes in the mb
            nr_classes = np.random.randint(min_class, max_class)
            for task in range(mb_size):
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                # Sample the characters for the task
                chars = np.random.choice(image_idx, nr_classes, False)
                # Sample the shots for each character
                for label_nr, char in enumerate(chars):
                    images = np.random.choice(range(20), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(self.meta_data[char][train_imgs])
                    test_x.append(self.meta_data[char][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                train_y = np.eye(len(chars))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(chars))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 28, 28, 1])
                test_x = np.reshape(test_x, [-1, 28, 28, 1])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
def getOrder(minClass, maxClass, mb_size, number_chars=1200):
    """Draw per-meta-batch class counts until roughly number_chars classes
    are consumed (the total overshoots number_chars - minClass*mb_size).

    Returns a flat list where each drawn count is repeated mb_size times,
    one entry per task in the meta-batch.
    """
    chosen = []
    used = 0
    limit = number_chars - minClass * mb_size
    while used <= limit:
        # Cap the draw so the remaining budget can still be honoured.
        remaining_cap = int((number_chars - used) / mb_size) + 1
        count = np.random.randint(minClass, min(remaining_cap, maxClass))
        chosen.extend([count] * mb_size)
        used += count * mb_size
    return chosen
def readImg(path, size=(28, 28), rgb=False):
    """Read an image file, resize it to `size`, and scale values into [0, 1].

    path -- image file path
    size -- (width, height) target size; FIX: tuple instead of a mutable
            list default (anti-pattern, even though it was never mutated)
    rgb  -- when False return a single-channel (H, W, 1) array; when True
            return (H, W, 3), replicating grayscale inputs across channels

    Raises FileNotFoundError when the file cannot be decoded.
    """
    img = cv2.imread(path)
    if img is None:
        # cv2.imread silently returns None on missing/unreadable files;
        # fail loudly here instead of crashing cryptically in cv2.resize.
        raise FileNotFoundError(f"Could not read image: {path}")
    img = cv2.resize(img, (size[0], size[1])).astype(float)
    if np.max(img) > 1.0:
        img /= 255.
    if not rgb:
        # Keep only one channel for grayscale datasets (e.g. Omniglot).
        return img[:, :, :1]
    if len(img.shape) == 3:
        if img.shape[-1] != 3:
            # Diagnostic prints kept from the original (unexpected channel count).
            print('ASFASFASFAS')
            print(img.shape)
            print(path)
        return img
    # 2-D grayscale image: stack it into three identical channels.
    return np.reshape([img, img, img], [size[0], size[1], 3])
def unison_shuffled_copies(a, b):
    """Shuffle two equal-length arrays with the same random permutation,
    keeping rows of `a` and `b` aligned. Returns the shuffled pair."""
    count = len(a)
    assert count == len(b)
    order = np.random.permutation(count)
    return a[order], b[order]
def loadImgDir(path, size, rgb):
    """Load every image in directory `path` via readImg.

    Returns a list of arrays resized to `size`, grayscale or RGB per `rgb`.
    Order follows os.listdir (filesystem-dependent).
    """
    return [readImg(path + "/" + name, size, rgb) for name in os.listdir(path)]
|
{
"imported_by": [],
"imports": [
"/data_gen/omni_gen.py"
]
}
|
radrumond/hidra
|
/train.py
|
import numpy as np
import tensorflow as tf
from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
import time
def train( m, mt, # m is the model foir training, mt is the model for testing
           data_sampler, # Creates the data generator for training and testing
           min_classes, # minimum amount of classes
           max_classes, # maximum || || ||
           train_shots, # number of samples per class (train)
           test_shots, # number of samples per class (test)
           meta_batch, # Number of tasks
           meta_iters, # Number of iterations
           test_iters, # Iterations in Test
           train_step,
           name): # Experiment name for experiments
    """Meta-train model `m`, periodically evaluating with `mt` and saving
    checkpoints under `name`.

    train_step -- checkpoint step to restore before training
    test_iters -- NOTE(review): accepted but never used (the eval loop is
                  hard-coded to 100 iterations) -- confirm intent.
    Prints progress every 100 iterations; saves every 5000 and once at the end.
    """
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # bnorms = [v for v in tf.global_variables() if "bn" in v.name]
    #---------Performance Tracking lists---------------------------------------
    losses = []
    temp_yp = []    # overall accuracy per iteration
    temp_ypn = []   # per-label accuracy per iteration
    nls = []        # NOTE(review): never appended to -- appears unused
    aps = []        # NOTE(review): never appended to -- appears unused
    buffer = []     # train losses since last report
    lossesB = []    # val losses since last report
    #--------------------------------------------------------------------------
    #---------Load train and test data-sets------------------------------------
    train_gen = data_sampler.sample_Task(meta_batch, min_classes, max_classes+1, train_shots, test_shots, "train")
    if mt is not None:
        test_gen = data_sampler.sample_Task(meta_batch, min_classes, max_classes+1, train_shots, test_shots, "test" )
    m.loadWeights(sess, name, step=str(int(train_step)), model_name=name+".ckpt")
    #--------------------------------------------------------------------------
    #TRAIN LOOP
    print("Starting meta training:")
    start = time.time()
    for i in range(meta_iters):
        xb1, yb1, xb2, yb2 = next(train_gen)
        # Number of distinct labels actually present in this meta-batch.
        num_l = [len(np.unique(np.argmax(yb1, axis=-1)))]
        if m.maml_n == 2: # in case it uses hydra master node, it should re-assign the output nodes from the master
            sess.run(m.init_assign, feed_dict={m.label_n: [5]})
        l, _, vals, ps = sess.run([m.train_loss, m.meta_op, m.val_losses, m.val_predictions], feed_dict={m.train_xb: xb1,
                                                                                                         m.train_yb: yb1,
                                                                                                         m.val_xb: xb2,
                                                                                                         m.val_yb: yb2,
                                                                                                         m.label_n: num_l})
        if m.maml_n == 2: # in case it uses hydra master node, it should update the master
            sess.run(m.final_assign, feed_dict={m.label_n: num_l})
        losses.append(vals)
        lossesB.append(vals)
        buffer.append(l)
        #Calculate accuaracies
        aux = []
        tmp_pred = np.argmax(np.reshape(ps[-1], [-1, num_l[0]]), axis=-1)
        tmp_true = np.argmax(np.reshape(yb2, [-1, num_l[0]]), axis=-1)
        for ccci in range(num_l[0]):
            tmp_idx = np.where(tmp_true == ccci)[0]
            #print(tmp_idx)
            aux.append(np.mean(tmp_pred[tmp_idx] == tmp_true[tmp_idx]))
        temp_yp.append(np.mean(tmp_pred == tmp_true))
        temp_ypn.append(aux)
        #EVALUATE and PRINT
        if i % 100 == 0:
            testString = ""
            #If we give a test model, it will test using the weights from train
            if mt is not None and i % 1000 == 0:
                lossestest = []
                buffertest = []
                lossesBtest = []
                temp_yptest = []
                for z in range(100):
                    if m.maml_n == 2:
                        sess.run(mt.init_assign, feed_dict={mt.label_n: [5]})
                    xb1, yb1, xb2, yb2 = next(test_gen)
                    num_l = [len(np.unique(np.argmax(yb1, axis=-1)))]
                    l, vals, ps = sess.run([mt.test_train_loss, mt.test_val_losses, mt.val_predictions], feed_dict={mt.train_xb: xb1,
                                                                                                                    mt.train_yb: yb1,
                                                                                                                    mt.val_xb: xb2,
                                                                                                                    mt.val_yb: yb2,
                                                                                                                    mt.label_n: num_l})
                    lossestest.append(vals)
                    lossesBtest.append(vals)
                    buffertest.append(l)
                    temp_yptest.append(np.mean(np.argmax(ps[-1], axis=-1) == np.argmax(yb2, axis=-1)))
                testString = f"\n    TEST: TLoss {np.mean(buffertest):.3f} VLoss {np.mean(lossesBtest,axis=0)[-1]:.3f}, ACCURACY {np.mean(temp_yptest):.4f}"
            print(f"Epoch {i}: TLoss {np.mean(buffer):.4f}, VLoss {np.mean(lossesB,axis=0)[-1]:.4f},",
                  f"Accuracy {np.mean(temp_yp):.4}", f", Per label acc: {[float('%.4f' % elem) for elem in aux]}", f"Finished in {time.time()-start}s", testString)
            # Reset the reporting windows.
            buffer = []
            lossesB = []
            temp_yp = []
            start = time.time()
            # f"\n TRUE: {yb2}\n PRED: {ps}")
        if i % 5000 == 0:
            print("Saving...")
            m.saveWeights(sess, name, i, model_name=name+".ckpt")
    m.saveWeights(sess, name, i, model_name=name+".ckpt")
|
import numpy as np
import os
import cv2
import pickle
class MiniImgNet_Gen:
    """MiniImagenet data generator.

    Lazily loads the train/val/test class folders into memory (cached per
    split) and yields endless meta-batches of few-shot classification tasks
    of 84x84x3 images.
    """

    def __init__(self, path="/tmp/data/miniimagenet", data_path=None):
        """Record the class-folder listing for each split.

        NOTE(review): when data_path is not None, self.path / *_paths are
        never set, so sample_Task would fail with AttributeError -- confirm
        whether the data_path code path was ever finished.
        """
        if data_path is None:
            self.path = path
            self.train_paths = ["train/"+x for x in os.listdir(path+"/train")]
            self.test_paths = ["test/"+x for x in os.listdir(path+"/test")]
            self.val_paths = ["val/"+x for x in os.listdir(path+"/val")]
        self.data_path = data_path
        # Per-split image caches; filled on first sample_Task call.
        self.meta_train = None
        self.meta_test = None
        self.meta_val = None

    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Yield (meta_train_x, meta_train_y, meta_test_x, meta_test_y) forever.

        Each meta-batch holds `mb_size` tasks; every task draws a random
        number of classes in [min_class, max_class) and train_size/test_size
        shots per class. Labels are one-hot per task.

        Raises ValueError for an unknown split or min_class < 2.
        """
        print('Loading MiniImagenet data...')
        if training == "train":
            if self.meta_train is None:
                meta_data = []
                for idx, im_class in enumerate(self.train_paths):
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84,84], rgb=True)))
                self.meta_train = meta_data
            else:
                meta_data = self.meta_train
        elif training == "val":
            if self.meta_val is None:
                meta_data = []
                for idx, im_class in enumerate(self.val_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84,84], rgb=True)))
                self.meta_val = meta_data
            else:
                meta_data = self.meta_val
        elif training == "test":
            if self.meta_test is None:
                meta_data = []
                for idx, im_class in enumerate(self.test_paths):
                    # print(idx)
                    meta_data.append(np.array(loadImgDir(self.path+"/"+im_class, [84,84], rgb=True)))
                self.meta_test = meta_data
            else:
                meta_data = self.meta_test
        else:
            raise ValueError("Training needs to be train, val or test")
        print(f'Finished loading MiniImagenet data: {np.array(meta_data).shape}')
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        while True:
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # sample fixed number classes for a meta batch
            nr_classes = np.random.randint(min_class, max_class)
            for mb in range(mb_size):
                # select which classes in the meta batch
                classes = np.random.choice(range(len(meta_data)), nr_classes, replace=False)
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                for label_nr, cl in enumerate(classes):
                    # Sample disjoint train/test shots for this class.
                    images = np.random.choice(len(meta_data[cl]), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(meta_data[cl][train_imgs])
                    test_x.append(meta_data[cl][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                train_y = np.eye(len(classes))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(classes))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1,84,84,3])
                test_x = np.reshape(test_x, [-1,84,84,3])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            # print('YIEEEEEEELDING')
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
# Initiates the Omniglot dataset and splits into meta train and meta task
class OmniChar_Gen:
    """Omniglot data generator.

    Loads every character class (background + evaluation alphabets) into
    memory once, splits characters into meta-train / meta-test index sets,
    and yields endless meta-batches of few-shot classification tasks.
    """

    def __init__(self, path="/tmp/data/omniglot", data_path=None, test_idx=None):
        """Load all Omniglot characters and build the train/test split.

        path      -- root directory holding images_background/ and images_evaluation/
        data_path -- unused here; presumably kept for interface parity with
                     MiniImgNet_Gen -- TODO confirm
        test_idx  -- optional explicit list of meta-test character indices;
                     when None, a random 1200/remainder split is drawn
        """
        self.path = path
        self.tasks = ["/images_background/"+x for x in os.listdir(path+"/images_background")]+["/images_evaluation/"+x for x in os.listdir(path+"/images_evaluation")]
        self.lens = {}
        for task in self.tasks:
            self.lens[task] = len(os.listdir(self.path+task))
        self.meta_data = []
        print("Loading Omniglot data")
        for idx, task in enumerate(range(len(self.tasks))):
            if idx % 10 == 0:
                print(f"Loading tasks {idx}/{len(self.tasks)}")
            data = []
            for char in os.listdir(self.path+self.tasks[task]):
                c = []
                for img in os.listdir(self.path+self.tasks[task]+"/"+char):
                    c.append(readImg(self.path+self.tasks[task]+"/"+char+"/"+img))
                data.append(c)
            self.meta_data.append(data)
        # Collapse the per-alphabet nesting into one array of characters.
        self.meta_data = np.concatenate(self.meta_data)
        print("Finished loading data")
        # BUGFIX: was `test_idx==None`; identity comparison is the correct
        # idiom and avoids surprises with array-valued test_idx.
        if test_idx is None:
            self.train_idx = list(range(len(self.meta_data)))
            np.random.shuffle(self.train_idx)
            self.test_idx = self.train_idx[1200:]
            self.train_idx = self.train_idx[:1200]
            print("Test_idx:", self.test_idx)
        else:
            self.test_idx = test_idx
            self.train_idx = list(set(list(range(len(self.meta_data)))) - set(self.test_idx))

    # Builds a generator that samples meta batches from meta training/test data
    def sample_Task(self, mb_size, min_class, max_class, train_size, test_size, training="train", shuffle=True):
        """Yield (meta_train_x, meta_train_y, meta_test_x, meta_test_y) forever.

        Each meta-batch contains `mb_size` tasks; every task draws a random
        number of classes in [min_class, max_class) and `train_size`/`test_size`
        shots per class (Omniglot has 20 images per character). Labels are
        one-hot encoded per task.

        Raises ValueError for an unknown `training` split or min_class < 2.
        """
        if training == "train":
            idx = self.train_idx
        elif training == "test":
            idx = self.test_idx
        else:
            raise ValueError("Omniglot only supports train and test for training param")
        if min_class < 2:
            raise ValueError("Minimum number of classes must be >=2")
        ## We can remove this later and make it dynamic
        while True:
            image_idx = idx.copy()
            np.random.shuffle(image_idx)
            meta_train_x = []
            meta_train_y = []
            meta_test_x = []
            meta_test_y = []
            # Roll number of classes in the mb
            nr_classes = np.random.randint(min_class, max_class)
            for task in range(mb_size):
                train_x = []
                train_y = []
                test_x = []
                test_y = []
                # Sample the characters for the task
                chars = np.random.choice(image_idx, nr_classes, False)
                # Sample the shots for each character
                for label_nr, char in enumerate(chars):
                    images = np.random.choice(range(20), train_size+test_size, False)
                    train_imgs = images[:train_size]
                    test_imgs = images[train_size:]
                    train_x.append(self.meta_data[char][train_imgs])
                    test_x.append(self.meta_data[char][test_imgs])
                    train_y.append(np.ones(train_size)*label_nr)
                    test_y.append(np.ones(test_size)*label_nr)
                train_x = np.array(train_x)
                train_y = np.eye(len(chars))[np.reshape(np.array(train_y), -1).astype(int)]
                test_x = np.array(test_x)
                test_y = np.eye(len(chars))[np.reshape(np.array(test_y), -1).astype(int)]
                train_x = np.reshape(train_x, [-1, 28, 28, 1])
                test_x = np.reshape(test_x, [-1, 28, 28, 1])
                if shuffle:
                    train_x, train_y = unison_shuffled_copies(train_x, train_y)
                    test_x, test_y = unison_shuffled_copies(test_x, test_y)
                meta_train_x.append(train_x)
                meta_train_y.append(train_y)
                meta_test_x.append(test_x)
                meta_test_y.append(test_y)
            yield meta_train_x, meta_train_y, meta_test_x, meta_test_y
def getOrder(minClass, maxClass, mb_size, number_chars=1200):
    """Draw per-meta-batch class counts until roughly number_chars classes
    are consumed (the total overshoots number_chars - minClass*mb_size).

    Returns a flat list where each drawn count is repeated mb_size times,
    one entry per task in the meta-batch.
    """
    chosen = []
    used = 0
    limit = number_chars - minClass * mb_size
    while used <= limit:
        # Cap the draw so the remaining budget can still be honoured.
        remaining_cap = int((number_chars - used) / mb_size) + 1
        count = np.random.randint(minClass, min(remaining_cap, maxClass))
        chosen.extend([count] * mb_size)
        used += count * mb_size
    return chosen
def readImg(path, size=(28, 28), rgb=False):
    """Read an image file, resize it to `size`, and scale values into [0, 1].

    path -- image file path
    size -- (width, height) target size; FIX: tuple instead of a mutable
            list default (anti-pattern, even though it was never mutated)
    rgb  -- when False return a single-channel (H, W, 1) array; when True
            return (H, W, 3), replicating grayscale inputs across channels

    Raises FileNotFoundError when the file cannot be decoded.
    """
    img = cv2.imread(path)
    if img is None:
        # cv2.imread silently returns None on missing/unreadable files;
        # fail loudly here instead of crashing cryptically in cv2.resize.
        raise FileNotFoundError(f"Could not read image: {path}")
    img = cv2.resize(img, (size[0], size[1])).astype(float)
    if np.max(img) > 1.0:
        img /= 255.
    if not rgb:
        # Keep only one channel for grayscale datasets (e.g. Omniglot).
        return img[:, :, :1]
    if len(img.shape) == 3:
        if img.shape[-1] != 3:
            # Diagnostic prints kept from the original (unexpected channel count).
            print('ASFASFASFAS')
            print(img.shape)
            print(path)
        return img
    # 2-D grayscale image: stack it into three identical channels.
    return np.reshape([img, img, img], [size[0], size[1], 3])
def unison_shuffled_copies(a, b):
    """Shuffle two equal-length arrays with the same random permutation,
    keeping rows of `a` and `b` aligned. Returns the shuffled pair."""
    count = len(a)
    assert count == len(b)
    order = np.random.permutation(count)
    return a[order], b[order]
def loadImgDir(path, size, rgb):
    """Load every image in directory `path` via readImg.

    Returns a list of arrays resized to `size`, grayscale or RGB per `rgb`.
    Order follows os.listdir (filesystem-dependent).
    """
    return [readImg(path + "/" + name, size, rgb) for name in os.listdir(path)]
|
{
"imported_by": [],
"imports": [
"/data_gen/omni_gen.py"
]
}
|
sebastianden/alpaca
|
/src/alpaca.py
|
import warnings
warnings.simplefilter(action='ignore')
import pickle
import pandas as pd
import numpy as np
from utils import TimeSeriesScalerMeanVariance, Flattener, Featuriser, plot_dtc
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_curve, auc
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.base import ClassifierMixin, BaseEstimator, clone
from tslearn.clustering import TimeSeriesKMeans
from tslearn.neighbors import KNeighborsTimeSeriesClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from IPython.display import SVG
from tensorflow.keras.utils import model_to_dot
from tensorflow.keras.utils import plot_model
class Alpaca(ClassifierMixin):
    """Learning product classification algorithm.

    Combines a distance-threshold anomaly detector with an ensemble
    classifier (DTC, SVC, CNN) plus optional stacked meta-classifiers.
    """
    def __init__(self):
        self.anomaly_detection = AnomalyDetection()
        self.classifier = Classifier()
    def fit(self, X, y, stacked=True):
        """
        Fit the algorithm according to the given training data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked: bool
            If true train a meta classifier on kfold CV predictions of the level 1 classifiers
        Returns
        -------
        self: object
            Fitted model
        """
        # Fit anomaly detection: grid-search the number of k-means clusters.
        param_grid = {'n_clusters': [10,50,100,200]}
        grid = GridSearchCV(self.anomaly_detection, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Persist the full CV results for offline inspection.
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\ad.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.anomaly_detection = grid.best_estimator_
        # Save the model
        with open("models\\ad.pkl", 'wb') as file:
            pickle.dump(self.anomaly_detection, file)
        # Fit ensemble classifier
        self.classifier.fit(X, y, stacked)
        return self
    def predict(self, X, voting):
        """
        Perform a classification on samples in X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting: string
            Voting scheme to use ('veto', 'democratic', 'meta_dtc', 'meta_svc')
        Returns
        -------
        y_pred_bin: array, shape (n_samples,)
            Combined binary predictions (anomaly detector forced positive
            wherever the ensemble predicts a non-zero class)
        y_pred: array, shape (n_samples,)
            Predictions from ensemble with suggested class labels
        """
        # Class predictions of ensemble
        y_pred, y_pred_ens = self.classifier.predict(X, voting=voting)
        # Binary predictions of anomaly detector
        y_pred_ad = self.anomaly_detection.predict(X)
        # Save individual level-1 predictions for offline analysis.
        y_pred_indiv = np.column_stack((y_pred_ens, y_pred_ad)).astype(int)
        df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_dtc','y_pred_svc','y_pred_cnn','y_pred_ad'])
        df_results.to_csv("results\\y_pred_indiv.csv",index=False)
        # Overwrite the entries in y_pred_ad with positive, where ensemble decides positive
        y_pred_bin = np.where(y_pred != 0, 1, y_pred_ad)
        return y_pred_bin, y_pred
class AnomalyDetection(ClassifierMixin, BaseEstimator):
    """
    Anomaly detection with 1-NN against k-means centroids of the good class,
    with automatic calculation of the optimal distance threshold (Youden index).
    """
    def __init__(self, n_clusters=200):
        # 1-NN over centroids; labels are all zero, only distances are used.
        self.knn = KNeighborsTimeSeriesClassifier(n_neighbors=1, weights='uniform', metric='euclidean', n_jobs=-1)
        # Distance threshold, learned in fit.
        self.d = None
        self.n_clusters = n_clusters
    def fit(self, X, y):
        """
        Fit the algorithm according to the given training data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X (0 = good, anything else = bad).
        Returns
        -------
        self: object
            Fitted model
        """
        # Cluster the good samples and fit the 1-NN on the centroids.
        X_good = X[np.where(y == 0)]
        X_bad = X[np.where(y != 0)]
        km = TimeSeriesKMeans(n_clusters=self.n_clusters, metric="euclidean",
                              max_iter=100, random_state=0, n_jobs=-1).fit(X_good)
        self.knn.fit(km.cluster_centers_, np.zeros((self.n_clusters,)))
        # Calculate distances to all samples in good and bad
        d_bad, _ = self.knn.kneighbors(X_bad)
        d_good, _ = self.knn.kneighbors(X_good)
        # Calculate ROC over the nearest-centroid distances.
        y_true = np.hstack((np.zeros(X_good.shape[0]), np.ones(X_bad.shape[0])))
        y_score = np.vstack((d_good, d_bad))
        fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=1)
        # Determine d as the threshold maximising tpr - fpr (Youden index).
        self.d = thresholds[np.argmax(tpr - fpr)]
        return self
    def predict(self, X):
        """
        Perform a classification on samples in X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        Returns
        -------
        y_pred: array, shape (n_samples,)
            1 where the nearest-centroid distance meets/exceeds d, else 0.
        """
        # Binary predictions of anomaly detector
        y_pred = np.squeeze(np.where(self.knn.kneighbors(X)[0] < self.d, 0, 1))
        return y_pred
class Classifier(ClassifierMixin):
    """Ensemble of three level-1 classifiers (decision tree, SVM, 1-D CNN)
    with optional meta-classifiers trained on out-of-fold predictions.
    """
    def __init__(self):
        # DTC pipeline: window statistics -> decision tree.
        featuriser = Featuriser()
        dtc = DecisionTreeClassifier()
        self.dtc_pipe = Pipeline([('featuriser', featuriser), ('dtc', dtc)])
        # SVC pipeline: scale -> flatten -> SVM.
        scaler = TimeSeriesScalerMeanVariance(kind='constant')
        flattener = Flattener()
        svc = SVC()
        self.svc_pipe = Pipeline([('scaler', scaler), ('flattener', flattener), ('svc', svc)])
        # CNN pipeline: scale -> Keras CNN built by build_cnn.
        cnn = KerasClassifier(build_fn=build_cnn, epochs=100, verbose=0)
        self.cnn_pipe = Pipeline([('scaler', scaler), ('cnn', cnn)])
        # Meta classifiers (trained only when fit(..., stacked=True)).
        self.meta_dtc = DecisionTreeClassifier()
        self.meta_svc = SVC()

    def fit(self, X, y, stacked):
        """Grid-search and fit each level-1 pipeline; optionally fit the
        meta-classifiers on out-of-fold level-1 predictions.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked : bool
            If true train the meta classifiers on kfold CV predictions of
            the level 1 classifiers.

        Returns
        -------
        self : object
            Fitted model.
        """
        # --- DTC: grid-search, persist CV results and best model ---
        param_grid = {'featuriser__windows': [1, 2, 3, 4, 5, 6],
                      'dtc__max_depth': [3, 4, 5],
                      'dtc__criterion': ['gini', 'entropy']}
        grid = GridSearchCV(self.dtc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\dtc.csv",index=False)
        print(grid.best_params_)
        self.dtc_pipe = grid.best_estimator_
        # Plot the dtc
        #plot_dtc(self.dtc_pipe['dtc'])
        with open("models\\dtc_pipe.pkl", 'wb') as file:
            pickle.dump(self.dtc_pipe, file)
        # --- SVC: grid-search, persist CV results and best model ---
        param_grid = {'svc__C': [10, 100, 1000, 10000],
                      'svc__gamma': [0.01, 0.001, 0.0001, 0.00001],
                      'svc__degree': [2, 3],
                      'svc__kernel': ['rbf', 'linear', 'poly']}
        grid = GridSearchCV(self.svc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\svc.csv",index=False)
        print(grid.best_params_)
        self.svc_pipe = grid.best_estimator_
        # BUG FIX: previously this pickled self.dtc_pipe into svc_pipe.pkl,
        # so the saved SVC model was actually a second copy of the DTC.
        with open("models\\svc_pipe.pkl", 'wb') as file:
            pickle.dump(self.svc_pipe, file)
        # --- CNN: grid-search, persist CV results and best model ---
        param_grid = {'cnn__num_channels':[X.shape[2]],
                      'cnn__len_input':[X.shape[1]],
                      'cnn__num_classes':[np.unique(y).shape[0]],
                      'cnn__batch_size': [20, 30],
                      'cnn__num_filter': [4, 8, 16],
                      'cnn__num_layer': [1, 2],
                      'cnn__len_filter': [0.05, 0.1, 0.2]}  # len_filter is defined as fraction of input_len
        grid = GridSearchCV(self.cnn_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\cnn.csv",index=False)
        print(grid.best_params_)
        self.cnn_pipe = grid.best_estimator_
        # Keras models are saved natively rather than pickled.
        self.cnn_pipe['cnn'].model.save("models\\cnn.h5")
        # --- Meta classifiers on honest (out-of-fold) level-1 predictions ---
        if stacked:
            X_stacked, y_stacked = kfoldcrossval(self, X, y, k=5)
            self.meta_dtc.fit(X_stacked, y_stacked)
            with open("models\\meta_dtc.pkl", 'wb') as file:
                pickle.dump(self.meta_dtc, file)
            self.meta_svc.fit(X_stacked, y_stacked)
            with open("models\\meta_svc.pkl", 'wb') as file:
                pickle.dump(self.meta_svc, file)
        return self

    def predict(self, X, voting='veto'):
        """Perform a classification on samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting : string
            'veto' (unanimity, disagreement -> -1), 'democratic'
            (majority), 'meta_dtc' or 'meta_svc' (stacked meta-classifier).

        Returns
        -------
        y_pred : array, shape (n_samples,)
            Combined predictions.
        y_pred_ens : array, shape (n_samples, 3)
            Predictions of the individual estimators (dtc, svc, cnn).
        """
        y_pred = np.empty(np.shape(X)[0])
        # Level-1 predictions (could be parallelized).
        y_dtc = self.dtc_pipe.predict(X)
        y_svc = self.svc_pipe.predict(X)
        y_cnn = self.cnn_pipe.predict(X)
        y_pred_ens = np.stack([y_dtc, y_svc, y_cnn], axis=1).astype(int)
        if voting == 'veto':
            # Require a unanimous vote; map disagreement to -1.
            for i in range(np.shape(X)[0]):
                if y_dtc[i] == y_svc[i] == y_cnn[i]:
                    y_pred[i] = y_dtc[i]
                else:
                    y_pred[i] = -1
        if voting == 'democratic':
            # Per-sample majority vote.
            for i in range(np.shape(X)[0]):
                y_pred[i] = np.argmax(np.bincount(y_pred_ens[i, :]))
        if voting == 'meta_dtc':
            y_pred = self.meta_dtc.predict(y_pred_ens)
        if voting == 'meta_svc':
            y_pred = self.meta_svc.predict(y_pred_ens)
        return y_pred, y_pred_ens
def kfoldcrossval(model, X, y, k=5):
    """Collect out-of-fold level-1 predictions to train a meta-classifier.

    Clones the already tuned pipelines of *model*, refits them on each
    training fold and predicts the held-out fold, so the meta-classifier
    trains on honest (non-leaking) level-1 outputs.

    Parameters
    ----------
    model : object
        Ensemble classifier exposing dtc_pipe, svc_pipe and cnn_pipe.
    X : array-like of shape (n_samples, n_features, n_channels)
        Samples.
    y : array-like of shape (n_samples,)
        True labels for X.
    k : int
        Number of stratified splits.

    Returns
    -------
    X_stack : array-like of shape (n_samples, 3)
        Level-1 predictions, one column per base classifier.
    y_stack : array-like of shape (n_samples,)
        Matching targets.
    """
    splitter = StratifiedKFold(n_splits=k, shuffle=True, random_state=42)
    X_stack = np.empty((0, 3))
    y_stack = np.empty((0,))
    # Work on clones so the already fitted originals stay untouched.
    pipes = [clone(model.dtc_pipe), clone(model.svc_pipe), clone(model.cnn_pipe)]
    for train_idx, test_idx in splitter.split(X, y):
        # Refit every base pipeline on the training fold.
        for pipe in pipes:
            pipe.fit(X[train_idx], y[train_idx])
        # Predict the held-out fold with each pipeline.
        fold_preds = [pipe.predict(X[test_idx]) for pipe in pipes]
        stacked = np.stack(fold_preds, axis=-1).astype(int)
        # Append this fold's predictions and targets.
        X_stack = np.vstack((X_stack, stacked))
        y_stack = np.hstack((y_stack, y[test_idx]))
    return X_stack, y_stack
def build_cnn(num_filter, len_filter, num_layer, num_channels, len_input, num_classes):
    """Build and compile a 1-D CNN for time-series classification.

    Parameters
    ----------
    num_filter : int
        Filters in the first conv layer; layer l uses num_filter * l.
    len_filter : float
        Kernel length expressed as a fraction of the input length.
    num_layer : int
        Number of conv/pool blocks.
    num_channels : int
        Number of channels of the input.
    len_input : int
        Number of time steps of the input.
    num_classes : int
        Number of classes in the dataset = number of outputs.

    Returns
    -------
    model : sequential keras model
        Compiled model ready to be trained.
    """
    kernel = int(len_filter * len_input)
    model = Sequential()
    # First conv/pool block carries the input shape.
    model.add(Conv1D(filters=num_filter, kernel_size=kernel, strides=1, padding="same",
                     activation='relu', input_shape=(len_input, num_channels), name='block1_conv1'))
    model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block1_pool'))
    # Deeper blocks widen the filter count linearly with depth.
    for depth in range(2, num_layer + 1):
        model.add(Conv1D(filters=num_filter * depth, kernel_size=kernel, strides=1, padding="same",
                         activation='relu', name='block' + str(depth) + '_conv1'))
        model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block' + str(depth) + '_pool'))
    # Classification head.
    model.add(Flatten(name='flatten'))
    model.add(Dense(100, activation='relu', name='fc1'))
    model.add(Dense(num_classes, activation='softmax', name='predictions'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    plot_model(model, dpi=300, show_shapes=True, to_file='models\\cnn.png')
    return model
|
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from scipy.stats import kurtosis, skew
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn import tree
import graphviz
def load_test():
    """Load the testbench dataset from data\\df_test.pkl.

    Returns X of shape (n_samples, n_timesteps, 3) with position, velocity
    and current channels, plus the per-sample target vector y.
    """
    df = pd.read_pickle('data\\df_test.pkl')
    wide = df.pivot(index='sample_nr', columns='idx')
    channels = [wide[name].values for name in ('position', 'velocity', 'current')]
    X = np.stack(channels, axis=2)
    y = df.groupby('sample_nr').target.first().values
    return X, y
# Load any dataset (WARNING: predefined length!)
def load_data(dataset):
    """Load one of the known datasets and resample it to a fixed length.

    dataset is 'test', 'uc1' or 'uc2'; any other value leaves X/y/sz unbound
    and raises at the resampling step, matching historical behaviour.
    """
    if dataset == 'test':
        X, y = load_test()
        sz = 230
    elif dataset == 'uc1':
        # Use case 1: position/current channels, 38 samples per series.
        X, y = split_df(pd.read_pickle('data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        sz = 38
    elif dataset == 'uc2':
        # Use case 2: position/force channels, 200 samples per series.
        X, y = split_df(pd.read_pickle('data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        sz = 200
    # Resample every series to the dataset's fixed length.
    resampled = TimeSeriesResampler(sz=sz).fit_transform(X, y)
    return resampled, np.array(y)
# Load and split UC1 and UC2 datasets
def split_df(df, index_column, feature_columns, target_name):
    """Split a long-format dataframe into per-run feature lists and labels.

    Groups by *index_column*; each group contributes one nested list of
    feature rows and the group's first target value.
    """
    features, labels = [], []
    for _, run in df.groupby(index_column):
        features.append(run[feature_columns].values.tolist())
        labels.append(run[target_name].iloc[0])
    return features, labels
# Function to plot confusion matrix
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """Plot the confusion matrix of y_pred vs y_true with matplotlib.

    Parameters
    ----------
    y_true, y_pred : array-like of shape (n_samples,)
        Ground-truth and predicted labels.
    classes : array-like
        Display names indexed by the label values.
    normalize : bool
        Only switches the cell number format; the normalisation branch
        below is currently disabled (inert string literal).
    title : str or None
        Computed but never applied to the axes.  NOTE(review): an
        `ax.set_title(title)` call appears to be missing -- confirm intent.
    cmap : matplotlib colormap
        Colormap for the matrix cells.

    Returns
    -------
    ax : matplotlib Axes
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
        print(cm)
    """
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    #ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           ylabel='True label',
           xlabel='Predicted label')
    # Matplotlib 3.1.1 bug workaround
    ax.set_ylim(len(cm)-0.5, -0.5)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def to_time_series_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features)

    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.

    Returns
    -------
    numpy.ndarray of shape
        (no_time_series, no_time_samples, no_features)

    Raises
    ------
    AssertionError
        If the dataset is empty or its elements are ragged.
    """
    assert len(dataset) != 0, 'dataset is empty'
    # Ragged inputs cannot form a rectangular array.
    try:
        np.array(dataset, dtype=float)
    except ValueError:
        raise AssertionError('All elements must have the same length.')
    # A single series of scalars is promoted to a one-series dataset.
    if np.array(dataset[0]).ndim == 0:
        dataset = [dataset]
    if np.array(dataset[0]).ndim == 1:
        no_time_samples = len(dataset[0])
        no_features = 1
    else:
        no_time_samples, no_features = np.array(dataset[0]).shape
    # BUG FIX: the former dtype=np.float alias was removed in NumPy 1.24
    # and raised AttributeError; the builtin float is the exact equivalent.
    return np.array(dataset, dtype=float).reshape(
        len(dataset),
        no_time_samples,
        no_features)
def to_dataset(dataset):
    """Normalise a time-series dataset to nested-list form
    (no_time_series, no_time_samples, no_features), where series may have
    differing lengths but must share the feature count.

    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.

    Returns
    -------
    list
        Nested lists shaped (no_time_series, no_time_samples, no_features).
    """
    assert len(dataset) != 0, 'dataset is empty'
    # A bare sequence of scalars becomes a dataset of length-1 series.
    if np.array(dataset[0]).ndim == 0:
        dataset = [[value] for value in dataset]
    if np.array(dataset[0]).ndim == 1:
        # One value per time step: wrap every sample in a feature list.
        no_features = 1
        dataset = [[[value] for value in series] for series in dataset]
    else:
        no_features = len(dataset[0][0])
    # Validate rectangularity per series and a consistent feature count.
    for series in dataset:
        try:
            arr = np.array(series, dtype=float)
        except ValueError:
            raise AssertionError(
                "All samples must have the same number of features!")
        assert arr.shape[-1] == no_features,\
            'All series must have the same no features!'
    return dataset
class TimeSeriesResampler(TransformerMixin):
    """Resample each time series to a fixed length via linear interpolation.

    Parameters
    ----------
    sz : int
        Target number of samples per output series.
    """
    def __init__(self, sz):
        self._sz = sz

    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self

    def _interp(self, x):
        # Linearly interpolate one channel onto the target grid.
        target_grid = np.linspace(0, 1, self._sz)
        source_grid = np.linspace(0, 1, len(x))
        return np.interp(target_grid, source_grid, x)

    def transform(self, X, **kwargs):
        """Return X resampled to shape (n_series, sz, n_features)."""
        series = to_dataset(X)
        resampled = [np.apply_along_axis(self._interp, 0, s) for s in series]
        return to_time_series_dataset(resampled)
class TimeSeriesScalerMeanVariance(TransformerMixin):
    """Scale time series to a target mean and standard deviation.

    kind='constant' learns one mean/std per feature over all time steps;
    kind='time-varying' learns one per time step per feature.

    Parameters
    ----------
    kind : str (one of 'constant', or 'time-varying')
    mu : float (default: 0.)
        Mean of the output time series.
    std : float (default: 1.)
        Standard deviation of the output time series.
    """
    def __init__(self, kind='constant', mu=0., std=1.):
        assert kind in ['time-varying', 'constant'],\
            'axis should be one of time-varying or constant'
        self._axis = (1, 0) if kind == 'constant' else 0
        self.mu_ = mu
        self.std_ = std

    def fit(self, X, y=None, **kwargs):
        """Learn per-feature (or per-step) mean and std from X."""
        data = to_time_series_dataset(X)
        self.mean_t = np.mean(data, axis=self._axis)
        self.std_t = np.std(data, axis=self._axis)
        # Guard against division by zero for constant channels.
        self.std_t[self.std_t == 0.] = 1.
        return self

    def transform(self, X, **kwargs):
        """Rescale X using the statistics learned in fit.

        Returns
        -------
        numpy.ndarray
            Rescaled time series dataset.
        """
        data = to_time_series_dataset(X)
        return (data - self.mean_t) * self.std_ / self.std_t + self.mu_
class Flattener(TransformerMixin):
    """Flatten (n_samples, n_timesteps, n_channels) into 2-D by laying the
    channels out one after another."""
    def __init__(self):
        pass

    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Return X reshaped to (n_samples, n_channels * n_timesteps),
        channel-major (all of channel 0, then channel 1, ...)."""
        channels_first = X.transpose(0, 2, 1)
        return channels_first.reshape(X.shape[0], -1)
class Differentiator(TransformerMixin):
    """Calculates the derivative of a specified channel and and appends
    it as new channel"""
    def __init__(self, channel):
        """Initialise Differentiator.
        Parameters
        ----------
        channel
            int, channel to calculate derivative from
        """
        self.channel = channel
    def fit(self,X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Append the per-step difference of the selected channel to X.
        Parameters
        ----------
        X
            Time series dataset of shape (n_samples, n_timesteps, n_channels)
        Returns
        -------
        numpy.ndarray
            Time series dataset with new channel
        """
        # NOTE(review): prepend uses X[0, 0, channel] -- the first value of
        # the FIRST series -- for every series, so all series except the
        # first get a spurious initial difference.  Presumably a per-series
        # prepend (X[:, :1, channel]) was intended; confirm before changing.
        dt = np.diff(X[:, :, self.channel], axis=1, prepend=X[0, 0, self.channel])
        X = np.concatenate((X, np.expand_dims(dt, axis=2)), axis=2)
        return X
class Featuriser(TransformerMixin, BaseEstimator):
    """Turn each channel of a time-series dataset into summary statistics.

    Each channel is split into `windows` equal parts; per window, six
    statistics (mean, std, min, argmin, max, argmax) are computed, yielding
    a flat matrix suitable for sklearn estimators.
    """
    def __init__(self, windows=1):
        """Initialise Featuriser.

        Parameters
        ----------
        windows
            int, number of windows to part the time series in
        """
        self.windows = windows

    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Return features of shape (n_samples, 6 * windows * n_channels)."""
        features = np.empty((X.shape[0], 0))
        for channel in range(X.shape[2]):
            for window in np.array_split(X[:, :, channel], self.windows, axis=1):
                stats = np.vstack([
                    np.mean(window, axis=1),
                    np.std(window, axis=1),
                    np.min(window, axis=1),
                    np.argmin(window, axis=1),
                    np.max(window, axis=1),
                    np.argmax(window, axis=1),
                ]).T
                features = np.hstack([features, stats])
        return features
class Featuriser2(TransformerMixin):
    """Deprecated. Computes 14 whole-series statistics per channel and
    returns them as a flat matrix for sklearn models."""
    def __init__(self):
        pass

    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Return features of shape (n_samples, 14 * n_channels)."""
        out = np.empty((X.shape[0], 0))
        for channel in range(X.shape[2]):
            rows = []
            for x in X[:, :, channel]:
                stats = [
                    np.mean(x),                      # mean
                    np.var(x),                       # variance
                    np.median(x),                    # median
                    x[0],                            # first value
                    x[-1],                           # last value
                    x.max() - x.min(),               # range
                    x.min(),                         # minimum
                    np.argmin(x),                    # minimum location
                    x.max(),                         # maximum
                    np.argmax(x),                    # maximum location
                    skew(x),                         # skewness
                    kurtosis(x),                     # kurtosis
                    np.sum(x),                       # sum
                    np.mean(np.abs(np.diff(x))),     # mean absolute change
                ]
                rows.append(np.hstack(stats))
            block = np.vstack(rows) if rows else np.empty((0, 14))
            out = np.hstack((out, block))
        return out
class Cutter(TransformerMixin):
    """Truncate each series at the peak of its first channel."""
    def fit(self, X, y=None, **kwargs):
        return self

    def transform(self, X, **kwargs):
        """Return each series cut off just before the argmax of channel 0.

        Returns
        -------
        list
            Cut time series dataset.
        """
        cut = []
        for series in X:
            peak = np.argmax(np.array(series)[:, 0])
            cut.append(series[:peak])
        return cut
def plot_dtc(dtc):
    """Render a fitted decision tree to models\\dtc.svg with graphviz.

    Feature names are reconstructed from the Featuriser layout (six window
    statistics per channel, repeated per window).  Channel and class name
    lists are hard-coded per use case; see the commented alternatives.
    """
    feature_names = []
    #channels = ["$pos","$vel","$cur"] # test case
    #channels = ["$pos","$cur"] # use case 1
    #channels = ["$pos","$cur","$vel"] # use case 1 with derived velocity
    channels = ["$pos","$for"] # use case 2
    for var in channels:
        # n_features_ = 6 stats * n_windows * n_channels, so this recovers
        # the number of windows per channel.
        for i in range(1,int((dtc.n_features_/6/len(channels))+1)):
            for f in ["{mean}$","{std}$","{min}$","{min-ind}$","{max}$","{max-ind}$"]:
                feature_names.append('{0}^{1}_{2}'.format(var,i,f))
    #target_names = ["0","1","2","3","4"] # test case
    target_names = ["0","1","2","3"] # use case 1 + 2
    dot_data = tree.export_graphviz(dtc, out_file=None,
                                    feature_names=feature_names,
                                    class_names=target_names,
                                    filled=False, rounded=True,
                                    special_characters=True)
    graph = graphviz.Source(dot_data)
    graph.format = 'svg'
    graph.render("models\\dtc")
|
{
"imported_by": [
"/src/test_time.py",
"/src/test_use_case.py",
"/src/main.py",
"/src/test_voting.py"
],
"imports": [
"/src/utils.py"
]
}
|
sebastianden/alpaca
|
/src/cam.py
|
import tensorflow.keras.backend as K
import tensorflow.keras
from tensorflow.keras.layers import Lambda
from tensorflow.keras.models import Model, load_model
tensorflow.compat.v1.disable_eager_execution()
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from utils import to_time_series_dataset, split_df, load_test, TimeSeriesResampler, TimeSeriesScalerMeanVariance
from scipy.interpolate import interp1d
import seaborn as sns
sns.set(style='white',font='Palatino Linotype',font_scale=1,rc={'axes.grid' : False})
def get_model(id):
    """Load the pre-trained CAM CNN for dataset *id* from the models folder."""
    model_path = '.\\models\\cam_cnn_' + id + '.h5'
    return load_model(model_path)
def target_category_loss(x, category_index, nb_classes):
    """Mask *x* so only the target category's activation survives."""
    mask = K.one_hot([category_index], nb_classes)
    return tf.multiply(x, mask)
def target_category_loss_output_shape(input_shape):
    """The masking Lambda keeps the shape of its input unchanged."""
    return input_shape
def normalize(x):
    """Normalize a tensor by its L2 norm, with a small epsilon for stability."""
    l2 = K.sqrt(K.mean(K.square(x)))
    return x / (l2 + 1e-5)
def load_data(dataset):
    """Load one of the known datasets ('test', 'uc1', 'uc2') and resample
    each series to the dataset's fixed length."""
    if dataset == 'test':
        X, y = load_test()
        sz = 230
    elif dataset == 'uc1':
        # Use case 1: position/current channels.
        X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        sz = 38
    elif dataset == 'uc2':
        # Use case 2: position/force channels.
        X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        sz = 200
    # Bring every series to the same length for the CNN.
    X = TimeSeriesResampler(sz=sz).fit_transform(X, y)
    return X, np.array(y)
def get_sample(X, y, label, rs=100):
    """Pick one random sample of class *label* and return both its
    standard-scaled and raw versions (each shaped (1, len, channels))."""
    rng = np.random.RandomState(rs)
    idx = rng.choice(np.where(y == label)[0], 1)
    x_raw = to_time_series_dataset(X[idx, :, :])
    # Scale the WHOLE dataset so the sample uses global statistics.
    scaler = TimeSeriesScalerMeanVariance(kind='constant')
    X = scaler.fit_transform(X)
    x_proc = to_time_series_dataset(X[idx, :, :])
    return x_proc, x_raw
def _compute_gradients(tensor, var_list):
    """tf.gradients, with zeros substituted for disconnected variables."""
    grads = tf.gradients(tensor, var_list)
    return [g if g is not None else tf.zeros_like(v)
            for v, g in zip(var_list, grads)]
def grad_cam(input_model, data, category_index, nb_classes, layer_name):
    """Compute a 1-D Grad-CAM for *data* w.r.t. class *category_index*.

    Parameters
    ----------
    input_model : keras Model
        Trained classifier.
    data : array of shape (1, len_input, n_channels)
        Preprocessed input sample.
    category_index : int
        Class whose activation map is computed.
    nb_classes : int
        Total number of classes.
    layer_name : str
        Name of the conv layer whose activations/gradients are used.

    Returns
    -------
    numpy.ndarray of shape (len_input,)
        Non-negative activation map, interpolated to the input resolution.
    """
    # Lambda function for getting target category loss
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    # Lambda layer for function
    x = Lambda(target_layer, output_shape = target_category_loss_output_shape)(input_model.output)
    # Add Lambda layer as output to model
    model = Model(inputs=input_model.input, outputs=x)
    #model.summary()
    # Function for getting target category loss y^c
    loss = K.sum(model.output)
    # Get the layer with "layer_name" as name
    conv_output = [l for l in model.layers if l.name == layer_name][0].output
    # Define function to calculate gradients
    grads = normalize(_compute_gradients(loss, [conv_output])[0])
    gradient_function = K.function([model.input], [conv_output, grads])
    # Calculate convolution layer output and gradients for datasample
    output, grads_val = gradient_function([data])
    output, grads_val = output[0, :], grads_val[0, :, :]
    # Calculate the neuron importance weights as mean of gradients
    weights = np.mean(grads_val, axis = 0)
    # Calculate CAM by multiplying weights with the respective output
    cam = np.zeros(output.shape[0:1], dtype = np.float32)
    for i, w in enumerate(weights):
        cam += w * output[:, i]
    # Interpolate CAM to get it back to the original data resolution
    f = interp1d(np.linspace(0, 1, cam.shape[0]), cam, kind="slinear")
    cam = f(np.linspace(0,1,data.shape[1]))
    # Apply ReLU function to only get positive values
    cam[cam < 0] = 0
    return cam
def plot_grad_cam(cam, raw_input, cmap, alpha, language='eng'):
    """Overlay a Grad-CAM heat map on each channel of the raw input.

    Parameters
    ----------
    cam : numpy.ndarray of shape (len_input,)
        Class activation map.
    raw_input : numpy.ndarray of shape (1, len_input, n_channels)
        Unscaled input sample; one subplot per channel.
    cmap : str or matplotlib colormap
        Colormap for the activation overlay.
    alpha : float
        Transparency of the overlay.
    language : str
        'eng' or 'ger' axis labels.

    Returns
    -------
    ax : array of matplotlib Axes
    """
    fig, ax = plt.subplots(raw_input.shape[-1], 1, figsize=(15, 9), sharex=True)
    # fig.suptitle('Gradient Class Activation Map for sample of class %d' %predicted_class)
    if language == 'eng':
        ax_ylabel = [r"Position $\mathit{z}$ in mm", r"Velocity $\mathit{v}$ in m/s", r"Current $\mathit{I}$ in A"]
    if language == 'ger':
        ax_ylabel = [r"Position $\mathit{z}$ in mm", r"Geschwindigkeit $\mathit{v}$ in m/s", r"Stromstärke $\mathit{I}$ in A"]
    for i, a in enumerate(ax):
        # Pad the y-limits by 10% of the channel's value range.
        left, right = (-1, raw_input.shape[1] + 1)
        range_input = raw_input[:, :, i].max() - raw_input[:, :, i].min()
        down, up = (raw_input[:, :, i].min() - 0.1 * range_input, raw_input[:, :, i].max() + 0.1 * range_input)
        a.set_xlim(left, right)
        a.set_ylim(down, up)
        a.set_ylabel(ax_ylabel[i])
        # Draw the CAM as a 1-row image stretched across the axes, signal on top.
        im = a.imshow(cam.reshape(1, -1), extent=[left, right, down, up], aspect='auto', alpha=alpha, cmap=cmap)
        a.plot(raw_input[0, :, i], linewidth=2, color='k')
    # Shared colorbar to the right of all subplots.
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cbar = fig.colorbar(im, cax=cbar_ax)
    if language == 'eng':
        cbar_ax.set_ylabel('Activation', rotation=90, labelpad=15)
    if language == 'ger':
        cbar_ax.set_ylabel('Aktivierung', rotation=90, labelpad=15)
    return ax
if __name__ == "__main__":
    # Demo: compute and display a Grad-CAM for one sample of the test set.
    X, y = load_data('test')
    nb_classes = np.unique(y).shape[0]
    # Load model and datasample
    preprocessed_input, raw_input = get_sample(X, y, label=1)
    model = get_model('test')
    # Get prediction for the chosen sample.
    predictions = model.predict(preprocessed_input)
    predicted_class = np.argmax(predictions)
    print('Predicted class: ', predicted_class)
    # Calculate Class Activation Map on the second conv block's output.
    cam = grad_cam(model, preprocessed_input, predicted_class, nb_classes, 'block2_conv1')
    ax = plot_grad_cam(cam, raw_input, 'jet', 1)
    plt.show()
|
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from scipy.stats import kurtosis, skew
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn import tree
import graphviz
def load_test():
    """Load the testbench dataset from data\\df_test.pkl.

    Returns X of shape (n_samples, n_timesteps, 3) with position, velocity
    and current channels, plus the per-sample target vector y.
    """
    df = pd.read_pickle('data\\df_test.pkl')
    wide = df.pivot(index='sample_nr', columns='idx')
    channels = [wide[name].values for name in ('position', 'velocity', 'current')]
    X = np.stack(channels, axis=2)
    y = df.groupby('sample_nr').target.first().values
    return X, y
# Load any dataset (WARNING: predefined length!)
def load_data(dataset):
    """Load one of the known datasets and resample it to a fixed length.

    dataset is 'test', 'uc1' or 'uc2'; any other value leaves X/y/sz unbound
    and raises at the resampling step, matching historical behaviour.
    """
    if dataset == 'test':
        X, y = load_test()
        sz = 230
    elif dataset == 'uc1':
        # Use case 1: position/current channels, 38 samples per series.
        X, y = split_df(pd.read_pickle('data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        sz = 38
    elif dataset == 'uc2':
        # Use case 2: position/force channels, 200 samples per series.
        X, y = split_df(pd.read_pickle('data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        sz = 200
    # Resample every series to the dataset's fixed length.
    resampled = TimeSeriesResampler(sz=sz).fit_transform(X, y)
    return resampled, np.array(y)
# Load and split UC1 and UC2 datasets
def split_df(df, index_column, feature_columns, target_name):
    """Split a long-format dataframe into per-run feature lists and labels.

    Groups by *index_column*; each group contributes one nested list of
    feature rows and the group's first target value.
    """
    features, labels = [], []
    for _, run in df.groupby(index_column):
        features.append(run[feature_columns].values.tolist())
        labels.append(run[target_name].iloc[0])
    return features, labels
# Function to plot confusion matrix
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """Plot the confusion matrix of y_pred vs y_true with matplotlib.

    Parameters
    ----------
    y_true, y_pred : array-like of shape (n_samples,)
        Ground-truth and predicted labels.
    classes : array-like
        Display names indexed by the label values.
    normalize : bool
        Only switches the cell number format; the normalisation branch
        below is currently disabled (inert string literal).
    title : str or None
        Computed but never applied to the axes.  NOTE(review): an
        `ax.set_title(title)` call appears to be missing -- confirm intent.
    cmap : matplotlib colormap
        Colormap for the matrix cells.

    Returns
    -------
    ax : matplotlib Axes
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
        print(cm)
    """
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    #ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           ylabel='True label',
           xlabel='Predicted label')
    # Matplotlib 3.1.1 bug workaround
    ax.set_ylim(len(cm)-0.5, -0.5)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def to_time_series_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features)

    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.

    Returns
    -------
    numpy.ndarray of shape
        (no_time_series, no_time_samples, no_features)

    Raises
    ------
    AssertionError
        If the dataset is empty or its elements are ragged.
    """
    assert len(dataset) != 0, 'dataset is empty'
    # Ragged inputs cannot form a rectangular array.
    try:
        np.array(dataset, dtype=float)
    except ValueError:
        raise AssertionError('All elements must have the same length.')
    # A single series of scalars is promoted to a one-series dataset.
    if np.array(dataset[0]).ndim == 0:
        dataset = [dataset]
    if np.array(dataset[0]).ndim == 1:
        no_time_samples = len(dataset[0])
        no_features = 1
    else:
        no_time_samples, no_features = np.array(dataset[0]).shape
    # BUG FIX: the former dtype=np.float alias was removed in NumPy 1.24
    # and raised AttributeError; the builtin float is the exact equivalent.
    return np.array(dataset, dtype=float).reshape(
        len(dataset),
        no_time_samples,
        no_features)
def to_dataset(dataset):
    """Bring a time series dataset into nested-list format
    (no_time_series, no_time_samples, no_features), where the number of
    time samples may differ between series.
    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.
    Returns
    -------
    list
        One nested list per series, each of shape
        (no_time_samples, no_features)
    Raises
    ------
    AssertionError
        If the dataset is empty or the series disagree on feature count.
    """
    assert len(dataset) != 0, 'dataset is empty'
    if np.array(dataset[0]).ndim == 0:
        # A single univariate series: treat each scalar as one sample.
        dataset = [[value] for value in dataset]
    if np.array(dataset[0]).ndim == 1:
        # Univariate series: give every sample an explicit feature axis.
        no_features = 1
        dataset = [[[sample] for sample in series] for series in dataset]
    else:
        no_features = len(dataset[0][0])
    # Validate that every series is rectangular and feature-consistent.
    for series in dataset:
        try:
            as_array = np.array(series, dtype=float)
        except ValueError:
            raise AssertionError(
                "All samples must have the same number of features!")
        assert as_array.shape[-1] == no_features,\
            'All series must have the same no features!'
    return dataset
class TimeSeriesResampler(TransformerMixin):
    """Resampler for time series. Linearly interpolates every series so
    that all of them reach the target size.
    Parameters
    ----------
    sz : int
        Size (number of time samples) of the output time series.
    """
    def __init__(self, sz):
        self._sz = sz
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def _interp(self, x):
        # Map one 1-d series onto a fixed grid of self._sz points via
        # linear interpolation on a normalised [0, 1] time axis.
        return np.interp(
            np.linspace(0, 1, self._sz),
            np.linspace(0, 1, len(x)),
            x)
    def transform(self, X, **kwargs):
        # Accept ragged input, interpolate each channel along the time
        # axis, then pack the now equal-length series into a 3-d array.
        X_ = to_dataset(X)
        res = [np.apply_along_axis(self._interp, 0, x) for x in X_]
        return to_time_series_dataset(res)
class TimeSeriesScalerMeanVariance(TransformerMixin):
    """Scaler for time series. Standardises a dataset to a target mean and
    standard deviation using statistics estimated in `fit`. The statistics
    can either be constant (one value per feature over all time steps) or
    time varying (one value per time step per feature).
    Parameters
    ----------
    kind : str (one of 'constant', or 'time-varying')
        Whether statistics are pooled over the time axis.
    mu : float (default: 0.)
        Mean of the output time series.
    std : float (default: 1.)
        Standard deviation of the output time series.
    """
    def __init__(self, kind='constant', mu=0., std=1.):
        assert kind in ['time-varying', 'constant'],\
            'axis should be one of time-varying or constant'
        # 'constant' reduces over (time, series); 'time-varying' only over series.
        self._axis = (1, 0) if kind == 'constant' else 0
        self.mu_ = mu
        self.std_ = std
    def fit(self, X, y=None, **kwargs):
        X_ = to_time_series_dataset(X)
        self.mean_t = np.mean(X_, axis=self._axis)
        self.std_t = np.std(X_, axis=self._axis)
        # Guard zero-variance entries against division by zero in transform.
        self.std_t[self.std_t == 0.] = 1.
        return self
    def transform(self, X, **kwargs):
        """Rescale a dataset with the statistics learned in `fit`.
        Parameters
        ----------
        X
            Time series dataset to be rescaled
        Returns
        -------
        numpy.ndarray
            Rescaled time series dataset
        """
        X_ = to_time_series_dataset(X)
        X_ = (X_ - self.mean_t) * self.std_ / self.std_t + self.mu_
        return X_
class Flattener(TransformerMixin):
    """Flattener for time series. Reduces the dataset by one dimension by
    concatenating the channels of every series into a single row."""
    def __init__(self):
        pass
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Flatten each series to one row, channel blocks in sequence.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            2-d array of shape (n_series, n_samples * n_channels)
        """
        n_series = X.shape[0]
        # Put the channel axis before the time axis so that each channel's
        # samples stay contiguous after flattening, then collapse both.
        channels_first = X.transpose(0, 2, 1)
        return channels_first.reshape(n_series, -1)
class Differentiator(TransformerMixin):
    """Calculates the discrete derivative of a specified channel and
    appends it as a new channel."""
    def __init__(self, channel):
        """Initialise Differentiator.
        Parameters
        ----------
        channel
            int, channel to calculate derivative from
        """
        self.channel = channel
    def fit(self,X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Append the first difference of `self.channel` as a new channel.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            Time series dataset with one additional channel
        """
        # NOTE(review): `prepend` uses the first value of the FIRST series
        # (X[0, 0, channel]) for every series, so only series 0 is
        # guaranteed a zero first derivative. Confirm this is intended
        # rather than a per-series prepend of X[:, :1, channel].
        dt = np.diff(X[:, :, self.channel], axis=1, prepend=X[0, 0, self.channel])
        X = np.concatenate((X, np.expand_dims(dt, axis=2)), axis=2)
        return X
class Featuriser(TransformerMixin, BaseEstimator):
    """Featuriser for time series. Computes six summary statistics (mean,
    std, min, argmin, max, argmax) on each channel and each window of the
    dataset and returns a flat feature matrix for sklearn models."""
    def __init__(self, windows=1):
        """Initialise Featuriser.
        Parameters
        ----------
        windows
            int, number of windows to part the time series in
        """
        self.windows = windows
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Featurise a dataset, channel-major then window order.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            Feature matrix of shape (n_series, 6 * windows * n_channels)
        """
        feature_blocks = []
        for channel in range(X.shape[2]):
            # Split the time axis into (near-)equal windows per channel.
            for segment in np.array_split(X[:, :, channel], self.windows, axis=1):
                stats = np.vstack([
                    np.mean(segment, axis=1),
                    np.std(segment, axis=1),
                    np.min(segment, axis=1),
                    np.argmin(segment, axis=1),
                    np.max(segment, axis=1),
                    np.argmax(segment, axis=1),
                ])
                feature_blocks.append(stats.T)
        if not feature_blocks:
            return np.empty((X.shape[0], 0))
        return np.hstack(feature_blocks)
class Featuriser2(TransformerMixin):
    """Deprecated. Featuriser for time series. Computes fourteen summary
    statistics on each channel of the dataset and returns a flat feature
    matrix to train sklearn models on."""
    def __init__(self):
        pass
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Featurise a dataset, one 14-column block per channel.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            Feature matrix of shape (n_series, 14 * n_channels)
        """
        features = np.empty((X.shape[0], 0))
        for channel in range(X.shape[2]):
            rows = []
            for series in X[:, :, channel]:
                maximum = series.max()
                minimum = series.min()
                # Column order: mean, var, median, first, last, range,
                # min, argmin, max, argmax, skew, kurtosis, sum,
                # mean absolute change.
                stats = [
                    np.mean(series),
                    np.var(series),
                    np.median(series),
                    series[0],
                    series[-1],
                    maximum - minimum,
                    minimum,
                    np.argmin(series),
                    maximum,
                    np.argmax(series),
                    skew(series),
                    kurtosis(series),
                    np.sum(series),
                    np.mean(np.abs(np.diff(series))),
                ]
                rows.append(np.hstack(stats))
            table = np.vstack(rows) if rows else np.empty((0, 14))
            features = np.hstack((features, table))
        return features
class Cutter(TransformerMixin):
    """Cuts the last part of the curves."""
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Truncate each series just before the maximum of its channel 0.
        Parameters
        ----------
        X
            Time series dataset to be cut
        Returns
        -------
        list
            Cut time series dataset
        """
        # Keep everything strictly before the peak of the first channel.
        return [series[:int(np.argmax(np.array(series)[:, 0]))]
                for series in X]
def plot_dtc(dtc):
    """Render a fitted decision tree to `models\\dtc.svg` via graphviz.
    Feature names are reconstructed from the Featuriser layout: six
    statistics (mean, std, min, min-ind, max, max-ind) per window per
    channel, hence the `n_features_ / 6 / len(channels)` window count.
    Parameters
    ----------
    dtc
        Fitted sklearn DecisionTreeClassifier exposing `n_features_`.
    """
    feature_names = []
    # The active `channels` line must match the use case the tree was
    # trained on; the commented alternatives cover the other datasets.
    #channels = ["$pos","$vel","$cur"] # test case
    #channels = ["$pos","$cur"] # use case 1
    #channels = ["$pos","$cur","$vel"] # use case 1 with derived velocity
    channels = ["$pos","$for"] # use case 2
    for var in channels:
        for i in range(1,int((dtc.n_features_/6/len(channels))+1)):
            for f in ["{mean}$","{std}$","{min}$","{min-ind}$","{max}$","{max-ind}$"]:
                feature_names.append('{0}^{1}_{2}'.format(var,i,f))
    #target_names = ["0","1","2","3","4"] # test case
    target_names = ["0","1","2","3"] # use case 1 + 2
    dot_data = tree.export_graphviz(dtc, out_file=None,
                                    feature_names=feature_names,
                                    class_names=target_names,
                                    filled=False, rounded=True,
                                    special_characters=True)
    graph = graphviz.Source(dot_data)
    graph.format = 'svg'
    # Writes models\dtc (dot source) and models\dtc.svg next to it.
    graph.render("models\\dtc")
|
{
"imported_by": [],
"imports": [
"/src/utils.py"
]
}
|
sebastianden/alpaca
|
/src/main.py
|
import numpy as np
import pandas as pd
from utils import split_df, TimeSeriesResampler, plot_confusion_matrix, Differentiator
from alpaca import Alpaca
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Template entry point: plug in a dataset, fit the Alpaca pipeline,
    # and compare all four voting schemes on a held-out test split.
    """
    IMPORT YOUR DATA HERE
    X, y =
    DEFINE RESAMPLING LENGTH IF NEEDED
    sz =
    """
    # NOTE(review): X, y and sz are template placeholders - they must be
    # assigned above or the lines below raise NameError.
    # Turn y to numpy array
    y = np.array(y)
    # Split into train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
    # Pipeline example
    alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)),('alpaca', Alpaca())])
    alpaca.fit(X_train, y_train)
    """
    # Example with additional channel derived from channel 0
    alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)),
                       ('differentiator',Differentiator(channel=0)),
                       ('alpaca', Alpaca())])
    """
    # Predict with every supported voting scheme on the same test set.
    y_pred_bin_veto, y_pred_veto = alpaca.predict(X_test, voting="veto")
    y_pred_bin_dem, y_pred_dem = alpaca.predict(X_test, voting="democratic")
    y_pred_bin_meta_dtc, y_pred_meta_dtc = alpaca.predict(X_test, voting="meta_dtc")
    y_pred_bin_meta_svc, y_pred_meta_svc = alpaca.predict(X_test, voting="meta_svc")
    # Store all results in a dataframe
    y_pred_indiv = np.column_stack((y_pred_bin_veto, y_pred_veto,y_pred_bin_dem, y_pred_dem, y_pred_bin_meta_dtc,
                                    y_pred_meta_dtc, y_pred_bin_meta_svc, y_pred_meta_svc, y_test)).astype(int)
    df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_bin_veto', 'y_pred_veto','y_pred_bin_dem',
                                                       'y_pred_dem', 'y_pred_bin_meta_dtc','y_pred_meta_dtc',
                                                       'y_pred_bin_meta_svc', 'y_pred_meta_svc', 'y_true'])
    df_results.to_csv("results\\y_pred_total.csv",index=False)
    print("TEST FINISHED SUCCESSFULLY")
|
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from scipy.stats import kurtosis, skew
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn import tree
import graphviz
# Load the testbench data
def load_test():
    """Load the testbench dataset from `data\\df_test.pkl`.
    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        X of shape (n_series, n_samples, 3) with channels
        position/velocity/current, and the per-series target labels.
    """
    df = pd.read_pickle('data\\df_test.pkl')
    # Long format -> wide: one row per sample_nr, one column per time index.
    pivoted = df.pivot(index='sample_nr',columns='idx')
    X = np.stack([pivoted['position'].values, pivoted['velocity'].values, pivoted['current'].values], axis=2)
    # One label per series (constant within a sample_nr group).
    y = df.groupby('sample_nr').target.first().values
    return X, y
# Load any dataset (WARNING: predefined length!)
def load_data(dataset):
    """Load one of the known datasets and resample it to a fixed length.
    Parameters
    ----------
    dataset : str
        One of 'test', 'uc1' or 'uc2'.
    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Resampled samples of shape (n_series, sz, n_channels) and labels.
    Raises
    ------
    ValueError
        If `dataset` is not one of the known names.
    """
    if dataset == 'test':
        X, y = load_test()
        sz = 230
    elif dataset == 'uc1':
        X, y = split_df(pd.read_pickle('data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        # Length of timeseries for resampler and cnn
        sz = 38
    elif dataset == 'uc2':
        X, y = split_df(pd.read_pickle('data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        # Length of timeseries for resampler and cnn
        sz = 200
    else:
        # Fix: an unknown name previously fell through to a NameError on
        # `sz`; fail early with a clear message instead.
        raise ValueError("Unknown dataset: %r" % (dataset,))
    resampler = TimeSeriesResampler(sz=sz)
    X = resampler.fit_transform(X, y)
    y = np.array(y)
    return X, y
# Load and split UC1 and UC2 datasets
def split_df(df,index_column, feature_columns, target_name):
    """Split a long-format dataframe into per-run feature series and labels.
    Parameters
    ----------
    df
        Long-format dataframe, one row per time step.
    index_column
        Column identifying which run each row belongs to.
    feature_columns
        Columns to extract as the per-run feature series.
    target_name
        Column holding the (per-run constant) label.
    Returns
    -------
    (list, list)
        Nested per-run feature lists and one label per run.
    """
    features, labels = [], []
    for _, run in df.groupby(index_column):
        features.append(run[feature_columns].values.tolist())
        # The label is constant within a run; take the first row's value.
        labels.append(run[target_name].iloc[0])
    return features, labels
# Function to plot confusion matrix
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    Parameters: y_true/y_pred are label arrays, `classes` is an
    index-able array of display names, `cmap` the matplotlib colormap.
    Returns the matplotlib Axes holding the plot.
    NOTE(review): the normalization block below is disabled (it sits
    inside a bare string literal), so `normalize` currently only changes
    the title text and the cell number format; `title` itself is computed
    but never applied to the axes.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    """
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    #ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           ylabel='True label',
           xlabel='Predicted label')
    # Matplotlib 3.1.1 bug workaround
    ax.set_ylim(len(cm)-0.5, -0.5)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light ones.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def to_time_series_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features)
    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.
    Returns
    -------
    numpy.ndarray of shape
        (no_time_series, no_time_samples, no_features)
    Raises
    ------
    AssertionError
        If the dataset is empty or the series have unequal lengths.
    """
    assert len(dataset) != 0, 'dataset is empty'
    # Fix: `np.float` was removed in NumPy 1.24 - use the builtin `float`.
    # The trial conversion turns ragged input into the documented
    # AssertionError instead of a raw ValueError.
    try:
        np.array(dataset, dtype=float)
    except ValueError:
        raise AssertionError('All elements must have the same length.')
    if np.array(dataset[0]).ndim == 0:
        # Single univariate series -> dataset with one entry.
        dataset = [dataset]
    if np.array(dataset[0]).ndim == 1:
        no_time_samples = len(dataset[0])
        no_features = 1
    else:
        no_time_samples, no_features = np.array(dataset[0]).shape
    return np.array(dataset, dtype=float).reshape(
        len(dataset),
        no_time_samples,
        no_features)
def to_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features) where no_time_samples
    for different time series can be different.
    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.
    Returns
    -------
    list of np.arrays
        (no_time_series, no_time_samples, no_features)
    """
    assert len(dataset) != 0, 'dataset is empty'
    if np.array(dataset[0]).ndim == 0:
        # Single univariate series: each scalar becomes one sample.
        dataset = [[d] for d in dataset]
    if np.array(dataset[0]).ndim == 1:
        # Univariate series: add an explicit feature axis per sample.
        no_features = 1
        dataset = [[[d] for d in data] for data in dataset]
    else:
        no_features = len(dataset[0][0])
    # Validate every series: rectangular and consistent feature count.
    for data in dataset:
        try:
            array = np.array(data, dtype=float)
        except ValueError:
            raise AssertionError(
                "All samples must have the same number of features!")
        assert array.shape[-1] == no_features,\
            'All series must have the same no features!'
    return dataset
class TimeSeriesResampler(TransformerMixin):
    """Resampler for time series. Linearly interpolates every series onto
    a common grid so that all of them reach the target size.
    Parameters
    ----------
    sz : int
        Size (number of time samples) of the output time series.
    """
    def __init__(self, sz):
        self._sz = sz
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def _interp(self, x):
        # Resample one 1-d series onto self._sz points on a unit time axis.
        target_grid = np.linspace(0, 1, self._sz)
        source_grid = np.linspace(0, 1, len(x))
        return np.interp(target_grid, source_grid, x)
    def transform(self, X, **kwargs):
        series_list = to_dataset(X)
        resampled = []
        for series in series_list:
            # Interpolate along the time axis of every channel.
            resampled.append(np.apply_along_axis(self._interp, 0, series))
        return to_time_series_dataset(resampled)
class TimeSeriesScalerMeanVariance(TransformerMixin):
    """Scaler for time series. Standardises a dataset to a target mean and
    standard deviation using statistics estimated in `fit`. The statistics
    can either be constant (one value per feature over all time steps) or
    time varying (one value per time step per feature).
    Parameters
    ----------
    kind : str (one of 'constant', or 'time-varying')
        Whether statistics are pooled over the time axis.
    mu : float (default: 0.)
        Mean of the output time series.
    std : float (default: 1.)
        Standard deviation of the output time series.
    """
    def __init__(self, kind='constant', mu=0., std=1.):
        assert kind in ['time-varying', 'constant'],\
            'axis should be one of time-varying or constant'
        # 'constant' reduces over (time, series); 'time-varying' only over series.
        self._axis = (1, 0) if kind == 'constant' else 0
        self.mu_ = mu
        self.std_ = std
    def fit(self, X, y=None, **kwargs):
        X_ = to_time_series_dataset(X)
        self.mean_t = np.mean(X_, axis=self._axis)
        self.std_t = np.std(X_, axis=self._axis)
        # Guard zero-variance entries against division by zero in transform.
        self.std_t[self.std_t == 0.] = 1.
        return self
    def transform(self, X, **kwargs):
        """Rescale a dataset with the statistics learned in `fit`.
        Parameters
        ----------
        X
            Time series dataset to be rescaled
        Returns
        -------
        numpy.ndarray
            Rescaled time series dataset
        """
        X_ = to_time_series_dataset(X)
        X_ = (X_ - self.mean_t) * self.std_ / self.std_t + self.mu_
        return X_
class Flattener(TransformerMixin):
    """Flattener for time series. Reduces the dataset by one dimension by
    flattening the channels"""
    def __init__(self):
        pass
    def fit(self,X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Flatten each series to a single row, channel blocks in sequence.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            2-d array of shape (n_series, n_samples * n_channels)
        """
        # Transpose so each channel's samples stay contiguous after reshape.
        X_ = X.transpose(0, 2, 1).reshape(X.shape[0],-1)
        return X_
class Differentiator(TransformerMixin):
    """Calculates the discrete derivative of a specified channel and
    appends it as a new channel."""
    def __init__(self, channel):
        """Initialise Differentiator.
        Parameters
        ----------
        channel
            int, channel to calculate derivative from
        """
        self.channel = channel
    def fit(self,X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Append the first difference of `self.channel` as a new channel.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            Time series dataset with one additional channel
        """
        # NOTE(review): `prepend` uses the first value of the FIRST series
        # for every series; only series 0 is guaranteed a zero first
        # derivative. Confirm a per-series X[:, :1, channel] prepend was
        # not intended.
        dt = np.diff(X[:, :, self.channel], axis=1, prepend=X[0, 0, self.channel])
        X = np.concatenate((X, np.expand_dims(dt, axis=2)), axis=2)
        return X
class Featuriser(TransformerMixin, BaseEstimator):
    """Featuriser for time series. Calculates a set of statistical measures
    on each channel and each defined window of the dataset and returns a
    flattened matrix to train sklearn models on"""
    def __init__(self, windows=1):
        """Initialise Featuriser.
        Parameters
        ----------
        windows
            int, number of windows to part the time series in
        """
        self.windows = windows
    def fit(self,X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Featurise a dataset: six statistics per window per channel.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            Feature matrix of shape (n_series, 6 * windows * n_channels),
            channel-major then window order; columns per window are
            mean, std, min, argmin, max, argmax.
        """
        X_ = np.empty((X.shape[0], 0))
        for i in range(X.shape[2]):
            # Split the time axis into (near-)equal windows per channel.
            for window in np.array_split(X[:, :, i], self.windows, axis=1):
                mean = np.mean(window, axis=1)
                std = np.std(window, axis=1)
                min_d = np.min(window, axis=1)
                min_loc = np.argmin(window, axis=1)
                max_d = np.max(window, axis=1)
                max_loc = np.argmax(window, axis=1)
                # Concatenate all values to a numpy array
                row = [mean, std, min_d, min_loc, max_d, max_loc]
                row = np.transpose(np.vstack(row))
                X_ = np.hstack([X_, row])
        return X_
class Featuriser2(TransformerMixin):
    """Deprecated. Featuriser for time series. Calculates a set of statistical measures
    on each channel of the dataset and returns a flattened matrix to train
    sklearn models on"""
    def __init__(self):
        pass
    def fit(self,X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Featurise a dataset: fourteen statistics per channel.
        Parameters
        ----------
        X
            Time series dataset of shape (n_series, n_samples, n_channels)
        Returns
        -------
        numpy.ndarray
            Feature matrix of shape (n_series, 14 * n_channels)
        """
        X_ = np.empty((X.shape[0], 0))
        for i in range(X.shape[2]):
            # One 14-column block per channel, one row per series.
            table = np.empty((0, 14))
            for x in X[:, :, i]:
                mean = np.mean(x)
                var = np.var(x)
                max_d = x.max()
                max_loc = np.argmax(x)
                min_d = x.min()
                min_loc = np.argmin(x)
                range_d = max_d - min_d
                med = np.median(x)
                first = x[0]
                last = x[-1]
                skew_d = skew(x)
                kurt = kurtosis(x)
                sum = np.sum(x)  # shadows the builtin `sum` within this loop
                mean_abs_change = np.mean(np.abs(np.diff(x)))
                # Concatenate all values to a numpy array
                row = [mean, var, med, first, last, range_d, min_d, min_loc, max_d, max_loc, skew_d, kurt, sum,
                       mean_abs_change]
                row = np.hstack(row)
                table = np.vstack([table, row])
            X_ = np.hstack((X_,table))
        return X_
class Cutter(TransformerMixin):
    """Cuts the last part of the curves."""
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Truncate each series just before the maximum of its channel 0.
        Parameters
        ----------
        X
            Time series dataset to be cut
        Returns
        -------
        list
            Cut time series dataset
        """
        res = []
        for x in X:
            # Index of the peak in the first channel; keep everything before it.
            idx = np.argmax(np.array(x)[:, 0])
            res.append(x[:idx])
        return res
def plot_dtc(dtc):
    """Render a fitted decision tree to `models\\dtc.svg` via graphviz.
    Feature names mirror the Featuriser layout (six statistics per window
    per channel), which is why the window count is derived as
    `n_features_ / 6 / len(channels)`.
    Parameters
    ----------
    dtc
        Fitted sklearn DecisionTreeClassifier exposing `n_features_`.
    """
    feature_names = []
    # The active `channels` line must match the use case the tree was
    # trained on; the commented alternatives cover the other datasets.
    #channels = ["$pos","$vel","$cur"] # test case
    #channels = ["$pos","$cur"] # use case 1
    #channels = ["$pos","$cur","$vel"] # use case 1 with derived velocity
    channels = ["$pos","$for"] # use case 2
    for var in channels:
        for i in range(1,int((dtc.n_features_/6/len(channels))+1)):
            for f in ["{mean}$","{std}$","{min}$","{min-ind}$","{max}$","{max-ind}$"]:
                feature_names.append('{0}^{1}_{2}'.format(var,i,f))
    #target_names = ["0","1","2","3","4"] # test case
    target_names = ["0","1","2","3"] # use case 1 + 2
    dot_data = tree.export_graphviz(dtc, out_file=None,
                                    feature_names=feature_names,
                                    class_names=target_names,
                                    filled=False, rounded=True,
                                    special_characters=True)
    graph = graphviz.Source(dot_data)
    graph.format = 'svg'
    # Writes models\dtc (dot source) and models\dtc.svg next to it.
    graph.render("models\\dtc")
--- FILE SEPARATOR ---
import warnings
warnings.simplefilter(action='ignore')
import pickle
import pandas as pd
import numpy as np
from utils import TimeSeriesScalerMeanVariance, Flattener, Featuriser, plot_dtc
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_curve, auc
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.base import ClassifierMixin, BaseEstimator, clone
from tslearn.clustering import TimeSeriesKMeans
from tslearn.neighbors import KNeighborsTimeSeriesClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from IPython.display import SVG
from tensorflow.keras.utils import model_to_dot
from tensorflow.keras.utils import plot_model
class Alpaca(ClassifierMixin):
    """
    A learning product classification algorithm.
    Combines an ensemble classifier (DTC + SVC + CNN) with a 1-NN based
    anomaly detector; a sample is binary-positive if either part flags it.
    """
    def __init__(self):
        self.anomaly_detection = AnomalyDetection()
        self.classifier = Classifier()
    def fit(self, X, y, stacked=True):
        """
        Fit the algorithm according to the given training data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked: bool
            If true train a meta classifier on kfold CV predictions of the level 1 classifiers
        Returns
        -------
        self: object
            Fitted model
        """
        # Fit anomaly detection
        # Do GridSearch to get best model
        param_grid = {'n_clusters': [10,50,100,200]}
        grid = GridSearchCV(self.anomaly_detection, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\ad.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.anomaly_detection = grid.best_estimator_
        # Save the model
        with open("models\\ad.pkl", 'wb') as file:
            pickle.dump(self.anomaly_detection, file)
        # Fit ensemble classifier
        self.classifier.fit(X, y, stacked)
        return self
    def predict(self, X, voting):
        """
        Perform a classification on samples in X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting: string
            Voting scheme to use ('veto', 'democratic', 'meta_dtc', 'meta_svc')
        Returns
        -------
        y_pred_bin: array, shape (n_samples,)
            Combined binary predictions (returned first)
        y_pred: array, shape (n_samples,)
            Predictions from ensemble with suggested class labels
        """
        # Class predictions of ensemble
        y_pred, y_pred_ens = self.classifier.predict(X, voting=voting)
        # Binary predictions of anomaly detector
        y_pred_ad = self.anomaly_detection.predict(X)
        # Save individual predictions
        y_pred_indiv = np.column_stack((y_pred_ens, y_pred_ad)).astype(int)
        df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_dtc','y_pred_svc','y_pred_cnn','y_pred_ad'])
        df_results.to_csv("results\\y_pred_indiv.csv",index=False)
        # Overwrite the entries in y_pred_knn with positive, where ensemble decides positive
        # (any non-zero ensemble class implies the binary prediction 1).
        y_pred_bin = np.where(y_pred != 0, 1, y_pred_ad)
        return y_pred_bin, y_pred
class AnomalyDetection(ClassifierMixin, BaseEstimator):
    """
    Anomaly detection with 1-NN and automatic calculation of optimal threshold.
    The model keeps k-means centroids of the "good" class as prototypes and
    flags a sample as anomalous when its 1-NN distance exceeds a threshold
    picked from the training ROC curve.
    """
    def __init__(self, n_clusters=200):
        self.knn = KNeighborsTimeSeriesClassifier(n_neighbors=1, weights='uniform', metric='euclidean', n_jobs=-1)
        # Distance threshold; set in fit().
        self.d = None
        self.n_clusters = n_clusters
    def fit(self, X, y):
        """
        Fit the algorithm according to the given training data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X (0 = good, non-zero = bad).
        Returns
        -------
        self: object
            Fitted model
        """
        # Fit anomaly detection knn over k-means centroids
        X_good = X[np.where(y == 0)]
        X_bad = X[np.where(y != 0)]
        km = TimeSeriesKMeans(n_clusters=self.n_clusters, metric="euclidean",
                              max_iter=100, random_state=0, n_jobs=-1).fit(X_good)
        # All centroids represent the "good" class (label 0).
        self.knn.fit(km.cluster_centers_, np.zeros((self.n_clusters,)))
        # Calculate distances to all samples in good and bad
        d_bad, _ = self.knn.kneighbors(X_bad)
        d_good, _ = self.knn.kneighbors(X_good)
        # Calculate ROC
        y_true = np.hstack((np.zeros(X_good.shape[0]), np.ones(X_bad.shape[0])))
        y_score = np.vstack((d_good, d_bad))
        fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=1)
        # Determine d by Youden index (maximises tpr - fpr)
        self.d = thresholds[np.argmax(tpr - fpr)]
        return self
    def predict(self, X):
        """
        Perform a classification on samples in X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        Returns
        -------
        y_pred: array, shape (n_samples,)
            Predictions (0 = normal, 1 = anomalous)
        """
        # Binary predictions of anomaly detector: anomalous when the
        # 1-NN distance to the good-class centroids reaches the threshold.
        y_pred = np.squeeze(np.where(self.knn.kneighbors(X)[0] < self.d, 0, 1))
        return y_pred
class Classifier(ClassifierMixin):
    """
    Classifier part with ensemble of estimators.
    Level 1: a featurised decision tree, a scaled/flattened SVC and a 1-D
    CNN. Level 2 (optional): meta DTC/SVC trained on stacked level-1
    predictions obtained by k-fold cross-validation.
    """
    def __init__(self):
        # DTC pipeline
        featuriser = Featuriser()
        dtc = DecisionTreeClassifier()
        self.dtc_pipe = Pipeline([('featuriser', featuriser), ('dtc', dtc)])
        # SVC pipeline
        scaler = TimeSeriesScalerMeanVariance(kind='constant')
        flattener = Flattener()
        svc = SVC()
        self.svc_pipe = Pipeline([('scaler', scaler), ('flattener', flattener), ('svc', svc)])
        # Keras pipeline
        #len_filter = round(len_input*0.05)
        #num_filter = 8
        cnn = KerasClassifier(build_fn=build_cnn, epochs=100, verbose=0)
        self.cnn_pipe = Pipeline([('scaler', scaler), ('cnn', cnn)])
        # Meta classifier
        self.meta_dtc = DecisionTreeClassifier()
        self.meta_svc = SVC()
    def fit(self, X, y, stacked):
        """
        Fit each individual estimator of the ensemble model according to the given training data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked: bool
            If true train a meta classifier on kfold CV predictions of the level 1 classifiers
        Returns
        -------
        self: object
            Fitted model
        """
        # Fit DTC
        # Do GridSearch to get best model
        param_grid = {'featuriser__windows': [1, 2, 3, 4, 5, 6],
                      'dtc__max_depth': [3, 4, 5],
                      'dtc__criterion': ['gini', 'entropy']}
        grid = GridSearchCV(self.dtc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\dtc.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.dtc_pipe = grid.best_estimator_
        # Plot the dtc
        #plot_dtc(self.dtc_pipe['dtc'])
        # Save the model
        with open("models\\dtc_pipe.pkl", 'wb') as file:
            pickle.dump(self.dtc_pipe, file)
        # Fit SVC
        # Do GridSearch to get best model
        param_grid = {'svc__C': [10, 100, 1000, 10000],
                      'svc__gamma': [0.01, 0.001, 0.0001, 0.00001],
                      'svc__degree': [2, 3],
                      'svc__kernel': ['rbf', 'linear', 'poly']}
        grid = GridSearchCV(self.svc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\svc.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.svc_pipe = grid.best_estimator_
        # Save the model
        # Fix: previously this pickled self.dtc_pipe into svc_pipe.pkl.
        with open("models\\svc_pipe.pkl", 'wb') as file:
            pickle.dump(self.svc_pipe, file)
        # Fit CNN
        # Do GridSearch to get best model
        param_grid = {'cnn__num_channels':[X.shape[2]],
                      'cnn__len_input':[X.shape[1]],
                      'cnn__num_classes':[np.unique(y).shape[0]],
                      'cnn__batch_size': [20, 30],
                      'cnn__num_filter': [4, 8, 16],
                      'cnn__num_layer': [1, 2],
                      'cnn__len_filter': [0.05, 0.1, 0.2]} # len_filter is defined as fraction of input_len
        grid = GridSearchCV(self.cnn_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\cnn.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.cnn_pipe = grid.best_estimator_
        # Save the model (keras models are saved via .save, not pickle)
        self.cnn_pipe['cnn'].model.save("models\\cnn.h5")
        # Fit the Metaclassifiers
        if stacked:
            # Get level 1 classifier predictions as training data
            X_stacked, y_stacked = kfoldcrossval(self, X, y, k=5)
            # Fit Meta DTC
            self.meta_dtc.fit(X_stacked, y_stacked)
            # Save the model
            with open("models\\meta_dtc.pkl", 'wb') as file:
                pickle.dump(self.meta_dtc, file)
            # Fit Meta SVC
            self.meta_svc.fit(X_stacked, y_stacked)
            # Save the model
            with open("models\\meta_svc.pkl", 'wb') as file:
                pickle.dump(self.meta_svc, file)
        return self
    def predict(self, X, voting='veto'):
        """
        Perform a classification on samples in X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting: string
            Voting scheme to use ('veto', 'democratic', 'meta_dtc', 'meta_svc')
        Returns
        -------
        y_pred: array, shape (n_samples,)
            Predictions
        y_pred_ens: array, shape (n_samples, 3)
            Predictions of the individual estimators
        """
        y_pred = np.empty(np.shape(X)[0])
        # Parallelize this part
        y_dtc = self.dtc_pipe.predict(X)
        y_svc = self.svc_pipe.predict(X)
        y_cnn = self.cnn_pipe.predict(X)
        y_pred_ens = np.stack([y_dtc, y_svc, y_cnn], axis=1).astype(int)
        if voting == 'veto':
            # Unanimous agreement required; -1 marks disagreement.
            for i in range(np.shape(X)[0]):
                if y_dtc[i] == y_svc[i] == y_cnn[i]:
                    y_pred[i] = y_dtc[i]
                else:
                    y_pred[i] = -1
        if voting == 'democratic':
            # Majority vote (ties resolve to the lowest label via bincount).
            for i in range(np.shape(X)[0]):
                y_pred[i] = np.argmax(np.bincount(y_pred_ens[i, :]))
        if voting == 'meta_dtc':
            y_pred = self.meta_dtc.predict(y_pred_ens)
        if voting == 'meta_svc':
            y_pred = self.meta_svc.predict(y_pred_ens)
        return y_pred, y_pred_ens
def kfoldcrossval(model, X, y, k=5):
    """
    Re-train clones of the already optimised level-1 models in a k-fold
    cross-validation and collect their out-of-fold predictions as training
    data for the meta classifier.
    Parameters
    ----------
    model: object
        Ensemble classifier object
    X : array-like of shape (n_samples, n_features, n_channels)
        Samples.
    y : array-like of shape (n_samples,)
        True labels for X.
    k: int
        Number of splits
    Returns
    -------
    X_stack: array-like of shape (n_samples, n_features)
        Level 1 predictions as training data for metaclassifier
    y_stack: array-like of shape (n_samples,)
        Targets for metaclassifier
    """
    splitter = StratifiedKFold(n_splits=k, shuffle=True, random_state=42)
    X_stack = np.empty((0, 3))
    y_stack = np.empty((0,))
    # Clone the fitted pipelines so the originals stay untouched;
    # order (dtc, svc, cnn) fixes the stacked column order.
    level1 = [clone(model.dtc_pipe), clone(model.svc_pipe), clone(model.cnn_pipe)]
    for train_idx, test_idx in splitter.split(X, y):
        # Refit every level-1 model on the fold's training portion.
        for estimator in level1:
            estimator.fit(X[train_idx], y[train_idx])
        # Out-of-fold predictions become one block of meta features.
        fold_preds = np.stack(
            [estimator.predict(X[test_idx]) for estimator in level1],
            axis=-1).astype(int)
        X_stack = np.vstack((X_stack, fold_preds))
        y_stack = np.hstack((y_stack, y[test_idx]))
    return X_stack, y_stack
def build_cnn(num_filter, len_filter, num_layer, num_channels, len_input, num_classes):
    """
    Function returning a keras model.
    Parameters
    ----------
    num_filter: int
        Number of filters / kernels in the conv layer
    len_filter: float
        Length of the filters / kernels in the conv layer as fraction of inputlength
    num_layer: int
        Number of convlutional layers in the model
    num_channels: int
        Number of channels of the input
    len_input: int
        Number of dimensions of the input
    num_classes: int
        Number of classes in the dataset = Number of outputs
    Returns
    -------
    model: sequential keras model
        Keras CNN model ready to be trained
    """
    model = Sequential()
    # First Conv Layer
    model.add(Conv1D(filters=num_filter, kernel_size=int(len_filter*len_input), strides=1, padding="same",
                     activation='relu', input_shape=(len_input, num_channels), name='block1_conv1'))
    model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block1_pool'))
    # Other Conv Layers - filter count scales with the layer index.
    for l in range(2, num_layer + 1):
        model.add(Conv1D(filters=num_filter*l, kernel_size=int(len_filter * len_input), strides=1, padding="same",
                         activation='relu', name='block' + str(l) + '_conv1'))
        model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block' + str(l) + '_pool'))
    model.add(Flatten(name='flatten'))
    model.add(Dense(100, activation='relu', name='fc1'))
    # Softmax head sized to the number of classes; labels are integer-coded,
    # hence the sparse categorical crossentropy loss.
    model.add(Dense(num_classes, activation='softmax',name='predictions'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # Side effect: writes an architecture diagram for documentation.
    plot_model(model,dpi = 300, show_shapes=True, to_file='models\\cnn.png')
    return model
|
{
"imported_by": [],
"imports": [
"/src/utils.py",
"/src/alpaca.py"
]
}
|
sebastianden/alpaca
|
/src/test_time.py
|
from alpaca import Alpaca
from utils import to_time_series_dataset, to_dataset, split_df, TimeSeriesResampler
import time
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
# Benchmark inference latency of a fitted Alpaca pipeline: predict a growing
# number of samples both one-by-one ('single') and in one batched call
# ('batch'), and write the timings to a CSV per dataset.
max_sample = 20
for dataset in ['uc2']:
    if dataset == 'uc1':
        X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        y = np.array(y)
        # Length of timeseries for resampler and cnn
        sz = 38
        # Number of channels for cnn
        num_channels = len(X[0][0])
        # Number of classes for cnn
        num_classes = np.unique(y).shape[0]
    if dataset == 'uc2':
        X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        y = np.array(y)
        # Length of timeseries for resampler and cnn
        sz = 200
        # Number of channels for cnn
        num_channels = len(X[0][0])
        # Number of classes for cnn
        num_classes = np.unique(y).shape[0]
    # Resample all series to a fixed length, then fit the full pipeline once
    # up front; only prediction is timed below.
    resampler = TimeSeriesResampler(sz=sz)
    alpaca = Pipeline([('resampler', resampler),
                       ('classifier', Alpaca())])
    alpaca.fit(X, y, classifier__stacked=False, classifier__n_clusters=200)
    # Measure time for single sample processing: i samples predicted one at a
    # time, averaged over 100 repetitions, 10 random draws per i.
    t = []
    for i in range(1, max_sample+1):
        for j in range(10):
            rand = np.random.randint(2000)
            # One sample shaped (1, n_timesteps, n_channels).
            sample = np.transpose(to_time_series_dataset(X[rand]), (2, 0, 1))
            start = time.process_time()
            for k in range(100):
                for l in range(i):
                    y_pred_bin, y_pred = alpaca.predict(sample, voting='veto')
            end = time.process_time()
            # process_time() excludes sleep; divide by the 100 repetitions.
            t.append([i, (end-start)/100, 'single'])
    # Measure time for batch processing of multiple sample numbers: the same
    # i samples predicted in a single call.
    for i in range(1, max_sample+1):
        for j in range(10):
            rand = np.random.randint(2000)
            if i == 1:
                sample = np.transpose(to_time_series_dataset(X[rand]), (2, 0, 1))
            else:
                sample = to_dataset(X[rand:rand+i])
            start = time.process_time()
            for k in range(100):
                y_pred_bin, y_pred = alpaca.predict(sample, voting='veto')
            end = time.process_time()
            t.append([i, (end-start)/100, 'batch'])
    # Persist all timings for this dataset.
    df = pd.DataFrame(t, columns=['Sample Number', 'Time', 'Type'])
    df.to_csv("..\\results\\Time_"+dataset+".csv")
|
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from scipy.stats import kurtosis, skew
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn import tree
import graphviz
# Load the testbench data
def load_test():
    """Load the testbench dataset from data\\df_test.pkl.

    Returns
    -------
    X : numpy.ndarray
        Stacked channels (position, velocity, current) per sample,
        shape (n_samples, n_timesteps, 3).
    y : numpy.ndarray
        One target label per sample (the group's first target value).
    """
    frame = pd.read_pickle('data\\df_test.pkl')
    # Long format -> wide: one row per sample, one column per time index.
    wide = frame.pivot(index='sample_nr', columns='idx')
    channels = [wide['position'].values, wide['velocity'].values, wide['current'].values]
    X = np.stack(channels, axis=2)
    y = frame.groupby('sample_nr').target.first().values
    return X, y
# Load any dataset (WARNING: predefined length!)
def load_data(dataset):
    """Load one of the known datasets, resampled to a fixed length.

    Parameters
    ----------
    dataset : str
        One of 'test', 'uc1' or 'uc2'.

    Returns
    -------
    X : numpy.ndarray
        Resampled time series dataset.
    y : numpy.ndarray
        Labels for X.

    Raises
    ------
    ValueError
        If `dataset` is not one of the known names.
    """
    if dataset == 'test':
        X, y = load_test()
        # Fixed resampling length per dataset (matches the CNN input size).
        sz = 230
    elif dataset == 'uc1':
        X, y = split_df(pd.read_pickle('data\\df_uc1.pkl'),
                        index_column='run_id',
                        feature_columns=['fldPosition', 'fldCurrent'],
                        target_name='target')
        # Length of timeseries for resampler and cnn
        sz = 38
    elif dataset == 'uc2':
        X, y = split_df(pd.read_pickle('data\\df_uc2.pkl'),
                        index_column='run_id',
                        feature_columns=['position', 'force'],
                        target_name='label')
        # Length of timeseries for resampler and cnn
        sz = 200
    else:
        # Bug fix: an unknown name previously fell through and crashed later
        # with a NameError on `sz`; fail fast with a clear message instead.
        raise ValueError(
            "Unknown dataset: %r (expected 'test', 'uc1' or 'uc2')" % (dataset,))
    resampler = TimeSeriesResampler(sz=sz)
    X = resampler.fit_transform(X, y)
    y = np.array(y)
    return X, y
# Load and split UC1 and UC2 datasets
def split_df(df, index_column, feature_columns, target_name):
    """Split a long-format DataFrame into per-series features and labels.

    Parameters
    ----------
    df : pandas.DataFrame
        Long-format frame where rows belonging to one series share a value
        in `index_column`.
    index_column : str
        Column identifying which series a row belongs to.
    feature_columns : list of str
        Columns to extract as the per-timestep feature values.
    target_name : str
        Column holding the label (first row's value is used per series).

    Returns
    -------
    features : list
        One nested list of feature rows per series.
    labels : list
        One label per series, in the same order as `features`.
    """
    features, labels = [], []
    for _, series in df.groupby(index_column):
        features.append(series[feature_columns].values.tolist())
        labels.append(series[target_name].iloc[0])
    return features, labels
# Function to plot confusion matrix
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground-truth labels.
    y_pred : array-like of shape (n_samples,)
        Predicted labels.
    classes : array-like
        Display names, indexed by the label values that occur in the data.
    normalize : bool
        Currently only switches the cell text format ('.2f' vs 'd'); the
        actual normalization code below is disabled (see the string literal).
    title : str or None
        Title text; a default is derived from `normalize`.
        NOTE(review): `title` is computed but never applied to the axes.
    cmap : matplotlib colormap
        Colormap for the matrix image.

    Returns
    -------
    ax : matplotlib.axes.Axes
        The axes the matrix was drawn on.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    # NOTE(review): the normalization step was deliberately disabled by
    # turning it into a bare string literal; it has no runtime effect.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    """
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    #ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           ylabel='True label',
           xlabel='Predicted label')
    # Matplotlib 3.1.1 bug workaround
    ax.set_ylim(len(cm)-0.5, -0.5)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # White text on dark cells, black on light, for contrast.
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    return ax
def to_time_series_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features)

    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.

    Returns
    -------
    numpy.ndarray of shape
        (no_time_series, no_time_samples, no_features)

    Raises
    ------
    AssertionError
        If the dataset is empty or the series are ragged (different lengths).
    """
    assert len(dataset) != 0, 'dataset is empty'
    try:
        # Bug fix: `np.float` was removed in NumPy 1.24; use the builtin
        # `float` (the same type the alias referred to).
        np.array(dataset, dtype=float)
    except ValueError:
        # Ragged input cannot be reshaped into a rectangular array.
        raise AssertionError('All elements must have the same length.')
    if np.array(dataset[0]).ndim == 0:
        # A flat sequence of scalars is a single univariate series.
        dataset = [dataset]
    if np.array(dataset[0]).ndim == 1:
        no_time_samples = len(dataset[0])
        no_features = 1
    else:
        no_time_samples, no_features = np.array(dataset[0]).shape
    return np.array(dataset, dtype=float).reshape(
        len(dataset),
        no_time_samples,
        no_features)
def to_dataset(dataset):
    """Transforms a time series dataset so that it has the following format:
    (no_time_series, no_time_samples, no_features) where no_time_samples
    may differ between series.

    Parameters
    ----------
    dataset : array-like
        The dataset of time series to be transformed.

    Returns
    -------
    list of np.arrays
        (no_time_series, no_time_samples, no_features)
    """
    assert len(dataset) != 0, 'dataset is empty'
    # A flat list of scalars: treat each scalar as a one-sample series.
    if np.array(dataset[0]).ndim == 0:
        dataset = [[element] for element in dataset]
    # 1-D series: wrap every sample so each series has a single feature.
    if np.array(dataset[0]).ndim == 1:
        no_features = 1
        dataset = [[[sample] for sample in series] for series in dataset]
    else:
        no_features = len(dataset[0][0])
    # Validate that every series is rectangular with a consistent feature
    # count; lengths along the time axis are allowed to differ.
    for series in dataset:
        try:
            as_array = np.array(series, dtype=float)
        except ValueError:
            raise AssertionError(
                "All samples must have the same number of features!")
        assert as_array.shape[-1] == no_features,\
            'All series must have the same no features!'
    return dataset
class TimeSeriesResampler(TransformerMixin):
    """Resample every time series in a dataset to a common length.

    Parameters
    ----------
    sz : int
        Target number of time steps per series.
    """
    def __init__(self, sz):
        # Target output length.
        self._sz = sz

    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self

    def _interp(self, x):
        # Linearly interpolate one channel onto a grid of self._sz points.
        target_grid = np.linspace(0, 1, self._sz)
        source_grid = np.linspace(0, 1, len(x))
        return np.interp(target_grid, source_grid, x)

    def transform(self, X, **kwargs):
        # Normalize the input layout, resample each series channel-wise
        # (axis 0 is the time axis of a single series), and stack the result
        # into a rectangular (n_series, sz, n_features) array.
        series_list = to_dataset(X)
        resampled = [np.apply_along_axis(self._interp, 0, series)
                     for series in series_list]
        return to_time_series_dataset(resampled)
class TimeSeriesScalerMeanVariance(TransformerMixin):
    """Scaler for time series. Standardizes series using statistics learned
    in fit(), then maps them to the target mean/std. The mean and std can
    either be constant (one value per feature over all times) or time
    varying (one value per time step per feature).

    Parameters
    ----------
    kind: str (one of 'constant', or 'time-varying')
        Whether statistics are aggregated over samples and time ('constant')
        or over samples only ('time-varying').
    mu : float (default: 0.)
        Mean of the output time series.
    std : float (default: 1.)
        Standard deviation of the output time series.
    """
    def __init__(self, kind='constant', mu=0., std=1.):
        assert kind in ['time-varying', 'constant'],\
            'axis should be one of time-varying or constant'
        # 'constant' reduces over (time, series); 'time-varying' reduces over
        # series only, keeping per-timestep statistics.
        self._axis = (1, 0) if kind == 'constant' else 0
        self.mu_ = mu
        self.std_ = std
    def fit(self, X, y=None, **kwargs):
        # Learn mean/std of the training data along the configured axis.
        X_ = to_time_series_dataset(X)
        self.mean_t = np.mean(X_, axis=self._axis)
        self.std_t = np.std(X_, axis=self._axis)
        # Guard against division by zero for constant channels.
        self.std_t[self.std_t == 0.] = 1.
        return self
    def transform(self, X, **kwargs):
        """Rescale a dataset using the statistics learned in fit().

        Parameters
        ----------
        X
            Time series dataset to be rescaled
        Returns
        -------
        numpy.ndarray
            Rescaled time series dataset
        """
        X_ = to_time_series_dataset(X)
        # Standardize with the fitted stats, then map to target mean/std.
        X_ = (X_ - self.mean_t) * self.std_ / self.std_t + self.mu_
        return X_
class Flattener(TransformerMixin):
    """Flatten a 3-D time series dataset to 2-D by concatenating channels."""
    def __init__(self):
        pass

    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, **kwargs):
        """Flatten the dataset.

        Parameters
        ----------
        X
            Time series dataset of shape (n_samples, n_timesteps, n_channels)
        Returns
        -------
        numpy.ndarray
            Array of shape (n_samples, n_channels * n_timesteps), channels
            laid out one after another (channel-major).
        """
        n_samples = X.shape[0]
        channel_major = X.transpose(0, 2, 1)
        return channel_major.reshape(n_samples, -1)
class Differentiator(TransformerMixin):
    """Calculates the discrete derivative of a specified channel and appends
    it as a new channel."""
    def __init__(self, channel):
        """Initialise Differentiator.

        Parameters
        ----------
        channel
            int, index of the channel to calculate the derivative from
        """
        self.channel = channel
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Transform data.

        Parameters
        ----------
        X
            Time series dataset of shape (n_samples, n_timesteps, n_channels)
        Returns
        -------
        numpy.ndarray
            Same dataset with the derivative appended as the last channel.
        """
        # Bug fix: prepend each series' OWN first value so the derivative
        # keeps the input length and starts at 0 for every sample. The
        # previous code prepended X[0, 0, channel] (first value of the first
        # sample) to every sample, giving a wrong first difference for all
        # samples except the first.
        dt = np.diff(X[:, :, self.channel], axis=1,
                     prepend=X[:, :1, self.channel])
        X = np.concatenate((X, np.expand_dims(dt, axis=2)), axis=2)
        return X
class Featuriser(TransformerMixin, BaseEstimator):
    """Featuriser for time series. Splits each channel into windows,
    computes six summary statistics per window and returns a flat matrix
    suitable for sklearn estimators."""
    def __init__(self, windows=1):
        """Initialise Featuriser.

        Parameters
        ----------
        windows
            int, number of windows to part the time series in
        """
        self.windows = windows

    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, **kwargs):
        """Transform data.

        Parameters
        ----------
        X
            Time series dataset of shape (n_samples, n_timesteps, n_channels)
        Returns
        -------
        numpy.ndarray
            Feature matrix of shape (n_samples, n_channels * windows * 6)
        """
        feature_matrix = np.empty((X.shape[0], 0))
        for channel in range(X.shape[2]):
            # Split the channel's time axis into `windows` equal parts.
            for segment in np.array_split(X[:, :, channel], self.windows, axis=1):
                # Six statistics per window, one value per sample, in the
                # fixed order: mean, std, min, argmin, max, argmax.
                stats = [np.mean(segment, axis=1),
                         np.std(segment, axis=1),
                         np.min(segment, axis=1),
                         np.argmin(segment, axis=1),
                         np.max(segment, axis=1),
                         np.argmax(segment, axis=1)]
                block = np.transpose(np.vstack(stats))
                feature_matrix = np.hstack([feature_matrix, block])
        return feature_matrix
class Featuriser2(TransformerMixin):
    """Deprecated. Featuriser for time series. Calculates a set of statistical measures
    on each channel of the dataset and returns a flattened matrix to train
    sklearn models on"""
    def __init__(self):
        pass
    def fit(self,X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X, **kwargs):
        """Transform data.

        Parameters
        ----------
        X
            Time series dataset of shape (n_samples, n_timesteps, n_channels)
        Returns
        -------
        numpy.ndarray
            Feature matrix of shape (n_samples, n_channels * 14)
        """
        X_ = np.empty((X.shape[0], 0))
        # 14 statistics per channel, computed sample by sample (slow:
        # row-wise vstack in a Python loop; kept as-is, class is deprecated).
        for i in range(X.shape[2]):
            table = np.empty((0, 14))
            for x in X[:, :, i]:
                mean = np.mean(x)
                var = np.var(x)
                max_d = x.max()
                max_loc = np.argmax(x)
                min_d = x.min()
                min_loc = np.argmin(x)
                range_d = max_d - min_d
                med = np.median(x)
                first = x[0]
                last = x[-1]
                skew_d = skew(x)
                kurt = kurtosis(x)
                sum = np.sum(x)  # NOTE(review): shadows the builtin `sum`
                mean_abs_change = np.mean(np.abs(np.diff(x)))
                # Concatenate all values to a numpy array (order is fixed and
                # must stay stable for downstream models).
                row = [mean, var, med, first, last, range_d, min_d, min_loc, max_d, max_loc, skew_d, kurt, sum,
                       mean_abs_change]
                row = np.hstack(row)
                table = np.vstack([table, row])
            X_ = np.hstack((X_,table))
        return X_
class Cutter(TransformerMixin):
    """Truncate each series at the position of its channel-0 maximum."""
    def fit(self, X, y=None, **kwargs):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, **kwargs):
        """Cut each series.

        Parameters
        ----------
        X
            Time series dataset (list-like of per-series arrays)
        Returns
        -------
        list
            Each series truncated just before the argmax of its first channel.
        """
        trimmed = []
        for series in X:
            cut_at = np.argmax(np.array(series)[:, 0])
            trimmed.append(series[:cut_at])
        return trimmed
def plot_dtc(dtc):
    """Render a fitted decision tree to models\\dtc.svg via graphviz.

    Feature names are reconstructed from the Featuriser layout: per channel,
    per window, the six statistics mean, std, min, min-ind, max, max-ind.
    The channel and class-name lists must be edited to match the dataset
    (see the commented alternatives below).

    Parameters
    ----------
    dtc : decision tree estimator
        Fitted tree; its n_features_ must equal
        len(channels) * n_windows * 6 for the names to line up.
    """
    feature_names = []
    #channels = ["$pos","$vel","$cur"] # test case
    #channels = ["$pos","$cur"] # use case 1
    #channels = ["$pos","$cur","$vel"] # use case 1 with derived velocity
    channels = ["$pos","$for"] # use case 2
    for var in channels:
        # Number of windows = n_features_ / 6 statistics / n_channels.
        for i in range(1,int((dtc.n_features_/6/len(channels))+1)):
            for f in ["{mean}$","{std}$","{min}$","{min-ind}$","{max}$","{max-ind}$"]:
                # LaTeX-style name: <channel>^<window>_<statistic>.
                feature_names.append('{0}^{1}_{2}'.format(var,i,f))
    #target_names = ["0","1","2","3","4"] # test case
    target_names = ["0","1","2","3"] # use case 1 + 2
    dot_data = tree.export_graphviz(dtc, out_file=None,
                                    feature_names=feature_names,
                                    class_names=target_names,
                                    filled=False, rounded=True,
                                    special_characters=True)
    graph = graphviz.Source(dot_data)
    graph.format = 'svg'
    graph.render("models\\dtc")
--- FILE SEPARATOR ---
import warnings
warnings.simplefilter(action='ignore')
import pickle
import pandas as pd
import numpy as np
from utils import TimeSeriesScalerMeanVariance, Flattener, Featuriser, plot_dtc
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_curve, auc
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.base import ClassifierMixin, BaseEstimator, clone
from tslearn.clustering import TimeSeriesKMeans
from tslearn.neighbors import KNeighborsTimeSeriesClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from IPython.display import SVG
from tensorflow.keras.utils import model_to_dot
from tensorflow.keras.utils import plot_model
class Alpaca(ClassifierMixin):
    """
    A learning product classification algorithm.

    Combines a binary anomaly detector (1-NN over k-means centroids of the
    'good' class) with an ensemble classifier (DTC + SVC + CNN); both are
    tuned via grid search in fit() and their artifacts saved to disk.
    """
    def __init__(self):
        # Level-0 components; both are (re)fitted in fit().
        self.anomaly_detection = AnomalyDetection()
        self.classifier = Classifier()
    def fit(self, X, y, stacked=True):
        """
        Fit the algorithm according to the given training data.

        Side effects: writes grid-search results to results\\ad.csv and the
        fitted detector to models\\ad.pkl (plus whatever Classifier.fit saves).

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked: bool
            If true train a meta classifier on kfold CV predictions of the level 1 classifiers
        Returns
        -------
        self: object
            Fitted model
        """
        # Fit anomaly detection
        # Do GridSearch to get best model
        param_grid = {'n_clusters': [10,50,100,200]}
        grid = GridSearchCV(self.anomaly_detection, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\ad.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.anomaly_detection = grid.best_estimator_
        # Save the model
        with open("models\\ad.pkl", 'wb') as file:
            pickle.dump(self.anomaly_detection, file)
        # Fit ensemble classifier
        self.classifier.fit(X, y, stacked)
        return self
    def predict(self, X, voting):
        """
        Perform a classification on samples in X.

        Side effect: writes the individual predictions to
        results\\y_pred_indiv.csv.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting: string
            Voting scheme to use
        Returns
        -------
        y_pred_bin: array, shape (n_samples,)
            Combined binary predictions
        y_pred: array, shape (n_samples,)
            Predictions from ensemble with suggested class labels
        """
        # Class predictions of ensemble
        y_pred, y_pred_ens = self.classifier.predict(X, voting=voting)
        # Binary predictions of anomaly detector
        y_pred_ad = self.anomaly_detection.predict(X)
        # Save individual predictions
        y_pred_indiv = np.column_stack((y_pred_ens, y_pred_ad)).astype(int)
        df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_dtc','y_pred_svc','y_pred_cnn','y_pred_ad'])
        df_results.to_csv("results\\y_pred_indiv.csv",index=False)
        # Any non-zero ensemble class forces a positive (1); otherwise keep
        # the anomaly detector's binary verdict.
        y_pred_bin = np.where(y_pred != 0, 1, y_pred_ad)
        return y_pred_bin, y_pred
class AnomalyDetection(ClassifierMixin, BaseEstimator):
    """
    Anomaly detection with 1-NN and automatic calculation of optimal threshold.

    A 1-NN model is fitted on k-means centroids of the 'good' (label 0)
    training series; a sample is flagged anomalous when its distance to the
    nearest centroid reaches a threshold chosen from the training ROC curve.
    """
    def __init__(self, n_clusters=200):
        self.knn = KNeighborsTimeSeriesClassifier(n_neighbors=1, weights='uniform', metric='euclidean', n_jobs=-1)
        # Distance threshold; determined in fit() via the Youden index.
        self.d = None
        self.n_clusters = n_clusters
    def fit(self, X, y):
        """
        Fit the algorithm according to the given training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        Returns
        -------
        self: object
            Fitted model
        """
        # Fit anomaly detection knn over k-means centroids
        X_good = X[np.where(y == 0)]
        X_bad = X[np.where(y != 0)]
        km = TimeSeriesKMeans(n_clusters=self.n_clusters, metric="euclidean",
                              max_iter=100, random_state=0, n_jobs=-1).fit(X_good)
        # All centroids get label 0 ('good'); only distances are used later.
        self.knn.fit(km.cluster_centers_, np.zeros((self.n_clusters,)))
        # Calculate distances to all samples in good and bad
        d_bad, _ = self.knn.kneighbors(X_bad)
        d_good, _ = self.knn.kneighbors(X_good)
        # Calculate ROC; good samples first, matching the score stacking.
        y_true = np.hstack((np.zeros(X_good.shape[0]), np.ones(X_bad.shape[0])))
        y_score = np.vstack((d_good, d_bad))
        fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=1)
        # Determine d by Youden index (threshold maximising tpr - fpr)
        self.d = thresholds[np.argmax(tpr - fpr)]
        return self
    def predict(self, X):
        """
        Perform a classification on samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        Returns
        -------
        y_pred: array, shape (n_samples,)
            Binary predictions (0 = normal, 1 = anomalous)
        """
        # Anomalous iff nearest-centroid distance is not below threshold d.
        y_pred = np.squeeze(np.where(self.knn.kneighbors(X)[0] < self.d, 0, 1))
        return y_pred
class Classifier(ClassifierMixin):
    """
    Classifier part with ensemble of estimators.

    Level-1 estimators: a decision tree on window statistics, an SVC on
    scaled + flattened series, and a 1-D CNN. Optional level-2 (meta)
    classifiers are trained on out-of-fold level-1 predictions.
    """
    def __init__(self):
        # DTC pipeline
        featuriser = Featuriser()
        dtc = DecisionTreeClassifier()
        self.dtc_pipe = Pipeline([('featuriser', featuriser), ('dtc', dtc)])
        # SVC pipeline
        scaler = TimeSeriesScalerMeanVariance(kind='constant')
        flattener = Flattener()
        svc = SVC()
        self.svc_pipe = Pipeline([('scaler', scaler), ('flattener', flattener), ('svc', svc)])
        # Keras pipeline
        # NOTE(review): the same `scaler` instance is shared with svc_pipe;
        # GridSearchCV clones pipelines before fitting, but an independent
        # instance would be clearer.
        #len_filter = round(len_input*0.05)
        #num_filter = 8
        cnn = KerasClassifier(build_fn=build_cnn, epochs=100, verbose=0)
        self.cnn_pipe = Pipeline([('scaler', scaler), ('cnn', cnn)])
        # Meta classifier
        self.meta_dtc = DecisionTreeClassifier()
        self.meta_svc = SVC()
    def fit(self, X, y, stacked):
        """
        Fit each individual estimator of the ensemble model according to the given training data.

        Side effects: writes grid-search results to results\\*.csv and the
        best estimators to models\\*.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Training samples.
        y : array-like of shape (n_samples,)
            True labels for X.
        stacked: bool
            If true train a meta classifier on kfold CV predictions of the level 1 classifiers
        Returns
        -------
        self: object
            Fitted model
        """
        # Fit DTC
        # Do GridSearch to get best model
        param_grid = {'featuriser__windows': [1, 2, 3, 4, 5, 6],
                      'dtc__max_depth': [3, 4, 5],
                      'dtc__criterion': ['gini', 'entropy']}
        grid = GridSearchCV(self.dtc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\dtc.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.dtc_pipe = grid.best_estimator_
        # Plot the dtc
        #plot_dtc(self.dtc_pipe['dtc'])
        # Save the model
        with open("models\\dtc_pipe.pkl", 'wb') as file:
            pickle.dump(self.dtc_pipe, file)
        # Fit SVC
        # Do GridSearch to get best model
        param_grid = {'svc__C': [10, 100, 1000, 10000],
                      'svc__gamma': [0.01, 0.001, 0.0001, 0.00001],
                      'svc__degree': [2, 3],
                      'svc__kernel': ['rbf', 'linear', 'poly']}
        grid = GridSearchCV(self.svc_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\svc.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.svc_pipe = grid.best_estimator_
        # Save the model
        with open("models\\svc_pipe.pkl", 'wb') as file:
            # Bug fix: this previously pickled self.dtc_pipe, so the file
            # svc_pipe.pkl actually contained the decision-tree pipeline.
            pickle.dump(self.svc_pipe, file)
        # Fit CNN
        # Do GridSearch to get best model
        param_grid = {'cnn__num_channels':[X.shape[2]],
                      'cnn__len_input':[X.shape[1]],
                      'cnn__num_classes':[np.unique(y).shape[0]],
                      'cnn__batch_size': [20, 30],
                      'cnn__num_filter': [4, 8, 16],
                      'cnn__num_layer': [1, 2],
                      'cnn__len_filter': [0.05, 0.1, 0.2]} # len_filter is defined as fraction of input_len
        grid = GridSearchCV(self.cnn_pipe, param_grid, cv=5, refit=True, verbose=2, n_jobs=-1)
        grid.fit(X, y)
        # Save results
        df_results = pd.DataFrame.from_dict(data=grid.cv_results_)
        df_results.to_csv("results\\cnn.csv",index=False)
        print(grid.best_params_)
        # Take best model
        self.cnn_pipe = grid.best_estimator_
        # Save the model (Keras models are saved in HDF5, not pickled)
        self.cnn_pipe['cnn'].model.save("models\\cnn.h5")
        # Fit the Metaclassifiers
        if stacked:
            # Get level 1 classifier predictions as training data
            X_stacked, y_stacked = kfoldcrossval(self, X, y, k=5)
            # Fit Meta DTC
            self.meta_dtc.fit(X_stacked, y_stacked)
            # Save the model
            with open("models\\meta_dtc.pkl", 'wb') as file:
                pickle.dump(self.meta_dtc, file)
            # Fit Meta SVC
            self.meta_svc.fit(X_stacked, y_stacked)
            # Save the model
            with open("models\\meta_svc.pkl", 'wb') as file:
                pickle.dump(self.meta_svc, file)
        return self
    def predict(self, X, voting='veto'):
        """
        Perform a classification on samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features, n_channels)
            Test samples.
        voting: string
            Voting scheme to use: 'veto' (unanimity, else -1), 'democratic'
            (majority), 'meta_dtc' or 'meta_svc' (stacked meta classifier).
        Returns
        -------
        y_pred: array, shape (n_samples,)
            Predictions
        y_pred_ens: array, shape (n_samples, 3)
            Predictions of the individual estimators
        """
        y_pred = np.empty(np.shape(X)[0])
        # Parallelize this part
        y_dtc = self.dtc_pipe.predict(X)
        y_svc = self.svc_pipe.predict(X)
        y_cnn = self.cnn_pipe.predict(X)
        y_pred_ens = np.stack([y_dtc, y_svc, y_cnn], axis=1).astype(int)
        if voting == 'veto':
            # Unanimous agreement required; -1 marks disagreement.
            for i in range(np.shape(X)[0]):
                if y_dtc[i] == y_svc[i] == y_cnn[i]:
                    y_pred[i] = y_dtc[i]
                else:
                    y_pred[i] = -1
        if voting == 'democratic':
            # Majority vote (ties resolved by lowest class via bincount).
            for i in range(np.shape(X)[0]):
                y_pred[i] = np.argmax(np.bincount(y_pred_ens[i, :]))
        if voting == 'meta_dtc':
            y_pred = self.meta_dtc.predict(y_pred_ens)
        if voting == 'meta_svc':
            y_pred = self.meta_svc.predict(y_pred_ens)
        return y_pred, y_pred_ens
def kfoldcrossval(model, X, y, k=5):
    """
    Collect out-of-fold level-1 predictions to train the meta classifier.

    Parameters
    ----------
    model: object
        Ensemble classifier object
    X : array-like of shape (n_samples, n_features, n_channels)
        Samples.
    y : array-like of shape (n_samples,)
        True labels for X.
    k: int
        Number of splits
    Returns
    -------
    X_stack: array-like of shape (n_samples, n_features)
        Level 1 predictions as training data for metaclassifier
    y_stack: array-like of shape (n_samples,)
        Targets for metaclassifier
    """
    splitter = StratifiedKFold(n_splits=k, shuffle=True, random_state=42)
    X_stack = np.empty((0, 3))
    y_stack = np.empty((0,))
    # Clone the tuned pipelines so refitting does not disturb the originals.
    dtc_clone = clone(model.dtc_pipe)
    svc_clone = clone(model.svc_pipe)
    cnn_clone = clone(model.cnn_pipe)
    # Refit the clones per fold and predict on each held-out part, so every
    # sample receives an out-of-fold level-1 prediction.
    for train_idx, test_idx in splitter.split(X, y):
        dtc_clone.fit(X[train_idx], y[train_idx])
        svc_clone.fit(X[train_idx], y[train_idx])
        cnn_clone.fit(X[train_idx], y[train_idx])
        fold_preds = np.stack((dtc_clone.predict(X[test_idx]),
                               svc_clone.predict(X[test_idx]),
                               cnn_clone.predict(X[test_idx])),
                              axis=-1).astype(int)
        # Accumulate predictions and matching targets across folds.
        X_stack = np.vstack((X_stack, fold_preds))
        y_stack = np.hstack((y_stack, y[test_idx]))
    return X_stack, y_stack
def build_cnn(num_filter, len_filter, num_layer, num_channels, len_input, num_classes):
    """
    Function returning a keras model.

    Parameters
    ----------
    num_filter: int
        Number of filters / kernels in the conv layer
    len_filter: float
        Length of the filters / kernels in the conv layer as fraction of inputlength
    num_layer: int
        Number of convlutional layers in the model
    num_channels: int
        Number of channels of the input
    len_input: int
        Number of dimensions of the input
    num_classes: int
        Number of classes in the dataset = Number of outputs
    Returns
    -------
    model: sequential keras model
        Keras CNN model ready to be trained
    """
    model = Sequential()
    # First Conv Layer: kernel length is a fraction of the input length.
    model.add(Conv1D(filters=num_filter, kernel_size=int(len_filter*len_input), strides=1, padding="same",
                     activation='relu', input_shape=(len_input, num_channels), name='block1_conv1'))
    model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block1_pool'))
    # Other Conv Layers: filter count grows linearly with layer depth l.
    for l in range(2, num_layer + 1):
        model.add(Conv1D(filters=num_filter*l, kernel_size=int(len_filter * len_input), strides=1, padding="same",
                         activation='relu', name='block' + str(l) + '_conv1'))
        model.add(MaxPooling1D(pool_size=2, strides=2, padding="same", name='block' + str(l) + '_pool'))
    # Classification head: flatten -> dense(100) -> softmax over classes.
    model.add(Flatten(name='flatten'))
    model.add(Dense(100, activation='relu', name='fc1'))
    model.add(Dense(num_classes, activation='softmax',name='predictions'))
    # Integer labels -> sparse categorical crossentropy.
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # NOTE(review): writing the diagram on every build is a side effect; this
    # runs for every GridSearchCV candidate and requires a 'models' directory.
    plot_model(model,dpi = 300, show_shapes=True, to_file='models\\cnn.png')
    return model
|
{
"imported_by": [],
"imports": [
"/src/utils.py",
"/src/alpaca.py"
]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.