Dataset Viewer
Auto-converted to Parquet
Column                     Type                  Stats
repo_name                  string (categorical)  100 distinct values
file_path                  string                length 5 - 100 characters
file_content               string                length 27 - 51.9k characters
imported_files_content     string                length 45 - 239k characters
import_relationships       dict                  "imports" / "imported_by" lists of file paths
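Because the split is auto-converted to Parquet, the rows can also be read programmatically instead of through the viewer. A minimal sketch using the Hugging Face `datasets` library; the dataset ID below is a placeholder, since the actual Hub ID is not shown in this view:

import json
from datasets import load_dataset

# "<user>/<dataset-id>" is a placeholder; substitute the real Hub dataset ID.
ds = load_dataset("<user>/<dataset-id>", split="train")

row = ds[0]
print(row["repo_name"])          # e.g. "shuishen112/pairwise-rnn"
print(row["file_path"])          # e.g. "/models/__init__.py"
print(len(row["file_content"]))  # the whole source file stored as one string

# import_relationships holds the dependency edges for the file; depending on
# how the conversion serialised it, it may arrive as a dict or a JSON string.
rel = row["import_relationships"]
if isinstance(rel, str):
    rel = json.loads(rel)
print(rel["imports"], rel["imported_by"])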
repo_name: shuishen112/pairwise-rnn
file_path: /models/__init__.py
file_content:
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN

def setup(opt):
    if opt["model_name"] == "cnn":
        model = CNN(opt)
    elif opt["model_name"] == "rnn":
        model = RNN(opt)
    elif opt['model_name'] == 'qcnn':
        model = QCNN(opt)
    else:
        print("no model")
        exit(0)
    return model

imported_files_content (/models/QA_CNN_pairwise.py):
#coding:utf-8 import tensorflow as tf import numpy as np from tensorflow.contrib import rnn import models.blocks as blocks # model_type :apn or qacnn class QA_CNN_extend(object): # def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size, # dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'): # # """ # QA_RNN model for question answering # # Args: # self.dropout_keep_prob: dropout rate # self.num_filters : number of filters # self.para : parameter list # self.extend_feature_dim : my extend feature dimension # self.max_input_left : the length of question # self.max_input_right : the length of answer # self.pooling : pooling strategy :max pooling or attentive pooling # # """ # self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob') # self.num_filters = num_filters # self.embeddings = embeddings # self.embedding_size = embedding_size # self.batch_size = batch_size # self.filter_sizes = filter_sizes # self.l2_reg_lambda = l2_reg_lambda # self.para = [] # # self.max_input_left = max_input_left # self.max_input_right = max_input_right # self.trainable = trainable # self.vocab_size = vocab_size # self.pooling = pooling # self.total_num_filter = len(self.filter_sizes) * self.num_filters # # self.conv = conv # self.pooling = 'traditional' # self.learning_rate = learning_rate # # self.hidden_size = hidden_size # # self.attention_size = 100 def __init__(self,opt): for key,value in opt.items(): self.__setattr__(key,value) self.attention_size = 100 self.pooling = 'mean' self.total_num_filter = len(self.filter_sizes) * self.num_filters self.para = [] self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob') def create_placeholder(self): print(('Create placeholders')) # he length of the sentence is varied according to the batch,so the None,None self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question') self.max_input_left = tf.shape(self.question)[1] self.batch_size = tf.shape(self.question)[0] self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer') self.max_input_right = tf.shape(self.answer)[1] self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right') # self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask') # self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask') # self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask') def add_embeddings(self): print( 'add embeddings') if self.embeddings is not None: print( "load embedding") W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable) else: print( "random embedding") W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable) self.embedding_W = W # self.overlap_W = tf.Variable(a,name="W",trainable = True) self.para.append(self.embedding_W) self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question) self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer) self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative) #real length self.q_len,self.q_mask = blocks.length(self.question) self.a_len,self.a_mask = blocks.length(self.answer) self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative) def convolution(self): print( 'convolution:wide_convolution') self.kernels = [] for i,filter_size in 
enumerate(self.filter_sizes): with tf.name_scope('conv-max-pool-%s' % filter_size): filter_shape = [filter_size,self.embedding_size,1,self.num_filters] W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W") b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b") self.kernels.append((W,b)) self.para.append(W) self.para.append(b) embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding] self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings] #convolution def pooling_graph(self): if self.pooling == 'mean': self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask) self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask) self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask) self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask) elif self.pooling == 'attentive': self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) elif self.pooling == 'position': self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) elif self.pooling == 'traditional': print( self.pooling) print(self.q_cnn) self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask) self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask) def para_initial(self): # print(("---------")) # self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp')) self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U')) self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm')) self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm')) self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms')) self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi')) def mean_pooling(self,conv,mask): conv = tf.squeeze(conv,2) print( tf.expand_dims(tf.cast(mask,tf.float32),-1)) # conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1)) # self.see = conv_mask # print( conv_mask) return tf.reduce_mean(conv,axis = 1); def attentive_pooling(self,input_left,input_right,q_mask,a_mask): Q = tf.squeeze(input_left,axis = 2) A = tf.squeeze(input_right,axis = 2) print( Q) print( A) # Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q') # A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A') # G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\ # A,transpose_b = True),name = 'G') first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U) second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters]) result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1])) print( second_step) print( tf.transpose(A,perm = [0,2,1])) # print( 'result',result) G = tf.tanh(result) # G = result # column-wise pooling ,row-wise pooling 
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling') col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling') self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q') self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1)) self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a') self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1)) self.see = G R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q') R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a') return R_q,R_a def traditional_attention(self,input_left,input_right,q_mask,a_mask): input_left = tf.squeeze(input_left,axis = 2) input_right = tf.squeeze(input_right,axis = 2) input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2)) Q = tf.reduce_mean(input_left_mask,1) a_shape = tf.shape(input_right) A = tf.reshape(input_right,[-1,self.total_num_filter]) m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1)) f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1])) self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2)) self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True)) self.see = self.f_attention_norm a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1) return Q,a_attention def position_attention(self,input_left,input_right,q_mask,a_mask): input_left = tf.squeeze(input_left,axis = 2) input_right = tf.squeeze(input_right,axis = 2) # Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q') # A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A') Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1) QU = tf.matmul(Q,self.U) QUA = tf.multiply(tf.expand_dims(QU,1),input_right) self.attention_a = tf.cast(tf.argmax(QUA,2) ,tf.float32) # q_shape = tf.shape(input_left) # Q_1 = tf.reshape(input_left,[-1,self.total_num_filter]) # QU = tf.matmul(Q_1,self.U) # QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter]) # A_1 = tf.transpose(input_right,[0,2,1]) # QUA = tf.matmul(QU_1,A_1) # QUA = tf.nn.l2_normalize(QUA,1) # G = tf.tanh(QUA) # Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1) # # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)) # row_pooling = tf.reduce_max(G,1,name="row_pooling") # col_pooling = tf.reduce_max(G,2,name="col_pooling") # self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a") self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32)) self.see = self.attention_a self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True)) self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter]) return Q ,self.r_a def create_loss(self): with tf.name_scope('score'): self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn) self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn) l2_loss = tf.constant(0.0) for p in self.para: l2_loss += tf.nn.l2_loss(p) with 
tf.name_scope("loss"): self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13))) self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss tf.summary.scalar('loss', self.loss) # Accuracy with tf.name_scope("accuracy"): self.correct = tf.equal(0.0, self.losses) self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy") tf.summary.scalar('accuracy', self.accuracy) def create_op(self): self.global_step = tf.Variable(0, name = "global_step", trainable = False) self.optimizer = tf.train.AdamOptimizer(self.learning_rate) self.grads_and_vars = self.optimizer.compute_gradients(self.loss) self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step) def max_pooling(self,conv,input_length): pooled = tf.nn.max_pool( conv, ksize = [1, input_length, 1, 1], strides = [1, 1, 1, 1], padding = 'VALID', name="pool") return pooled def getCosine(self,q,a): pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder) pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder) pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1)) pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1)) pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1) score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores") return score def wide_convolution(self,embedding): cnn_outputs = [] for i,filter_size in enumerate(self.filter_sizes): conv = tf.nn.conv2d( embedding, self.kernels[i][0], strides=[1, 1, self.embedding_size, 1], padding='SAME', name="conv-1" ) h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1") cnn_outputs.append(h) cnn_reshaped = tf.concat(cnn_outputs,3) return cnn_reshaped def variable_summaries(self,var): with tf.name_scope('summaries'): mean = tf.reduce_mean(var) tf.summary.scalar('mean', mean) with tf.name_scope('stddev'): stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) tf.summary.scalar('stddev', stddev) tf.summary.scalar('max', tf.reduce_max(var)) tf.summary.scalar('min', tf.reduce_min(var)) tf.summary.histogram('histogram', var) def build_graph(self): self.create_placeholder() self.add_embeddings() self.para_initial() self.convolution() self.pooling_graph() self.create_loss() self.create_op() self.merged = tf.summary.merge_all() def train(self,sess,data): feed_dict = { self.question:data[0], self.answer:data[1], self.answer_negative:data[2], # self.q_mask:data[3], # self.a_mask:data[4], # self.a_neg_mask:data[5], self.dropout_keep_prob_holder:self.dropout_keep_prob } _, summary, step, loss, accuracy,score12, score13, see = sess.run( [self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see], feed_dict) return _, summary, step, loss, accuracy,score12, score13, see def predict(self,sess,data): feed_dict = { self.question:data[0], self.answer:data[1], # self.q_mask:data[2], # self.a_mask:data[3], self.dropout_keep_prob_holder:1.0 } score = sess.run( self.score12, feed_dict) return score if __name__ == '__main__': cnn = QA_CNN_extend( max_input_left = 33, max_input_right = 40, batch_size = 3, vocab_size = 5000, embedding_size = 100, filter_sizes = [3,4,5], num_filters = 64, hidden_size = 100, dropout_keep_prob = 1.0, embeddings = None, l2_reg_lambda = 0.0, trainable = True, pooling = 'max', conv = 'wide') cnn.build_graph() input_x_1 = np.reshape(np.arange(3 * 33),[3,33]) input_x_2 = np.reshape(np.arange(3 * 40),[3,40]) input_x_3 
= np.reshape(np.arange(3 * 40),[3,40]) q_mask = np.ones((3,33)) a_mask = np.ones((3,40)) a_neg_mask = np.ones((3,40)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) feed_dict = { cnn.question:input_x_1, cnn.answer:input_x_2, # cnn.answer_negative:input_x_3, cnn.q_mask:q_mask, cnn.a_mask:a_mask, cnn.dropout_keep_prob_holder:cnn.dropout_keep # cnn.a_neg_mask:a_neg_mask # cnn.q_pos_overlap:q_pos_embedding, # cnn.q_neg_overlap:q_neg_embedding, # cnn.a_pos_overlap:a_pos_embedding, # cnn.a_neg_overlap:a_neg_embedding, # cnn.q_position:q_position, # cnn.a_pos_position:a_pos_position, # cnn.a_neg_position:a_neg_position } question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict) print( question.shape,answer.shape) print( score)
import_relationships:
{ "imported_by": [], "imports": [ "/models/QA_CNN_pairwise.py" ] }
repo_name: shuishen112/pairwise-rnn
file_path: /run.py
file_content:
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime, os
import models
import numpy as np
import evaluation
import sys
import logging
import time

now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" + time.strftime("%Y%m%d", timeArray)

program = os.path.basename('program')
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
    os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename=log_filename + '/qa.log', filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))

from data_helper import log_time_delta, getLogger

logger = getLogger()

args = Singleton().get_qcnn_flag()
args._parse_flags()
opts = dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
    logger.info(("{}={}".format(attr.upper(), value)))
    opts[attr] = value

train, test, dev = data_helper.load(args.data, filter=args.clean)

q_max_sent_length = max(map(lambda x: len(x), train['question'].str.split()))
a_max_sent_length = max(map(lambda x: len(x), train['answer'].str.split()))

alphabet = data_helper.get_alphabet([train, test, dev], dataset=args.data)
logger.info('the number of words :%d ' % len(alphabet))

if args.data == "quora" or args.data == "8008":
    print("cn embedding")
    embedding = data_helper.get_embedding(alphabet, dim=200, language="cn", dataset=args.data)
    train_data_loader = data_helper.getBatch48008
else:
    embedding = data_helper.get_embedding(alphabet, dim=300, dataset=args.data)
    train_data_loader = data_helper.get_mini_batch

opts["embeddings"] = embedding
opts["vocab_size"] = len(alphabet)
opts["max_input_right"] = a_max_sent_length
opts["max_input_left"] = q_max_sent_length
opts["filter_sizes"] = list(map(int, args.filter_sizes.split(",")))
print("innitilize over")

# with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
    # with tf.device("/cpu:0"):
    session_conf = tf.ConfigProto()
    session_conf.allow_soft_placement = args.allow_soft_placement
    session_conf.log_device_placement = args.log_device_placement
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(config=session_conf)

    model = models.setup(opts)
    model.build_graph()
    saver = tf.train.Saver()
    # ckpt = tf.train.get_checkpoint_state("checkpoint")
    # if ckpt and ckpt.model_checkpoint_path:
    #     # Restores from checkpoint
    #     saver.restore(sess, ckpt.model_checkpoint_path)
    # if os.path.exists("model"):
    #     import shutil
    #     shutil.rmtree("model")
    # builder = tf.saved_model.builder.SavedModelBuilder("./model")
    # builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
    # builder.save(True)
    # variable_averages = tf.train.ExponentialMovingAverage( model)
    # variables_to_restore = variable_averages.variables_to_restore()
    # saver = tf.train.Saver(variables_to_restore)
    # for name in variables_to_restore:
    #     print(name)
    sess.run(tf.global_variables_initializer())

    @log_time_delta
    def predict(model, sess, batch, test):
        scores = []
        for data in batch:
            score = model.predict(sess, data)
            scores.extend(score)
        return np.array(scores[:len(test)])

    best_p1 = 0
    for i in range(args.num_epoches):
        for data in train_data_loader(train, alphabet, args.batch_size, model=model, sess=sess):
            # for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
            _, summary, step, loss, accuracy, score12, score13, see = model.train(sess, data)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy, np.mean(score12), np.mean(score13)))
            logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy, np.mean(score12), np.mean(score13)))

        # <<<<<<< HEAD
        #     if i>0 and i % 5 ==0:
        #         test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
        #         predicted_test = predict(model,sess,test_datas,test)
        #         map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
        #         logger.info('map_mrr test' +str(map_mrr_test))
        #         print('map_mrr test' +str(map_mrr_test))
        #
        #         test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
        #         predicted_test = predict(model,sess,test_datas,dev)
        #         map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
        #         logger.info('map_mrr dev' +str(map_mrr_test))
        #         print('map_mrr dev' +str(map_mrr_test))
        #         map,mrr,p1 = map_mrr_test
        #         if p1>best_p1:
        #             best_p1=p1
        #             filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
        #             save_path = saver.save(sess, filename)
        #             # load_path = saver.restore(sess, model_path)
        #             import shutil
        #             shutil.rmtree("model")
        #             builder = tf.saved_model.builder.SavedModelBuilder("./model")
        #             builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
        #             builder.save(True)
        # =======

        test_datas = data_helper.get_mini_batch_test(test, alphabet, args.batch_size)
        predicted_test = predict(model, sess, test_datas, test)
        map_mrr_test = evaluation.evaluationBypandas(test, predicted_test)
        logger.info('map_mrr test' + str(map_mrr_test))
        print('epoch ' + str(i) + 'map_mrr test' + str(map_mrr_test))

imported_files_content (/data_helper.py, /config.py):
#-*- coding:utf-8 -*- import os import numpy as np import tensorflow as tf import string from collections import Counter import pandas as pd from tqdm import tqdm import random from functools import wraps import time import pickle def log_time_delta(func): @wraps(func) def _deco(*args, **kwargs): start = time.time() ret = func(*args, **kwargs) end = time.time() delta = end - start print( "%s runed %.2f seconds"% (func.__name__,delta)) return ret return _deco import tqdm from nltk.corpus import stopwords OVERLAP = 237 class Alphabet(dict): def __init__(self, start_feature_id = 1): self.fid = start_feature_id def add(self, item): idx = self.get(item, None) if idx is None: idx = self.fid self[item] = idx # self[idx] = item self.fid += 1 return idx def dump(self, fname): with open(fname, "w") as out: for k in sorted(self.keys()): out.write("{}\t{}\n".format(k, self[k])) def cut(sentence): tokens = sentence.lower().split() # tokens = [w for w in tokens if w not in stopwords.words('english')] return tokens @log_time_delta def load(dataset, filter = False): data_dir = "data/" + dataset datas = [] for data_name in ['train.txt','test.txt','dev.txt']: data_file = os.path.join(data_dir,data_name) data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0') # data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0') if filter == True: datas.append(removeUnanswerdQuestion(data)) else: datas.append(data) # sub_file = os.path.join(data_dir,'submit.txt') # submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3) # datas.append(submit) return tuple(datas) @log_time_delta def removeUnanswerdQuestion(df): counter= df.groupby("question").apply(lambda group: sum(group["flag"])) questions_have_correct=counter[counter>0].index counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0)) questions_have_uncorrect=counter[counter>0].index counter=df.groupby("question").apply(lambda group: len(group["flag"])) questions_multi=counter[counter>1].index return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index() @log_time_delta def get_alphabet(corpuses=None,dataset=""): pkl_name="temp/"+dataset+".alphabet.pkl" if os.path.exists(pkl_name): return pickle.load(open(pkl_name,"rb")) alphabet = Alphabet(start_feature_id = 0) alphabet.add('[UNK]') alphabet.add('END') count = 0 for corpus in corpuses: for texts in [corpus["question"].unique(),corpus["answer"]]: for sentence in texts: tokens = cut(sentence) for token in set(tokens): alphabet.add(token) print("alphabet size %d" % len(alphabet.keys()) ) if not os.path.exists("temp"): os.mkdir("temp") pickle.dump( alphabet,open(pkl_name,"wb")) return alphabet @log_time_delta def getSubVectorsFromDict(vectors,vocab,dim = 300): embedding = np.zeros((len(vocab),dim)) count = 1 for word in vocab: if word in vectors: count += 1 embedding[vocab[word]]= vectors[word] else: embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist() print( 'word in embedding',count) return embedding def encode_to_split(sentence,alphabet): indices = [] tokens = cut(sentence) seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens] return seq @log_time_delta def load_text_vec(alphabet,filename="",embedding_size = 100): vectors = {} with open(filename,encoding='utf-8') as f: i = 0 for line in f: i += 1 if i % 100000 
== 0: print( 'epch %d' % i) items = line.strip().split(' ') if len(items) == 2: vocab_size, embedding_size= items[0],items[1] print( ( vocab_size, embedding_size)) else: word = items[0] if word in alphabet: vectors[word] = items[1:] print( 'embedding_size',embedding_size) print( 'done') print( 'words found in wor2vec embedding ',len(vectors.keys())) return vectors @log_time_delta def get_embedding(alphabet,dim = 300,language ="en",dataset=""): pkl_name="temp/"+dataset+".subembedding.pkl" if os.path.exists(pkl_name): return pickle.load(open(pkl_name,"rb")) if language=="en": fname = 'embedding/glove.6B/glove.6B.300d.txt' else: fname= "embedding/embedding.200.header_txt" embeddings = load_text_vec(alphabet,fname,embedding_size = dim) sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim) pickle.dump( sub_embeddings,open(pkl_name,"wb")) return sub_embeddings @log_time_delta def get_mini_batch_test(df,alphabet,batch_size): q = [] a = [] pos_overlap = [] for index,row in df.iterrows(): question = encode_to_split(row["question"],alphabet) answer = encode_to_split(row["answer"],alphabet) overlap_pos = overlap_index(row['question'],row['answer']) q.append(question) a.append(answer) pos_overlap.append(overlap_pos) m = 0 n = len(q) idx_list = np.arange(m,n,batch_size) mini_batches = [] for idx in idx_list: mini_batches.append(np.arange(idx,min(idx + batch_size,n))) for mini_batch in mini_batches: mb_q = [ q[t] for t in mini_batch] mb_a = [ a[t] for t in mini_batch] mb_pos_overlap = [pos_overlap[t] for t in mini_batch] mb_q,mb_q_mask = prepare_data(mb_q) mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap) yield(mb_q,mb_a) # calculate the overlap_index def overlap_index(question,answer,stopwords = []): ans_token = cut(answer) qset = set(cut(question)) aset = set(ans_token) a_len = len(ans_token) # q_index = np.arange(1,q_len) a_index = np.arange(1,a_len + 1) overlap = qset.intersection(aset) # for i,q in enumerate(cut(question)[:q_len]): # value = 1 # if q in overlap: # value = 2 # q_index[i] = value for i,a in enumerate(ans_token): if a in overlap: a_index[i] = OVERLAP return a_index def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False): q,a,neg_a=[],[],[] answers=df["answer"][:250] ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict() for question in tqdm(df['question'].unique()): index= ground_truth[question] canindates = [i for i in range(250)] canindates.remove(index) a_neg_index = random.choice(canindates) seq_q = encode_to_split(question,alphabet) seq_a = encode_to_split(answers[index],alphabet) seq_neg_a = encode_to_split(answers[a_neg_index],alphabet) q.append(seq_q) a.append( seq_a) neg_a.append(seq_neg_a ) return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle) def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False): if sort_by_len: sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True) q = [ q[i] for i in sorted_index] a = [a[i] for i in sorted_index] neg_a = [ neg_a[i] for i in sorted_index] pos_overlap = [pos_overlap[i] for i in sorted_index] neg_overlap = [neg_overlap[i] for i in sorted_index] #get batch m = 0 n = len(q) idx_list = np.arange(m,n,batch_size) if shuffle: np.random.shuffle(idx_list) mini_batches = [] for idx in idx_list: mini_batches.append(np.arange(idx,min(idx + batch_size,n))) for mini_batch in tqdm(mini_batches): mb_q = [ q[t] for t in mini_batch] mb_a = [ a[t] for t in mini_batch] mb_neg_a = [ neg_a[t] for t in mini_batch] 
mb_pos_overlap = [pos_overlap[t] for t in mini_batch] mb_neg_overlap = [neg_overlap[t] for t in mini_batch] mb_q,mb_q_mask = prepare_data(mb_q) mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap) mb_neg_a,mb_neg_overlaps = prepare_data(mb_neg_a,mb_neg_overlap) # mb_a,mb_a_mask = prepare_data(mb_a,mb_pos_overlap) # mb_neg_a , mb_a_neg_mask = prepare_data(mb_neg_a) yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask) def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None): q = [] a = [] neg_a = [] for question in df['question'].unique(): # group = df[df["question"]==question] # pos_answers = group[df["flag"] == 1]["answer"] # neg_answers = group[df["flag"] == 0]["answer"].reset_index() group = df[df["question"]==question] pos_answers = group[group["flag"] == 1]["answer"] neg_answers = group[group["flag"] == 0]["answer"]#.reset_index() for pos in pos_answers: if model is not None and sess is not None: pos_sent= encode_to_split(pos,alphabet) q_sent,q_mask= prepare_data([pos_sent]) neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers] a_sent,a_mask= prepare_data(neg_sents) scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask)) neg_index = scores.argmax() else: if len(neg_answers.index) > 0: neg_index = np.random.choice(neg_answers.index) neg = neg_answers.reset_index().loc[neg_index,]["answer"] seq_q = encode_to_split(question,alphabet) seq_a = encode_to_split(pos,alphabet) seq_neg_a = encode_to_split(neg,alphabet) q.append(seq_q) a.append(seq_a) neg_a.append(seq_neg_a) return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle) def prepare_data(seqs,overlap = None): lengths = [len(seq) for seq in seqs] n_samples = len(seqs) max_len = np.max(lengths) x = np.zeros((n_samples,max_len)).astype('int32') if overlap is not None: overlap_position = np.zeros((n_samples,max_len)).astype('float') for idx ,seq in enumerate(seqs): x[idx,:lengths[idx]] = seq overlap_position[idx,:lengths[idx]] = overlap[idx] return x,overlap_position else: x_mask = np.zeros((n_samples, max_len)).astype('float') for idx, seq in enumerate(seqs): x[idx, :lengths[idx]] = seq x_mask[idx, :lengths[idx]] = 1.0 # print( x, x_mask) return x, x_mask # def prepare_data(seqs): # lengths = [len(seq) for seq in seqs] # n_samples = len(seqs) # max_len = np.max(lengths) # x = np.zeros((n_samples, max_len)).astype('int32') # x_mask = np.zeros((n_samples, max_len)).astype('float') # for idx, seq in enumerate(seqs): # x[idx, :lengths[idx]] = seq # x_mask[idx, :lengths[idx]] = 1.0 # # print( x, x_mask) # return x, x_mask def getLogger(): import sys import logging import os import time now = int(time.time()) timeArray = time.localtime(now) timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray) log_filename = "log/" +time.strftime("%Y%m%d", timeArray) program = os.path.basename(sys.argv[0]) logger = logging.getLogger(program) if not os.path.exists(log_filename): os.mkdir(log_filename) logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w') logging.root.setLevel(level=logging.INFO) logger.info("running %s" % ' '.join(sys.argv)) return logger --- FILE SEPARATOR --- class Singleton(object): __instance=None def __init__(self): pass def getInstance(self): if Singleton.__instance is None: # Singleton.__instance=object.__new__(cls,*args,**kwd) Singleton.__instance=self.get_test_flag() print("build FLAGS over") 
return Singleton.__instance def get_test_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "cnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_rnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "rnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") # flags.DEFINE_string('data','8008','data set') flags.DEFINE_string('data','trec','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',False,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_cnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "cnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_qcnn_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "qcnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? 
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','mean','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_8008_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "rnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','8008','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',False,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS if __name__=="__main__": args=Singleton().get_test_flag() for attr, value in sorted(args.__flags.items()): print(("{}={}".format(attr.upper(), value)))
import_relationships:
{ "imported_by": [], "imports": [ "/data_helper.py", "/config.py" ] }
repo_name: shuishen112/pairwise-rnn
file_path: /test.py
file_content:
# -*- coding: utf-8 -*-
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime
import os
import models
import numpy as np
import evaluation
from data_helper import log_time_delta, getLogger

logger = getLogger()

args = Singleton().get_rnn_flag()
# args = Singleton().get_8008_flag()
args._parse_flags()
opts = dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
    logger.info(("{}={}".format(attr.upper(), value)))
    opts[attr] = value

train, test, dev = data_helper.load(args.data, filter=args.clean)

q_max_sent_length = max(map(lambda x: len(x), train['question'].str.split()))
a_max_sent_length = max(map(lambda x: len(x), train['answer'].str.split()))

alphabet = data_helper.get_alphabet([train, test, dev], dataset=args.data)
logger.info('the number of words :%d ' % len(alphabet))

if args.data == "quora" or args.data == "8008":
    print("cn embedding")
    embedding = data_helper.get_embedding(alphabet, dim=200, language="cn", dataset=args.data)
    train_data_loader = data_helper.getBatch48008
else:
    embedding = data_helper.get_embedding(alphabet, dim=300, dataset=args.data)
    train_data_loader = data_helper.get_mini_batch

opts["embeddings"] = embedding
opts["vocab_size"] = len(alphabet)
opts["max_input_right"] = a_max_sent_length
opts["max_input_left"] = q_max_sent_length
opts["filter_sizes"] = list(map(int, args.filter_sizes.split(",")))
print("innitilize over")

# with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
    # with tf.device("/cpu:0"):
    session_conf = tf.ConfigProto()
    session_conf.allow_soft_placement = args.allow_soft_placement
    session_conf.log_device_placement = args.log_device_placement
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(config=session_conf)

    model = models.setup(opts)
    model.build_graph()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # fun first than print or save

    ckpt = tf.train.get_checkpoint_state("checkpoint")
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(sess.run(model.position_embedding)[0])

    if os.path.exists("model"):
        import shutil
        shutil.rmtree("model")
    builder = tf.saved_model.builder.SavedModelBuilder("./model")
    builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
    builder.save(True)

    variable_averages = tf.train.ExponentialMovingAverage( model)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    for name in variables_to_restore:
        print(name)

    @log_time_delta
    def predict(model, sess, batch, test):
        scores = []
        for data in batch:
            score = model.predict(sess, data)
            scores.extend(score)
        return np.array(scores[:len(test)])

    text = "怎么 提取 公积金 ?"
    splited_text = data_helper.encode_to_split(text, alphabet)

    mb_q, mb_q_mask = data_helper.prepare_data([splited_text])
    mb_a, mb_a_mask = data_helper.prepare_data([splited_text])

    data = (mb_q, mb_a, mb_q_mask, mb_a_mask)
    score = model.predict(sess, data)
    print(score)

    feed_dict = {
        model.question: data[0],
        model.answer: data[1],
        model.q_mask: data[2],
        model.a_mask: data[3],
        model.dropout_keep_prob_holder: 1.0
    }
    sess.run(model.position_embedding, feed_dict=feed_dict)[0]
imported_files_content (/data_helper.py, /config.py): identical to the copies shown above for /run.py.
(default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','wiki','data set') flags.DEFINE_string('pooling','mean','max pooling or attentive pooling') flags.DEFINE_boolean('clean',True,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS def get_8008_flag(self): import tensorflow as tf flags = tf.app.flags if len(flags.FLAGS.__dict__.keys())<=2: flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)") flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')") flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)") flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)") flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)") flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)") flags.DEFINE_integer("max_len_left", 40, "max document length of left input") flags.DEFINE_integer("max_len_right", 40, "max document length of right input") flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)") flags.DEFINE_integer("hidden_size",100,"the default hidden size") flags.DEFINE_string("model_name", "rnn", "cnn or rnn") # Training parameters flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)") flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)") flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)") flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)") flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)") flags.DEFINE_string('data','8008','data set') flags.DEFINE_string('pooling','max','max pooling or attentive pooling') flags.DEFINE_boolean('clean',False,'whether we clean the data') flags.DEFINE_string('conv','wide','wide conv or narrow') flags.DEFINE_integer('gpu',0,'gpu number') # Misc Parameters flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") return flags.FLAGS if __name__=="__main__": args=Singleton().get_test_flag() for attr, value in sorted(args.__flags.items()): print(("{}={}".format(attr.upper(), value)))
{ "imported_by": [], "imports": [ "/data_helper.py", "/config.py" ] }
Sssssbo/SDCNet
/SDCNet.py
import datetime import os import time import torch from torch import nn from torch import optim from torch.autograd import Variable from torch.utils.data import DataLoader from torchvision import transforms import pandas as pd import numpy as np import joint_transforms from config import msra10k_path, MTDD_train_path from datasets import ImageFolder_joint from misc import AvgMeter, check_mkdir, cal_sc from model import R3Net, SDCNet from torch.backends import cudnn cudnn.benchmark = True torch.manual_seed(2021) torch.cuda.set_device(6) csv_path = './label_DUTS-TR.csv' ckpt_path = './ckpt' exp_name ='SDCNet' args = { 'iter_num': 30000, 'train_batch_size': 16, 'last_iter': 0, 'lr': 1e-3, 'lr_decay': 0.9, 'weight_decay': 5e-4, 'momentum': 0.9, 'snapshot': '' } joint_transform = joint_transforms.Compose([ joint_transforms.RandomCrop(300), joint_transforms.RandomHorizontallyFlip(), joint_transforms.RandomRotate(10) ]) img_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) target_transform = transforms.ToTensor() to_pil = transforms.ToPILImage() all_data = pd.read_csv(csv_path) train_set = ImageFolder_joint(all_data, joint_transform, img_transform, target_transform) train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True, drop_last=True)# log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt') def main(): net = SDCNet(num_classes = 5).cuda().train() # print('training in ' + exp_name) optimizer = optim.SGD([ {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'], 'lr': 2 * args['lr']}, {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'], 'lr': args['lr'], 'weight_decay': args['weight_decay']} ], momentum=args['momentum']) if len(args['snapshot']) > 0: print('training resumes from ' + args['snapshot']) net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth'))) optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth'))) optimizer.param_groups[0]['lr'] = 2 * args['lr'] optimizer.param_groups[1]['lr'] = args['lr'] check_mkdir(ckpt_path) check_mkdir(os.path.join(ckpt_path, exp_name)) open(log_path, 'w').write(str(args) + '\n\n') train(net, optimizer) def train(net, optimizer): start_time = time.time() curr_iter = args['last_iter'] num_class = [0, 0, 0, 0, 0] while True: total_loss_record, loss0_record, loss1_record, loss2_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter() batch_time = AvgMeter() end = time.time() print('-----begining the first stage, train_mode==0-----') for i, data in enumerate(train_loader): optimizer.param_groups[0]['lr'] = 2 * args['lr'] * (1 - float(curr_iter) / args['iter_num'] ) ** args['lr_decay'] optimizer.param_groups[1]['lr'] = args['lr'] * (1 - float(curr_iter) / args['iter_num'] ) ** args['lr_decay'] inputs, gt, labels = data print(labels) # depends on the num of classes cweight = torch.tensor([0.5, 0.75, 1, 1.25, 1.5]) #weight = torch.ones(size=gt.shape) weight = gt.clone().detach() sizec = labels.numpy() #ta = np.zeros(shape=gt.shape) ''' np.zeros(shape=labels.shape) sc = gt.clone().detach() for i in range(len(sizec)): gta = np.array(to_pil(sc[i,:].data.squeeze(0).cpu()))# #print(gta.shape) labels[i] = cal_sc(gta) sizec[i] = labels[i] print(labels) ''' batch_size = inputs.size(0) inputs = Variable(inputs).cuda() gt = Variable(gt).cuda() labels = Variable(labels).cuda() 
#print(sizec.shape) optimizer.zero_grad() p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 = net(inputs, sizec) # mode=1 criterion = nn.BCEWithLogitsLoss().cuda() criterion2 = nn.CrossEntropyLoss().cuda() gt2 = gt.long() gt2 = gt2.squeeze(1) l5 = criterion2(p5, gt2) l4 = criterion2(p4, gt2) l3 = criterion2(p3, gt2) l2 = criterion2(p2, gt2) l1 = criterion2(p1, gt2) loss0 = criterion(predict11, gt) loss10 = criterion(predict10, gt) loss9 = criterion(predict9, gt) loss8 = criterion(predict8, gt) loss7 = criterion(predict7, gt) loss6 = criterion(predict6, gt) loss5 = criterion(predict5, gt) loss4 = criterion(predict4, gt) loss3 = criterion(predict3, gt) loss2 = criterion(predict2, gt) loss1 = criterion(predict1, gt) total_loss = l1 + l2 + l3 + l4 + l5 + loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 + loss8 + loss9 + loss10 total_loss.backward() optimizer.step() total_loss_record.update(total_loss.item(), batch_size) loss1_record.update(l5.item(), batch_size) loss0_record.update(loss0.item(), batch_size) curr_iter += 1.0 batch_time.update(time.time() - end) end = time.time() log = '[iter %d], [R1/Mode0], [total loss %.5f]\n' \ '[l5 %.5f], [loss0 %.5f]\n' \ '[lr %.13f], [time %.4f]' % \ (curr_iter, total_loss_record.avg, loss1_record.avg, loss0_record.avg, optimizer.param_groups[1]['lr'], batch_time.avg) print(log) print('Num of class:', num_class) open(log_path, 'a').write(log + '\n') if curr_iter == args['iter_num']: torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % curr_iter)) torch.save(optimizer.state_dict(), os.path.join(ckpt_path, exp_name, '%d_optim.pth' % curr_iter)) total_time = time.time() - start_time print(total_time) return if __name__ == '__main__': main()
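For reference, the learning-rate policy used inside train() above is the common "poly" schedule: both parameter groups decay with (1 - curr_iter / iter_num) ** lr_decay, and the bias group is kept at twice the base rate. A minimal sketch (ours, not repository code) of that schedule with the values from args:

def poly_lr(base_lr, curr_iter, iter_num, lr_decay=0.9):
    # Rate for the weight group; the bias group uses 2 * poly_lr(...).
    return base_lr * (1 - float(curr_iter) / iter_num) ** lr_decay

# With args: lr = 1e-3, iter_num = 30000, lr_decay = 0.9
# poly_lr(1e-3, 0, 30000)      -> 1.0e-03
# poly_lr(1e-3, 15000, 30000)  -> ~5.4e-04
# poly_lr(1e-3, 29999, 30000)  -> ~9.3e-08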
import torch import torch.nn.functional as F from torch import nn from resnext import ResNeXt101 class R3Net(nn.Module): def __init__(self): super(R3Net, self).__init__() res50 = ResNeXt101() self.layer0 = res50.layer0 self.layer1 = res50.layer1 self.layer2 = res50.layer2 self.layer3 = res50.layer3 self.layer4 = res50.layer4 self.reduce_low = nn.Sequential( nn.Conv2d(64 + 256 + 512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce_high = nn.Sequential( nn.Conv2d(1024 + 2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), _ASPP(256) ) self.predict0 = nn.Conv2d(256, 1, kernel_size=1) self.predict1 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict2 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict3 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict4 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict5 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict6 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) for m in self.modules(): if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout): m.inplace = True def forward(self, x, label = None): layer0 = self.layer0(x) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) l0_size = layer0.size()[2:] reduce_low = self.reduce_low(torch.cat(( layer0, F.interpolate(layer1, size=l0_size, mode='bilinear', align_corners=True), F.interpolate(layer2, size=l0_size, mode='bilinear', align_corners=True)), 1)) reduce_high = self.reduce_high(torch.cat(( layer3, F.interpolate(layer4, size=layer3.size()[2:], mode='bilinear', align_corners=True)), 1)) reduce_high = F.interpolate(reduce_high, size=l0_size, mode='bilinear', align_corners=True) predict0 = self.predict0(reduce_high) predict1 = self.predict1(torch.cat((predict0, reduce_low), 1)) + predict0 predict2 = self.predict2(torch.cat((predict1, reduce_high), 1)) + predict1 predict3 = self.predict3(torch.cat((predict2, reduce_low), 1)) + predict2 predict4 = self.predict4(torch.cat((predict3, reduce_high), 1)) + predict3 predict5 = self.predict5(torch.cat((predict4, reduce_low), 1)) + predict4 predict6 = self.predict6(torch.cat((predict5, reduce_high), 1)) + predict5 predict0 = F.interpolate(predict0, size=x.size()[2:], mode='bilinear', align_corners=True) predict1 = 
F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True) predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True) predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True) predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True) predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True) predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True) if self.training: return predict0, predict1, predict2, predict3, predict4, predict5, predict6 return F.sigmoid(predict6) #-------------------------------------------------------------------------------------------- class SDCNet(nn.Module): def __init__(self, num_classes): super(SDCNet, self).__init__() res50 = ResNeXt101() self.layer0 = res50.layer0 self.layer1 = res50.layer1 self.layer2 = res50.layer2 self.layer3 = res50.layer3 self.layer4 = res50.layer4 self.reducex = nn.Sequential( nn.Conv2d(2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), _ASPP(256) ) self.reduce5 = nn.Sequential( nn.Conv2d(64 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce6 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce7 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce8 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce9 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce10 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) # --------------extra module--------------- self.reduce3_0 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_1 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_2 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), 
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_3 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_4 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_0 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_1 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_2 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_3 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_4 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_0 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_1 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_2 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_3 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_4 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_0 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) 
self.reduce0_1 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_2 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_3 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_4 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) # self.predict0 = nn.Conv2d(256, 1, kernel_size=1) self.predict1 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict2 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict3 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict4 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict5 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict6 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict7 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict8 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict9 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict10 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.pre4 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), 
nn.Conv2d(128, 2, kernel_size=1) ) self.pre3 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 2, kernel_size=1) ) self.pre2 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 2, kernel_size=1) ) self.pre1 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 2, kernel_size=1) ) self.reducex_1 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reducex_2 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reducex_3 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) for m in self.modules(): if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout): m.inplace = True self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc0 = nn.Sequential( nn.BatchNorm1d(256), nn.Dropout(0.5), nn.Linear(256, num_classes), ) def forward(self, x, c): layer0 = self.layer0(x) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) l0_size = layer0.size()[2:] l1_size = layer1.size()[2:] l2_size = layer2.size()[2:] l3_size = layer3.size()[2:] F1 = self.reducex(layer4) p4 = self.pre4(F1) p4 = F.interpolate(p4, size=x.size()[2:], mode='bilinear', align_corners=True) F0_4 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True) F0_3 = self.reducex_3(torch.cat((F0_4, layer3), 1)) p3 = self.pre3(F0_3) p3 = F.interpolate(p3, size=x.size()[2:], mode='bilinear', align_corners=True) F0_3 = F.interpolate(F0_3, size=l2_size, mode='bilinear', align_corners=True) F0_2 = self.reducex_2(torch.cat((F0_3, layer2), 1)) p2 = self.pre2(F0_2) p2 = F.interpolate(p2, size=x.size()[2:], mode='bilinear', align_corners=True) F0_2 = F.interpolate(F0_2, size=l1_size, mode='bilinear', align_corners=True) F0_1 = self.reducex_1(torch.cat((F0_2, layer1), 1)) p1 = self.pre1(F0_1) p1 = F.interpolate(p1, size=x.size()[2:], mode='bilinear', align_corners=True) p5 = p4 + p3 + p2 + p1 #saliency detect predict1 = self.predict1(F1) predict1 = F.interpolate(predict1, size=l3_size, mode='bilinear', align_corners=True) F1 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True) F2 = F1[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F2[i, :, :, :] = self.reduce3_0( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 1: F2[i, :, :, :] = self.reduce3_1( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 2: F2[i, :, :, :] = self.reduce3_2( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 3: F2[i, :, :, :] = self.reduce3_3( 
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 4: F2[i, :, :, :] = self.reduce3_4( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) predict2 = self.predict2(F2) + predict1 predict2 = F.interpolate(predict2, size=l2_size, mode='bilinear', align_corners=True) F2 = F.interpolate(F2, size=l2_size, mode='bilinear', align_corners=True) F3 = F2[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F3[i, :, :, :] = self.reduce2_0( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 1: F3[i, :, :, :] = self.reduce2_1( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 2: F3[i, :, :, :] = self.reduce2_2( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 3: F3[i, :, :, :] = self.reduce2_3( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 4: F3[i, :, :, :] = self.reduce2_4( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) predict3 = self.predict3(F3) + predict2 predict3 = F.interpolate(predict3, size=l1_size, mode='bilinear', align_corners=True) F3 = F.interpolate(F3, size=l1_size, mode='bilinear', align_corners=True) F4 = F3[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F4[i, :, :, :] = self.reduce1_0( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 1: F4[i, :, :, :] = self.reduce1_1( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 2: F4[i, :, :, :] = self.reduce1_2( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 3: F4[i, :, :, :] = self.reduce1_3( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 4: F4[i, :, :, :] = self.reduce1_4( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) predict4 = self.predict4(F4) + predict3 F5 = self.reduce5(torch.cat((F4, layer0), 1)) predict5 = self.predict5(F5) + predict4 F0 = F4[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F0[i, :, :, :] = self.reduce0_0(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 1: F0[i, :, :, :] = self.reduce0_1(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 2: F0[i, :, :, :] = self.reduce0_2(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 3: F0[i, :, :, :] = self.reduce0_3(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 4: F0[i, :, :, :] = self.reduce0_4(layer0[i, :, :, :].unsqueeze(0)) F1 = F.interpolate(F1, size=l1_size, mode='bilinear', align_corners=True) F2 = F.interpolate(F2, size=l1_size, mode='bilinear', align_corners=True) F6 = self.reduce6(torch.cat((F0, F5), 1)) F7 = self.reduce7(torch.cat((F0, F4), 1)) F8 = self.reduce8(torch.cat((F0, F3), 1)) F9 = self.reduce9(torch.cat((F0, F2), 1)) F10 = self.reduce10(torch.cat((F0, F1), 1)) predict6 = self.predict6(F6) + predict5 predict7 = self.predict7(F7) + predict6 predict8 = self.predict8(F8) + predict7 predict9 = self.predict9(F9) + predict8 predict10 = self.predict10(F10) + predict9 predict11 = predict6 + predict7 + predict8 + predict9 + predict10 predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True) predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True) predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True) predict4 = F.interpolate(predict4, size=x.size()[2:], 
mode='bilinear', align_corners=True) predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True) predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True) predict7 = F.interpolate(predict7, size=x.size()[2:], mode='bilinear', align_corners=True) predict8 = F.interpolate(predict8, size=x.size()[2:], mode='bilinear', align_corners=True) predict9 = F.interpolate(predict9, size=x.size()[2:], mode='bilinear', align_corners=True) predict10 = F.interpolate(predict10, size=x.size()[2:], mode='bilinear', align_corners=True) predict11 = F.interpolate(predict11, size=x.size()[2:], mode='bilinear', align_corners=True) if self.training: return p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 return F.sigmoid(predict11) #---------------------------------------------------------------------------------------- class _ASPP(nn.Module): def __init__(self, in_dim): super(_ASPP, self).__init__() down_dim = in_dim // 2 self.conv1 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv2 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=2, padding=2), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv3 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=4, padding=4), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv4 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv5 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.fuse = nn.Sequential( nn.Conv2d(5 * down_dim, in_dim, kernel_size=1), nn.BatchNorm2d(in_dim), nn.PReLU() ) def forward(self, x): conv1 = self.conv1(x) conv2 = self.conv2(x) conv3 = self.conv3(x) conv4 = self.conv4(x) conv5 = F.interpolate(self.conv5(F.adaptive_avg_pool2d(x, 1)), size=x.size()[2:], mode='bilinear', align_corners=True) return self.fuse(torch.cat((conv1, conv2, conv3, conv4, conv5), 1)) --- FILE SEPARATOR --- import os import os.path import torch.utils.data as data from PIL import Image class ImageFolder_joint(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] self.label_list = label_list for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __len__(self): return len(self.label_list) def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, label class ImageFolder_joint_for_edge(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs 
self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] edge_path = "."+gt_path.split(".")[1]+"_edge."+gt_path.split(".")[2] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') target_edge = Image.open(edge_path).convert('L') if self.joint_transform is not None: if img.size != target.size or img.size != target_edge.size: print("error path:", img_path, gt_path) print("size:", img.size, target.size, target_edge.size) img, target, target_edge = self.joint_transform(img, target, target_edge) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) target_edge = self.target_transform(target_edge) return img, target, target_edge, label def __len__(self): return len(self.imgs) class TestFolder_joint(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, label, img_path def __len__(self): return len(self.imgs) def make_dataset(root): img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')] return [(os.path.join(root, img_name + '.jpg'), os.path.join(root, img_name + '.png')) for img_name in img_list] class ImageFolder(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, root, joint_transform=None, transform=None, target_transform=None): self.root = root self.imgs = make_dataset(root) self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self): return len(self.imgs) --- FILE SEPARATOR --- import numpy as np import os import pylab as pl #import pydensecrf.densecrf as dcrf class AvgMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def check_mkdir(dir_name): if not os.path.exists(dir_name): os.mkdir(dir_name) def cal_precision_recall_mae(prediction, gt): # input should be np array with data type uint8 assert prediction.dtype == np.uint8 assert gt.dtype == np.uint8 assert prediction.shape == gt.shape eps = 1e-4 prediction = prediction / 255. 
gt = gt / 255. mae = np.mean(np.abs(prediction - gt)) hard_gt = np.zeros(prediction.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) #t is sum of 1 precision, recall, TPR, FP = [], [], [], [] # calculating precision and recall at 255 different binarizing thresholds for threshold in range(256): threshold = threshold / 255. hard_prediction = np.zeros(prediction.shape) hard_prediction[prediction > threshold] = 1 #false_pred = np.zeros(prediction.shape) #false_prediction[prediction < threshold] = 1 a = prediction.shape tp = np.sum(hard_prediction * hard_gt) p = np.sum(hard_prediction) #for roc #fp = np.sum(false_pred * hard_gt) #tpr = (tp + eps)/(a + eps) fp = p - tp #TPR.append(tpr) FP.append(fp) precision.append((tp + eps) / (p + eps)) recall.append((tp + eps) / (t + eps)) return precision, recall, mae#, TPR, FP def cal_fmeasure(precision, recall): assert len(precision) == 256 assert len(recall) == 256 beta_square = 0.3 max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r) for p, r in zip(precision, recall)]) return max_fmeasure def cal_sizec(prediction, gt): # input should be np array with data type uint8 assert prediction.dtype == np.uint8 assert gt.dtype == np.uint8 assert prediction.shape == gt.shape eps = 1e-4 #print(gt.shape) prediction = prediction / 255. gt = gt / 255. hard_gt = np.zeros(prediction.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) #t is sum of 1 precision, recall, TPR, FP = [], [], [], [] # calculating precision and recall at 255 different binarizing thresholds best_threshold = 0 best_F = 0 for threshold in range(256): threshold = threshold / 255. gt_size = np.ones(prediction.shape) a = np.sum(gt_size) hard_prediction = np.zeros(prediction.shape) hard_prediction[prediction > threshold] = 1 tp = np.sum(hard_prediction * hard_gt) p = np.sum(hard_prediction) #print(a, p) precision = (tp + eps) / (p + eps) recall = (tp + eps) / (t + eps) beta_square = 0.3 fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall) if fmeasure > best_F: best_threshold = threshold*255 best_F = fmeasure sm_size = p / a if 0 <= sm_size < 0.1: sizec = 0 elif 0.1 <= sm_size < 0.2: sizec = 1 elif 0.2 <= sm_size < 0.3: sizec = 2 elif 0.3 <= sm_size < 0.4: sizec = 3 elif 0.4 <= sm_size <= 1.0: sizec = 4 return sizec, best_threshold#, TPR, FP def cal_sc(gt): # input should be np array with data type uint8 assert gt.dtype == np.uint8 eps = 1e-4 gt = gt / 255. #print(gt.shape) img_size = np.ones(gt.shape) a = np.sum(img_size) hard_gt = np.zeros(gt.shape) hard_gt[gt > 0.5] = 1 p = np.sum(hard_gt) b = np.sum(gt) sm_size = float(p) / float(a) #print(p, a, sm_size, b) #print(gt) if 0 <= sm_size < 0.1: sizec = 0 elif 0.1 <= sm_size < 0.2: sizec = 1 elif 0.2 <= sm_size < 0.3: sizec = 2 elif 0.3 <= sm_size < 0.4: sizec = 3 elif 0.4 <= sm_size <= 1.0: sizec = 4 return sizec def pr_cruve(precision, recall): assert len(precision) == 256 assert len(recall) == 256 r = [a[1] for a in zip(precision, recall)] p = [a[0] for a in zip(precision, recall)] pl.title('PR curve') pl.xlabel('Recall') pl.xlabel('Precision') pl.plot(r, p) pl.show() # for define the size type of the salient object def size_aware(gt): assert gt.dtype == np.uint8 eps = 1e-4 gt = gt / 255. 
hard_gt = np.zeros(gt.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) pic = np.size(hard_gt) rate = t/pic return rate # # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf # def crf_refine(img, annos): # def _sigmoid(x): # return 1 / (1 + np.exp(-x)) # assert img.dtype == np.uint8 # assert annos.dtype == np.uint8 # assert img.shape[:2] == annos.shape # # img and annos should be np array with data type uint8 # EPSILON = 1e-8 # M = 2 # salient or not # tau = 1.05 # # Setup the CRF model # d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M) # anno_norm = annos / 255. # n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm)) # p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm)) # U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32') # U[0, :] = n_energy.flatten() # U[1, :] = p_energy.flatten() # d.setUnaryEnergy(U) # d.addPairwiseGaussian(sxy=3, compat=3) # d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5) # # Do the inference # infer = np.array(d.inference(1)).astype('float32') # res = infer[1, :] # res = res * 255 # res = res.reshape(img.shape[:2]) # return res.astype('uint8')
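The size-class label consumed by SDCNet corresponds to the binning in cal_sc above: the fraction of foreground pixels in the ground-truth mask is cut at 0.1 / 0.2 / 0.3 / 0.4 into five classes. A small usage sketch (ours), assuming misc.py from this repository is importable:

import numpy as np
from misc import cal_sc

gt = np.zeros((100, 100), dtype=np.uint8)
gt[:25, :] = 255           # 25% of the pixels are salient
print(cal_sc(gt))          # -> 2, since 0.2 <= 0.25 < 0.3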
{ "imported_by": [], "imports": [ "/model.py", "/datasets.py", "/misc.py" ] }
Sssssbo/SDCNet
/create_free.py
import numpy as np
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
import cv2

from config import ecssd_path, hkuis_path, pascals_path, sod_path, dutomron_path, MTDD_test_path
from misc import check_mkdir, crf_refine, AvgMeter, cal_precision_recall_mae, cal_fmeasure
from datasets import TestFolder_joint
import joint_transforms
from model import HSNet_single1, HSNet_single1_ASPP, HSNet_single1_NR, HSNet_single2, SDMS_A, SDMS_C

torch.manual_seed(2018)

# set which gpu to use
torch.cuda.set_device(0)

ckpt_path = './ckpt'
test_path = './test_ECSSD.csv'


def main():
    img = np.zeros((512, 512), dtype=np.uint8)
    img2 = cv2.imread('./0595.PNG', 0)
    cv2.imshow('img', img2)
    # cv2.waitKey(0)
    print(img, img2)
    Image.fromarray(img).save('./free.png')


if __name__ == '__main__':
    main()
import os import os.path import torch.utils.data as data from PIL import Image class ImageFolder_joint(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] self.label_list = label_list for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __len__(self): return len(self.label_list) def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, label class ImageFolder_joint_for_edge(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] edge_path = "."+gt_path.split(".")[1]+"_edge."+gt_path.split(".")[2] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') target_edge = Image.open(edge_path).convert('L') if self.joint_transform is not None: if img.size != target.size or img.size != target_edge.size: print("error path:", img_path, gt_path) print("size:", img.size, target.size, target_edge.size) img, target, target_edge = self.joint_transform(img, target, target_edge) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) target_edge = self.target_transform(target_edge) return img, target, target_edge, label def __len__(self): return len(self.imgs) class TestFolder_joint(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, label, img_path def __len__(self): return len(self.imgs) def make_dataset(root): img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')] return [(os.path.join(root, img_name + '.jpg'), os.path.join(root, img_name + '.png')) for img_name in img_list] class ImageFolder(data.Dataset): # image and gt should be 
in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, root, joint_transform=None, transform=None, target_transform=None): self.root = root self.imgs = make_dataset(root) self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self): return len(self.imgs) --- FILE SEPARATOR --- import numpy as np import os import pylab as pl #import pydensecrf.densecrf as dcrf class AvgMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def check_mkdir(dir_name): if not os.path.exists(dir_name): os.mkdir(dir_name) def cal_precision_recall_mae(prediction, gt): # input should be np array with data type uint8 assert prediction.dtype == np.uint8 assert gt.dtype == np.uint8 assert prediction.shape == gt.shape eps = 1e-4 prediction = prediction / 255. gt = gt / 255. mae = np.mean(np.abs(prediction - gt)) hard_gt = np.zeros(prediction.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) #t is sum of 1 precision, recall, TPR, FP = [], [], [], [] # calculating precision and recall at 255 different binarizing thresholds for threshold in range(256): threshold = threshold / 255. hard_prediction = np.zeros(prediction.shape) hard_prediction[prediction > threshold] = 1 #false_pred = np.zeros(prediction.shape) #false_prediction[prediction < threshold] = 1 a = prediction.shape tp = np.sum(hard_prediction * hard_gt) p = np.sum(hard_prediction) #for roc #fp = np.sum(false_pred * hard_gt) #tpr = (tp + eps)/(a + eps) fp = p - tp #TPR.append(tpr) FP.append(fp) precision.append((tp + eps) / (p + eps)) recall.append((tp + eps) / (t + eps)) return precision, recall, mae#, TPR, FP def cal_fmeasure(precision, recall): assert len(precision) == 256 assert len(recall) == 256 beta_square = 0.3 max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r) for p, r in zip(precision, recall)]) return max_fmeasure def cal_sizec(prediction, gt): # input should be np array with data type uint8 assert prediction.dtype == np.uint8 assert gt.dtype == np.uint8 assert prediction.shape == gt.shape eps = 1e-4 #print(gt.shape) prediction = prediction / 255. gt = gt / 255. hard_gt = np.zeros(prediction.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) #t is sum of 1 precision, recall, TPR, FP = [], [], [], [] # calculating precision and recall at 255 different binarizing thresholds best_threshold = 0 best_F = 0 for threshold in range(256): threshold = threshold / 255. 
gt_size = np.ones(prediction.shape) a = np.sum(gt_size) hard_prediction = np.zeros(prediction.shape) hard_prediction[prediction > threshold] = 1 tp = np.sum(hard_prediction * hard_gt) p = np.sum(hard_prediction) #print(a, p) precision = (tp + eps) / (p + eps) recall = (tp + eps) / (t + eps) beta_square = 0.3 fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall) if fmeasure > best_F: best_threshold = threshold*255 best_F = fmeasure sm_size = p / a if 0 <= sm_size < 0.1: sizec = 0 elif 0.1 <= sm_size < 0.2: sizec = 1 elif 0.2 <= sm_size < 0.3: sizec = 2 elif 0.3 <= sm_size < 0.4: sizec = 3 elif 0.4 <= sm_size <= 1.0: sizec = 4 return sizec, best_threshold#, TPR, FP def cal_sc(gt): # input should be np array with data type uint8 assert gt.dtype == np.uint8 eps = 1e-4 gt = gt / 255. #print(gt.shape) img_size = np.ones(gt.shape) a = np.sum(img_size) hard_gt = np.zeros(gt.shape) hard_gt[gt > 0.5] = 1 p = np.sum(hard_gt) b = np.sum(gt) sm_size = float(p) / float(a) #print(p, a, sm_size, b) #print(gt) if 0 <= sm_size < 0.1: sizec = 0 elif 0.1 <= sm_size < 0.2: sizec = 1 elif 0.2 <= sm_size < 0.3: sizec = 2 elif 0.3 <= sm_size < 0.4: sizec = 3 elif 0.4 <= sm_size <= 1.0: sizec = 4 return sizec def pr_cruve(precision, recall): assert len(precision) == 256 assert len(recall) == 256 r = [a[1] for a in zip(precision, recall)] p = [a[0] for a in zip(precision, recall)] pl.title('PR curve') pl.xlabel('Recall') pl.xlabel('Precision') pl.plot(r, p) pl.show() # for define the size type of the salient object def size_aware(gt): assert gt.dtype == np.uint8 eps = 1e-4 gt = gt / 255. hard_gt = np.zeros(gt.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) pic = np.size(hard_gt) rate = t/pic return rate # # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf # def crf_refine(img, annos): # def _sigmoid(x): # return 1 / (1 + np.exp(-x)) # assert img.dtype == np.uint8 # assert annos.dtype == np.uint8 # assert img.shape[:2] == annos.shape # # img and annos should be np array with data type uint8 # EPSILON = 1e-8 # M = 2 # salient or not # tau = 1.05 # # Setup the CRF model # d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M) # anno_norm = annos / 255. # n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm)) # p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm)) # U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32') # U[0, :] = n_energy.flatten() # U[1, :] = p_energy.flatten() # d.setUnaryEnergy(U) # d.addPairwiseGaussian(sxy=3, compat=3) # d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5) # # Do the inference # infer = np.array(d.inference(1)).astype('float32') # res = infer[1, :] # res = res * 255 # res = res.reshape(img.shape[:2]) # return res.astype('uint8')
{ "imported_by": [], "imports": [ "/datasets.py", "/misc.py" ] }
Sssssbo/SDCNet
/infer_SDCNet.py
import numpy as np
import os
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm

from misc import check_mkdir, AvgMeter, cal_precision_recall_mae, cal_fmeasure, cal_sizec, cal_sc
from datasets import TestFolder_joint
import joint_transforms
from model import R3Net, SDCNet

torch.manual_seed(2021)

# set which gpu to use
torch.cuda.set_device(6)

# the following two args specify the location of the file of trained model (pth extension)
# you should have the pth file in the folder './$ckpt_path$/$exp_name$'
ckpt_path = './ckpt'
exp_name = 'SDCNet'

msra10k_path = './SOD_label/label_msra10k.csv'
ecssd_path = './SOD_label/label_ECSSD.csv'
dutomrom_path = './SOD_label/label_DUT-OMROM.csv'
dutste_path = './SOD_label/label_DUTS-TE.csv'
hkuis_path = './SOD_label/label_HKU-IS.csv'
pascals_path = './SOD_label/label_PASCAL-S.csv'
sed2_path = './SOD_label/label_SED2.csv'
socval_path = './SOD_label/label_SOC-Val.csv'
sod_path = './SOD_label/label_SOD.csv'
thur15k_path = './SOD_label/label_THUR-15K.csv'

args = {
    'snapshot': '30000',  # your snapshot filename (exclude extension name)
    'save_results': True,  # whether to save the resulting masks
    'test_mode': 1
}

joint_transform = joint_transforms.Compose([
    #joint_transforms.RandomCrop(300),
    #joint_transforms.RandomHorizontallyFlip(),
    #joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
    transforms.Resize((300, 300)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()

to_test = {'ECSSD': ecssd_path, 'SOD': sod_path, 'DUTS-TE': dutste_path}
#{'DUTS-TE': dutste_path,'ECSSD': ecssd_path,'SOD': sod_path, 'SED2': sed2_path, 'PASCAL-S': pascals_path, 'HKU-IS': hkuis_path, 'DUT-OMROM': dutomrom_path}


def main():
    net = SDCNet(num_classes=5).cuda()

    print('load snapshot \'%s\' for testing, mode:\'%s\'' % (args['snapshot'], args['test_mode']))
    print(exp_name)
    net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
    net.eval()

    results = {}
    with torch.no_grad():
        for name, root in to_test.items():
            print('load snapshot \'%s\' for testing %s' % (args['snapshot'], name))
            test_data = pd.read_csv(root)
            test_set = TestFolder_joint(test_data, joint_transform, img_transform, target_transform)
            test_loader = DataLoader(test_set, batch_size=1, num_workers=0, shuffle=False)

            precision0_record, recall0_record = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision1_record, recall1_record = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision2_record, recall2_record = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision3_record, recall3_record = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision4_record, recall4_record = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision5_record, recall5_record = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            precision6_record, recall6_record = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
            mae0_record = AvgMeter()
            mae1_record = AvgMeter()
            mae2_record = AvgMeter()
            mae3_record = AvgMeter()
            mae4_record = AvgMeter()
            mae5_record = AvgMeter()
            mae6_record = AvgMeter()
            n0, n1, n2, n3, n4, n5 = 0, 0, 0, 0, 0, 0

            if args['save_results']:
                check_mkdir(os.path.join(ckpt_path, exp_name, '%s_%s' % (name, args['snapshot'])))

            for i, (inputs, gt, labels, img_path) in enumerate(tqdm(test_loader)):
                shape = gt.size()[2:]
                img_var = Variable(inputs).cuda()
                img = np.array(to_pil(img_var.data.squeeze(0).cpu()))
                gt = np.array(to_pil(gt.data.squeeze(0).cpu()))
                sizec = labels.numpy()

                pred2021 = net(img_var, sizec)
                pred2021 = F.interpolate(pred2021, size=shape, mode='bilinear', align_corners=True)
                pred2021 = np.array(to_pil(pred2021.data.squeeze(0).cpu()))

                if labels == 0:
                    precision1, recall1, mae1 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision1, recall1)):
                        p, r = pdata
                        precision1_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall1_record[pidx].update(r)
                    mae1_record.update(mae1)
                    n1 += 1
                elif labels == 1:
                    precision2, recall2, mae2 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision2, recall2)):
                        p, r = pdata
                        precision2_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall2_record[pidx].update(r)
                    mae2_record.update(mae2)
                    n2 += 1
                elif labels == 2:
                    precision3, recall3, mae3 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision3, recall3)):
                        p, r = pdata
                        precision3_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall3_record[pidx].update(r)
                    mae3_record.update(mae3)
                    n3 += 1
                elif labels == 3:
                    precision4, recall4, mae4 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision4, recall4)):
                        p, r = pdata
                        precision4_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall4_record[pidx].update(r)
                    mae4_record.update(mae4)
                    n4 += 1
                elif labels == 4:
                    precision5, recall5, mae5 = cal_precision_recall_mae(pred2021, gt)
                    for pidx, pdata in enumerate(zip(precision5, recall5)):
                        p, r = pdata
                        precision5_record[pidx].update(p)
                        #print('Presicion:', p, 'Recall:', r)
                        recall5_record[pidx].update(r)
                    mae5_record.update(mae5)
                    n5 += 1

                precision6, recall6, mae6 = cal_precision_recall_mae(pred2021, gt)
                for pidx, pdata in enumerate(zip(precision6, recall6)):
                    p, r = pdata
                    precision6_record[pidx].update(p)
                    recall6_record[pidx].update(r)
                mae6_record.update(mae6)

                img_name = os.path.split(str(img_path))[1]
                img_name = os.path.splitext(img_name)[0]
                n0 += 1

                if args['save_results']:
                    Image.fromarray(pred2021).save(os.path.join(ckpt_path, exp_name, '%s_%s' % (
                        name, args['snapshot']), img_name + '_2021.png'))

            fmeasure1 = cal_fmeasure([precord.avg for precord in precision1_record],
                                     [rrecord.avg for rrecord in recall1_record])
            fmeasure2 = cal_fmeasure([precord.avg for precord in precision2_record],
                                     [rrecord.avg for rrecord in recall2_record])
            fmeasure3 = cal_fmeasure([precord.avg for precord in precision3_record],
                                     [rrecord.avg for rrecord in recall3_record])
            fmeasure4 = cal_fmeasure([precord.avg for precord in precision4_record],
                                     [rrecord.avg for rrecord in recall4_record])
            fmeasure5 = cal_fmeasure([precord.avg for precord in precision5_record],
                                     [rrecord.avg for rrecord in recall5_record])
            fmeasure6 = cal_fmeasure([precord.avg for precord in precision6_record],
                                     [rrecord.avg for rrecord in recall6_record])

            results[name] = {'fmeasure1': fmeasure1, 'mae1': mae1_record.avg,
                             'fmeasure2': fmeasure2, 'mae2': mae2_record.avg,
                             'fmeasure3': fmeasure3, 'mae3': mae3_record.avg,
                             'fmeasure4': fmeasure4, 'mae4': mae4_record.avg,
                             'fmeasure5': fmeasure5, 'mae5': mae5_record.avg,
                             'fmeasure6': fmeasure6, 'mae6': mae6_record.avg}

            print('test results:')
            print('[fmeasure1 %.3f], [mae1 %.4f], [class1 %.0f]\n'
                  '[fmeasure2 %.3f], [mae2 %.4f], [class2 %.0f]\n'
                  '[fmeasure3 %.3f], [mae3 %.4f], [class3 %.0f]\n'
                  '[fmeasure4 %.3f], [mae4 %.4f], [class4 %.0f]\n'
                  '[fmeasure5 %.3f], [mae5 %.4f], [class5 %.0f]\n'
                  '[fmeasure6 %.3f], [mae6 %.4f], [all %.0f]\n' %
                  (fmeasure1, mae1_record.avg, n1, fmeasure2, mae2_record.avg, n2,
                   fmeasure3, mae3_record.avg, n3, fmeasure4, mae4_record.avg, n4,
                   fmeasure5, mae5_record.avg, n5, fmeasure6, mae6_record.avg, n0))


def accuracy(y_pred, y_actual, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    final_acc = 0
    maxk = max(topk)
    # for prob_threshold in np.arange(0, 1, 0.01):
    PRED_COUNT = y_actual.size(0)
    PRED_CORRECT_COUNT = 0
    prob, pred = y_pred.topk(maxk, 1, True, True)
    # prob = np.where(prob > prob_threshold, prob, 0)
    for j in range(pred.size(0)):
        if int(y_actual[j]) == int(pred[j]):
            PRED_CORRECT_COUNT += 1
    if PRED_COUNT == 0:
        final_acc = 0
    else:
        final_acc = float(PRED_CORRECT_COUNT / PRED_COUNT)
    return final_acc * 100, PRED_COUNT


if __name__ == '__main__':
    main()
import torch import torch.nn.functional as F from torch import nn from resnext import ResNeXt101 class R3Net(nn.Module): def __init__(self): super(R3Net, self).__init__() res50 = ResNeXt101() self.layer0 = res50.layer0 self.layer1 = res50.layer1 self.layer2 = res50.layer2 self.layer3 = res50.layer3 self.layer4 = res50.layer4 self.reduce_low = nn.Sequential( nn.Conv2d(64 + 256 + 512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce_high = nn.Sequential( nn.Conv2d(1024 + 2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), _ASPP(256) ) self.predict0 = nn.Conv2d(256, 1, kernel_size=1) self.predict1 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict2 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict3 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict4 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict5 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict6 = nn.Sequential( nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) for m in self.modules(): if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout): m.inplace = True def forward(self, x, label = None): layer0 = self.layer0(x) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) l0_size = layer0.size()[2:] reduce_low = self.reduce_low(torch.cat(( layer0, F.interpolate(layer1, size=l0_size, mode='bilinear', align_corners=True), F.interpolate(layer2, size=l0_size, mode='bilinear', align_corners=True)), 1)) reduce_high = self.reduce_high(torch.cat(( layer3, F.interpolate(layer4, size=layer3.size()[2:], mode='bilinear', align_corners=True)), 1)) reduce_high = F.interpolate(reduce_high, size=l0_size, mode='bilinear', align_corners=True) predict0 = self.predict0(reduce_high) predict1 = self.predict1(torch.cat((predict0, reduce_low), 1)) + predict0 predict2 = self.predict2(torch.cat((predict1, reduce_high), 1)) + predict1 predict3 = self.predict3(torch.cat((predict2, reduce_low), 1)) + predict2 predict4 = self.predict4(torch.cat((predict3, reduce_high), 1)) + predict3 predict5 = self.predict5(torch.cat((predict4, reduce_low), 1)) + predict4 predict6 = self.predict6(torch.cat((predict5, reduce_high), 1)) + predict5 predict0 = F.interpolate(predict0, size=x.size()[2:], mode='bilinear', align_corners=True) predict1 = 
F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True) predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True) predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True) predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True) predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True) predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True) if self.training: return predict0, predict1, predict2, predict3, predict4, predict5, predict6 return F.sigmoid(predict6) #-------------------------------------------------------------------------------------------- class SDCNet(nn.Module): def __init__(self, num_classes): super(SDCNet, self).__init__() res50 = ResNeXt101() self.layer0 = res50.layer0 self.layer1 = res50.layer1 self.layer2 = res50.layer2 self.layer3 = res50.layer3 self.layer4 = res50.layer4 self.reducex = nn.Sequential( nn.Conv2d(2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), _ASPP(256) ) self.reduce5 = nn.Sequential( nn.Conv2d(64 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce6 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce7 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce8 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce9 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce10 = nn.Sequential( nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) # --------------extra module--------------- self.reduce3_0 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_1 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_2 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), 
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_3 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce3_4 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_0 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_1 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_2 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_3 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce2_4 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_0 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_1 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_2 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_3 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce1_4 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_0 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) 
self.reduce0_1 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_2 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_3 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reduce0_4 = nn.Sequential( nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) # self.predict0 = nn.Conv2d(256, 1, kernel_size=1) self.predict1 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict2 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict3 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict4 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict5 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict6 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict7 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict8 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict9 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.predict10 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 1, kernel_size=1) ) self.pre4 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), 
nn.Conv2d(128, 2, kernel_size=1) ) self.pre3 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 2, kernel_size=1) ) self.pre2 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 2, kernel_size=1) ) self.pre1 = nn.Sequential( nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(128, 2, kernel_size=1) ) self.reducex_1 = nn.Sequential( nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reducex_2 = nn.Sequential( nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) self.reducex_3 = nn.Sequential( nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(), nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU() ) for m in self.modules(): if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout): m.inplace = True self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc0 = nn.Sequential( nn.BatchNorm1d(256), nn.Dropout(0.5), nn.Linear(256, num_classes), ) def forward(self, x, c): layer0 = self.layer0(x) layer1 = self.layer1(layer0) layer2 = self.layer2(layer1) layer3 = self.layer3(layer2) layer4 = self.layer4(layer3) l0_size = layer0.size()[2:] l1_size = layer1.size()[2:] l2_size = layer2.size()[2:] l3_size = layer3.size()[2:] F1 = self.reducex(layer4) p4 = self.pre4(F1) p4 = F.interpolate(p4, size=x.size()[2:], mode='bilinear', align_corners=True) F0_4 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True) F0_3 = self.reducex_3(torch.cat((F0_4, layer3), 1)) p3 = self.pre3(F0_3) p3 = F.interpolate(p3, size=x.size()[2:], mode='bilinear', align_corners=True) F0_3 = F.interpolate(F0_3, size=l2_size, mode='bilinear', align_corners=True) F0_2 = self.reducex_2(torch.cat((F0_3, layer2), 1)) p2 = self.pre2(F0_2) p2 = F.interpolate(p2, size=x.size()[2:], mode='bilinear', align_corners=True) F0_2 = F.interpolate(F0_2, size=l1_size, mode='bilinear', align_corners=True) F0_1 = self.reducex_1(torch.cat((F0_2, layer1), 1)) p1 = self.pre1(F0_1) p1 = F.interpolate(p1, size=x.size()[2:], mode='bilinear', align_corners=True) p5 = p4 + p3 + p2 + p1 #saliency detect predict1 = self.predict1(F1) predict1 = F.interpolate(predict1, size=l3_size, mode='bilinear', align_corners=True) F1 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True) F2 = F1[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F2[i, :, :, :] = self.reduce3_0( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 1: F2[i, :, :, :] = self.reduce3_1( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 2: F2[i, :, :, :] = self.reduce3_2( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 3: F2[i, :, :, :] = self.reduce3_3( 
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 4: F2[i, :, :, :] = self.reduce3_4( torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1)) predict2 = self.predict2(F2) + predict1 predict2 = F.interpolate(predict2, size=l2_size, mode='bilinear', align_corners=True) F2 = F.interpolate(F2, size=l2_size, mode='bilinear', align_corners=True) F3 = F2[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F3[i, :, :, :] = self.reduce2_0( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 1: F3[i, :, :, :] = self.reduce2_1( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 2: F3[i, :, :, :] = self.reduce2_2( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 3: F3[i, :, :, :] = self.reduce2_3( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 4: F3[i, :, :, :] = self.reduce2_4( torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1)) predict3 = self.predict3(F3) + predict2 predict3 = F.interpolate(predict3, size=l1_size, mode='bilinear', align_corners=True) F3 = F.interpolate(F3, size=l1_size, mode='bilinear', align_corners=True) F4 = F3[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F4[i, :, :, :] = self.reduce1_0( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 1: F4[i, :, :, :] = self.reduce1_1( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 2: F4[i, :, :, :] = self.reduce1_2( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 3: F4[i, :, :, :] = self.reduce1_3( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) elif c[i] == 4: F4[i, :, :, :] = self.reduce1_4( torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1)) predict4 = self.predict4(F4) + predict3 F5 = self.reduce5(torch.cat((F4, layer0), 1)) predict5 = self.predict5(F5) + predict4 F0 = F4[:, :, :, :].clone().detach() for i in range(len(c)): if c[i] == 0: F0[i, :, :, :] = self.reduce0_0(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 1: F0[i, :, :, :] = self.reduce0_1(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 2: F0[i, :, :, :] = self.reduce0_2(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 3: F0[i, :, :, :] = self.reduce0_3(layer0[i, :, :, :].unsqueeze(0)) elif c[i] == 4: F0[i, :, :, :] = self.reduce0_4(layer0[i, :, :, :].unsqueeze(0)) F1 = F.interpolate(F1, size=l1_size, mode='bilinear', align_corners=True) F2 = F.interpolate(F2, size=l1_size, mode='bilinear', align_corners=True) F6 = self.reduce6(torch.cat((F0, F5), 1)) F7 = self.reduce7(torch.cat((F0, F4), 1)) F8 = self.reduce8(torch.cat((F0, F3), 1)) F9 = self.reduce9(torch.cat((F0, F2), 1)) F10 = self.reduce10(torch.cat((F0, F1), 1)) predict6 = self.predict6(F6) + predict5 predict7 = self.predict7(F7) + predict6 predict8 = self.predict8(F8) + predict7 predict9 = self.predict9(F9) + predict8 predict10 = self.predict10(F10) + predict9 predict11 = predict6 + predict7 + predict8 + predict9 + predict10 predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True) predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True) predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True) predict4 = F.interpolate(predict4, size=x.size()[2:], 
mode='bilinear', align_corners=True) predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True) predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True) predict7 = F.interpolate(predict7, size=x.size()[2:], mode='bilinear', align_corners=True) predict8 = F.interpolate(predict8, size=x.size()[2:], mode='bilinear', align_corners=True) predict9 = F.interpolate(predict9, size=x.size()[2:], mode='bilinear', align_corners=True) predict10 = F.interpolate(predict10, size=x.size()[2:], mode='bilinear', align_corners=True) predict11 = F.interpolate(predict11, size=x.size()[2:], mode='bilinear', align_corners=True) if self.training: return p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 return F.sigmoid(predict11) #---------------------------------------------------------------------------------------- class _ASPP(nn.Module): def __init__(self, in_dim): super(_ASPP, self).__init__() down_dim = in_dim // 2 self.conv1 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv2 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=2, padding=2), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv3 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=4, padding=4), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv4 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.conv5 = nn.Sequential( nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU() ) self.fuse = nn.Sequential( nn.Conv2d(5 * down_dim, in_dim, kernel_size=1), nn.BatchNorm2d(in_dim), nn.PReLU() ) def forward(self, x): conv1 = self.conv1(x) conv2 = self.conv2(x) conv3 = self.conv3(x) conv4 = self.conv4(x) conv5 = F.interpolate(self.conv5(F.adaptive_avg_pool2d(x, 1)), size=x.size()[2:], mode='bilinear', align_corners=True) return self.fuse(torch.cat((conv1, conv2, conv3, conv4, conv5), 1)) --- FILE SEPARATOR --- import os import os.path import torch.utils.data as data from PIL import Image class ImageFolder_joint(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] self.label_list = label_list for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __len__(self): return len(self.label_list) def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, label class ImageFolder_joint_for_edge(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs 
self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] edge_path = "."+gt_path.split(".")[1]+"_edge."+gt_path.split(".")[2] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') target_edge = Image.open(edge_path).convert('L') if self.joint_transform is not None: if img.size != target.size or img.size != target_edge.size: print("error path:", img_path, gt_path) print("size:", img.size, target.size, target_edge.size) img, target, target_edge = self.joint_transform(img, target, target_edge) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) target_edge = self.target_transform(target_edge) return img, target, target_edge, label def __len__(self): return len(self.imgs) class TestFolder_joint(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): imgs = [] for index, row in label_list.iterrows(): imgs.append((row['img_path'], row['gt_path'], row['label'])) self.imgs = imgs self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path, label = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target, label, img_path def __len__(self): return len(self.imgs) def make_dataset(root): img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')] return [(os.path.join(root, img_name + '.jpg'), os.path.join(root, img_name + '.png')) for img_name in img_list] class ImageFolder(data.Dataset): # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) def __init__(self, root, joint_transform=None, transform=None, target_transform=None): self.root = root self.imgs = make_dataset(root) self.joint_transform = joint_transform self.transform = transform self.target_transform = target_transform def __getitem__(self, index): img_path, gt_path = self.imgs[index] img = Image.open(img_path).convert('RGB') target = Image.open(gt_path).convert('L') if self.joint_transform is not None: img, target = self.joint_transform(img, target) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) return img, target def __len__(self): return len(self.imgs) --- FILE SEPARATOR --- import numpy as np import os import pylab as pl #import pydensecrf.densecrf as dcrf class AvgMeter(object): def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def check_mkdir(dir_name): if not os.path.exists(dir_name): os.mkdir(dir_name) def cal_precision_recall_mae(prediction, gt): # input should be np array with data type uint8 assert prediction.dtype == np.uint8 assert gt.dtype == np.uint8 assert prediction.shape == gt.shape eps = 1e-4 prediction = prediction / 255. 
gt = gt / 255. mae = np.mean(np.abs(prediction - gt)) hard_gt = np.zeros(prediction.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) #t is sum of 1 precision, recall, TPR, FP = [], [], [], [] # calculating precision and recall at 255 different binarizing thresholds for threshold in range(256): threshold = threshold / 255. hard_prediction = np.zeros(prediction.shape) hard_prediction[prediction > threshold] = 1 #false_pred = np.zeros(prediction.shape) #false_prediction[prediction < threshold] = 1 a = prediction.shape tp = np.sum(hard_prediction * hard_gt) p = np.sum(hard_prediction) #for roc #fp = np.sum(false_pred * hard_gt) #tpr = (tp + eps)/(a + eps) fp = p - tp #TPR.append(tpr) FP.append(fp) precision.append((tp + eps) / (p + eps)) recall.append((tp + eps) / (t + eps)) return precision, recall, mae#, TPR, FP def cal_fmeasure(precision, recall): assert len(precision) == 256 assert len(recall) == 256 beta_square = 0.3 max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r) for p, r in zip(precision, recall)]) return max_fmeasure def cal_sizec(prediction, gt): # input should be np array with data type uint8 assert prediction.dtype == np.uint8 assert gt.dtype == np.uint8 assert prediction.shape == gt.shape eps = 1e-4 #print(gt.shape) prediction = prediction / 255. gt = gt / 255. hard_gt = np.zeros(prediction.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) #t is sum of 1 precision, recall, TPR, FP = [], [], [], [] # calculating precision and recall at 255 different binarizing thresholds best_threshold = 0 best_F = 0 for threshold in range(256): threshold = threshold / 255. gt_size = np.ones(prediction.shape) a = np.sum(gt_size) hard_prediction = np.zeros(prediction.shape) hard_prediction[prediction > threshold] = 1 tp = np.sum(hard_prediction * hard_gt) p = np.sum(hard_prediction) #print(a, p) precision = (tp + eps) / (p + eps) recall = (tp + eps) / (t + eps) beta_square = 0.3 fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall) if fmeasure > best_F: best_threshold = threshold*255 best_F = fmeasure sm_size = p / a if 0 <= sm_size < 0.1: sizec = 0 elif 0.1 <= sm_size < 0.2: sizec = 1 elif 0.2 <= sm_size < 0.3: sizec = 2 elif 0.3 <= sm_size < 0.4: sizec = 3 elif 0.4 <= sm_size <= 1.0: sizec = 4 return sizec, best_threshold#, TPR, FP def cal_sc(gt): # input should be np array with data type uint8 assert gt.dtype == np.uint8 eps = 1e-4 gt = gt / 255. #print(gt.shape) img_size = np.ones(gt.shape) a = np.sum(img_size) hard_gt = np.zeros(gt.shape) hard_gt[gt > 0.5] = 1 p = np.sum(hard_gt) b = np.sum(gt) sm_size = float(p) / float(a) #print(p, a, sm_size, b) #print(gt) if 0 <= sm_size < 0.1: sizec = 0 elif 0.1 <= sm_size < 0.2: sizec = 1 elif 0.2 <= sm_size < 0.3: sizec = 2 elif 0.3 <= sm_size < 0.4: sizec = 3 elif 0.4 <= sm_size <= 1.0: sizec = 4 return sizec def pr_cruve(precision, recall): assert len(precision) == 256 assert len(recall) == 256 r = [a[1] for a in zip(precision, recall)] p = [a[0] for a in zip(precision, recall)] pl.title('PR curve') pl.xlabel('Recall') pl.xlabel('Precision') pl.plot(r, p) pl.show() # for define the size type of the salient object def size_aware(gt): assert gt.dtype == np.uint8 eps = 1e-4 gt = gt / 255. 
hard_gt = np.zeros(gt.shape) hard_gt[gt > 0.5] = 1 t = np.sum(hard_gt) pic = np.size(hard_gt) rate = t/pic return rate # # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf # def crf_refine(img, annos): # def _sigmoid(x): # return 1 / (1 + np.exp(-x)) # assert img.dtype == np.uint8 # assert annos.dtype == np.uint8 # assert img.shape[:2] == annos.shape # # img and annos should be np array with data type uint8 # EPSILON = 1e-8 # M = 2 # salient or not # tau = 1.05 # # Setup the CRF model # d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M) # anno_norm = annos / 255. # n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm)) # p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm)) # U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32') # U[0, :] = n_energy.flatten() # U[1, :] = p_energy.flatten() # d.setUnaryEnergy(U) # d.addPairwiseGaussian(sxy=3, compat=3) # d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5) # # Do the inference # infer = np.array(d.inference(1)).astype('float32') # res = infer[1, :] # res = res * 255 # res = res.reshape(img.shape[:2]) # return res.astype('uint8')
{ "imported_by": [], "imports": [ "/model.py", "/datasets.py", "/misc.py" ] }
Sssssbo/SDCNet
/model/make_model.py
"import torch\nimport torch.nn as nn\nfrom .backbones.resnet import ResNet, Comb_ResNet, Pure_ResNet(...TRUNCATED)
"import math\n\nimport torch\nfrom torch import nn\n\n\ndef conv3x3(in_planes, out_planes, stride=1)(...TRUNCATED)
{ "imported_by": [], "imports": [ "/model/backbones/resnet.py" ] }
Sssssbo/SDCNet
/resnet/__init__.py
from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
"from .resnet import ResNet, BasicBlock, Bottleneck\nimport torch\nfrom torch import nn\nfrom .confi(...TRUNCATED)
{ "imported_by": [], "imports": [ "/resnet/make_model.py" ] }
riadghorra/whiteboard-oop-project
/src/client.py
"import socket\nimport json\nimport sys\nimport math\nfrom white_board import WhiteBoard, binary_to_(...TRUNCATED)
"import pygame\nimport pygame.draw\nimport json\nimport sys\nfrom functools import reduce\nimport op(...TRUNCATED)
{ "imported_by": [], "imports": [ "/src/white_board.py" ] }
riadghorra/whiteboard-oop-project
/src/main.py
"from white_board import WhiteBoard\nimport json\n\n'''\nThis file is used to run locally or to debu(...TRUNCATED)
"import pygame\nimport pygame.draw\nimport json\nimport sys\nfrom functools import reduce\nimport op(...TRUNCATED)
{ "imported_by": [], "imports": [ "/src/white_board.py" ] }