text
stringlengths
1
93.6k
gen_rnn=gen_rnn,
var_mlp_in=var_mlp_in,
var_mlp_out=var_mlp_out,
var_rnn=var_rnn)
# Initialize model parameters, then compile the Theano functions.
# NOTE(review): SCG, sample_batch, Xtr, visualize_attention, and
# result_tag are defined earlier in the file (not visible in this chunk).
SCG.initialize()
compile_start_time = time.time()
# build the attention trajectory sampler
SCG.build_attention_funcs()
# quick smoke test of the attention trajectory sampler, done BEFORE the
# (slow) main model compilation so a broken sampler fails fast
Xb = sample_batch(Xtr, bs=32)
result = SCG.sample_attention(Xb, Xb)
visualize_attention(result, pre_tag=result_tag, post_tag="b0")
# build the main model functions (i.e. training and cost functions)
SCG.build_model_funcs()
compile_end_time = time.time()
compile_minutes = (compile_end_time - compile_start_time) / 60.0
print("THEANO COMPILE TIME (MIN): {}".format(compile_minutes))
# TEST SAVE/LOAD FUNCTIONALITY: round-trip the parameters through disk
# to confirm serialization works before investing time in training
param_save_file = "{}_params.pkl".format(result_tag)
SCG.save_model_params(param_save_file)
SCG.load_model_params(param_save_file)
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
print("Beginning to train the model...")
# Open the results log in TEXT mode: the training loop writes str
# objects (joint_str + "\n"), which raises TypeError on a binary-mode
# ('wb') handle under Python 3.
out_file = open("{}_results.txt".format(result_tag), 'w')
out_file.flush()
# running sums of the per-batch cost components; averaged and reset
# every 250 batches by the diagnostic block inside the training loop
costs = [0. for i in range(10)]
learn_rate = 0.0001  # base SGD learning rate (warmed up, then decayed)
momentum = 0.95      # base momentum for the first-moment estimate
for step in range(250000):
    # linearly warm up the learning rate and momentum over the first
    # 5k / 10k updates respectively
    rate_ramp = min(1.0, (step + 1) / 5000.0)
    mom_ramp = min(1.0, (step + 1) / 10000.0)
    if (step + 1) % 10000 == 0:
        # geometric decay of the base learning rate every 10k batches
        learn_rate = learn_rate * 0.95
    # set sgd and objective function hyperparams for this update
    SCG.set_sgd_params(lr=rate_ramp * learn_rate,
                       mom_1=mom_ramp * momentum, mom_2=0.99)
    SCG.set_lam_kld(lam_kld_q2p=0.95, lam_kld_p2q=0.05,
                    lam_kld_amu=0.0, lam_kld_alv=0.1)
    # perform a minibatch update and accumulate the per-term costs
    Xb = sample_batch(Xtr, bs=batch_size)
    result = SCG.train_joint(Xb, Xb)
    costs = [c + r for (c, r) in zip(costs, result)]
    # output diagnostic information every 250 batches
    if step % 250 == 0:
        costs = [c / 250.0 for c in costs]
        report_lines = [
            "-- batch {0:d} --".format(step),
            "    total_cost: {0:.4f}".format(costs[0]),
            "    nll_term  : {0:.4f}".format(costs[1]),
            "    kld_q2p   : {0:.4f}".format(costs[2]),
            "    kld_p2q   : {0:.4f}".format(costs[3]),
            "    kld_amu   : {0:.4f}".format(costs[4]),
            "    kld_alv   : {0:.4f}".format(costs[5]),
            "    reg_term  : {0:.4f}".format(costs[6]),
        ]
        joint_str = "\n".join(report_lines)
        print(joint_str)
        out_file.write(joint_str + "\n")
        out_file.flush()
        # reset the running sums for the next diagnostic interval
        costs = [0.0 for c in costs]
    # checkpoint parameters and validate every 500 batches
    if step % 500 == 0:
        SCG.save_model_params("{}_params.pkl".format(result_tag))
        #############################################
        # check model performance on validation set #
        #############################################
        Xb = sample_batch(Xva, bs=500)
        result = SCG.compute_nll_bound(Xb, Xb)
        va_lines = [
            "    va_total_cost: {0:.4f}".format(float(result[0])),
            "    va_nll_term  : {0:.4f}".format(float(result[1])),
            "    va_kld_q2p   : {0:.4f}".format(float(result[2])),
            "    va_kld_p2q   : {0:.4f}".format(float(result[3])),
            "    va_kld_amu   : {0:.4f}".format(float(result[4])),
            "    va_kld_alv   : {0:.4f}".format(float(result[5])),
            "    va_reg_term  : {0:.4f}".format(float(result[6])),
        ]
        joint_str = "\n".join(va_lines)
        print(joint_str)
        out_file.write(joint_str + "\n")
        out_file.flush()
        ###########################################
        # sample and draw attention trajectories. #
        ###########################################
        Xb = sample_batch(Xva, bs=32)
        result = SCG.sample_attention(Xb, Xb)
        post_tag = "b{0:d}".format(step)
        visualize_attention(result, pre_tag=result_tag, post_tag=post_tag)
######################################
######################################
## ##
## Test attention-based imputation. ##
## ##
######################################
######################################