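import gc
import os

# This excerpt is the training/validation loop of a larger TensorFlow 1.x
# script; it assumes the graph and bookkeeping objects are created earlier:
# the placeholders x1, x2, x3, x5 and is_training, the ops Training_step1,
# Training_step2, delta_PSNR, PSNR_0, flow_loss, MSE and summary_op, plus
# sess, saver, summary_writer, file_object, load_stack(), and the constants
# num_epoch, num_TrainingStack, num_ValidationStack, BATCH_SIZE, dir_model,
# ite_step and the counter num_TrainingBatch_count.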
### Epoch by Epoch
for ite_epoch in range(num_epoch):

    ### Train stack by stack
    for ite_stack in range(num_TrainingStack):

        # Drop references to the previous stack and force a collection
        # before loading the next one, keeping peak memory down
        pre_list, cmp_list, sub_list, raw_list = [], [], [], []
        gc.collect()
        pre_list, cmp_list, sub_list, raw_list = load_stack("tra", ite_stack)
        gc.collect()

        # Number of full batches; any remainder patches are skipped
        num_batch = int(len(pre_list) / BATCH_SIZE)
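        # load_stack() is defined elsewhere in the full script. A minimal
        # sketch of what it might look like, assuming the stacks are stored
        # as one NumPy archive per split/index (the file layout and key
        # names here are hypothetical, not from the original):
        #
        #   def load_stack(split, index):
        #       data = np.load("%s_stack_%02d.npz" % (split, index))
        #       return (list(data["pre"]), list(data["cmp"]),
        #               list(data["sub"]), list(data["raw"]))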

        ### Batch by batch
        for ite_batch in range(num_batch):

            print("\rstep %1d - epoch %2d/%2d - training stack %2d/%2d - batch %3d/%3d" % \
                (ite_step, ite_epoch+1, num_epoch, ite_stack+1, num_TrainingStack, ite_batch+1, num_batch), end="")

            # Index range [start_index, next_start_index) of this batch,
            # e.g. with BATCH_SIZE = 32, batch 3 covers patches 96..127
            start_index = ite_batch * BATCH_SIZE
            next_start_index = (ite_batch + 1) * BATCH_SIZE

            # Two-stage training: each step runs its own optimizer op
            if ite_step == 1:
                Training_step1.run(session=sess, feed_dict={
                    x1: pre_list[start_index:next_start_index],
                    x2: cmp_list[start_index:next_start_index],
                    x3: sub_list[start_index:next_start_index],
                    x5: raw_list[start_index:next_start_index],
                    is_training: True})  # train
            else:
                Training_step2.run(session=sess, feed_dict={
                    x1: pre_list[start_index:next_start_index],
                    x2: cmp_list[start_index:next_start_index],
                    x3: sub_list[start_index:next_start_index],
                    x5: raw_list[start_index:next_start_index],
                    is_training: True})
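            # The placeholder numbering jumps from x3 to x5; x4 is presumably
            # defined and fed elsewhere in the full script. Judging by the
            # losses logged below ("MSE loss of MC" and "MSE loss of QE"),
            # step 1 and step 2 appear to train the motion-compensation and
            # quality-enhancement parts of the network, respectively.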

            # Update TensorBoard and print the result twice per stack:
            # halfway through the batches and at the last batch
            num_TrainingBatch_count += 1
            if ((ite_batch + 1) == int(num_batch / 2)) or ((ite_batch + 1) == num_batch):
                summary, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch = sess.run(
                    [summary_op, delta_PSNR, PSNR_0, flow_loss, MSE],
                    feed_dict={
                        x1: pre_list[start_index:next_start_index],
                        x2: cmp_list[start_index:next_start_index],
                        x3: sub_list[start_index:next_start_index],
                        x5: raw_list[start_index:next_start_index],
                        is_training: False})  # evaluate without updating weights
                summary_writer.add_summary(summary, num_TrainingBatch_count)
                print("\rstep %1d - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f" % \
                    (ite_step, ite_epoch+1, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch))
                file_object.write("step %1d - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f\n" % \
                    (ite_step, ite_epoch+1, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch))
                file_object.flush()
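                # summary_op and summary_writer come from the graph-setup code
                # that is not part of this excerpt; in TF1 that is typically
                # (an assumption, not shown here):
                #
                #   summary_op = tf.summary.merge_all()
                #   summary_writer = tf.summary.FileWriter(dir_log, sess.graph)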

    ### Store the model of this epoch
    if ite_step == 1:
        CheckPoint_path = os.path.join(dir_model, "model_step1.ckpt")
    else:
        CheckPoint_path = os.path.join(dir_model, "model_step2.ckpt")
    saver.save(sess, CheckPoint_path, global_step=ite_epoch)
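    # Passing global_step=ite_epoch makes tf.train.Saver suffix each
    # checkpoint with the epoch index (e.g. "model_step1.ckpt-3"), so later
    # epochs do not overwrite earlier ones outright (by default the Saver
    # retains only the most recent few). A separate run could reload the
    # newest checkpoint with:
    #
    #   saver.restore(sess, tf.train.latest_checkpoint(dir_model))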

    sum_improved_PSNR = 0
    num_patch_count = 0

    ### Eval stack by stack, and report together for this epoch
    for ite_stack in range(num_ValidationStack):

        pre_list, cmp_list, sub_list, raw_list = [], [], [], []
        gc.collect()
        pre_list, cmp_list, sub_list, raw_list = load_stack("val", ite_stack)
        gc.collect()

        num_batch = int(len(pre_list) / BATCH_SIZE)

        ### Batch by batch
        for ite_batch in range(num_batch):

            print("step %1d - epoch %2d/%2d - validation stack %2d/%2d " % \
                (ite_step, ite_epoch+1, num_epoch, ite_stack+1, num_ValidationStack))

            start_index = ite_batch * BATCH_SIZE
            next_start_index = (ite_batch + 1) * BATCH_SIZE

            # Validation only evaluates delta_PSNR; no training op is run
            delta_PSNR_batch = sess.run(delta_PSNR, feed_dict={
                x1: pre_list[start_index:next_start_index],
                x2: cmp_list[start_index:next_start_index],
                x3: sub_list[start_index:next_start_index],
                x5: raw_list[start_index:next_start_index],
                is_training: False})

            sum_improved_PSNR += delta_PSNR_batch * BATCH_SIZE
            num_patch_count += BATCH_SIZE
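    # Every batch contributes exactly BATCH_SIZE patches, so the ratio below
    # is the mean per-patch PSNR improvement over the whole validation set.
    # For example, two batches of 32 patches with batch means 0.40 dB and
    # 0.60 dB give (0.40*32 + 0.60*32) / 64 = 0.50 dB.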

    # Guard against an empty validation set before dividing
    if num_patch_count != 0:
        print("### imp PSNR by model after step %1d - epoch %2d/%2d: %.3f ###" % \
            (ite_step, ite_epoch+1, num_epoch, sum_improved_PSNR/num_patch_count))
        file_object.write("### imp PSNR by model after step %1d - epoch %2d/%2d: %.3f ###\n" % \
            (ite_step, ite_epoch+1, num_epoch, sum_improved_PSNR/num_patch_count))
        file_object.flush()