# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
class Saver(object):
    """Checkpoint manager wrapping two ``tf.train.Saver`` instances.

    Maintains:
      * a rolling set of the latest ``checkpoints`` models in ``output_dir``;
      * a separate "best" set of up to ``best_checkpoints`` models (ranked by a
        caller-supplied metric score, e.g. BLEU) in ``output_dir/best``, plus a
        ``metric.log`` history and a ``topk_checkpoint`` index file so the
        ranking survives restarts.

    NOTE(review): uses TF1-era APIs (``tf.gfile``, ``tf.train.Saver``); not
    compatible with TF2 eager mode as written.
    """

    def __init__(self,
                 checkpoints=5,        # max number of latest checkpoints to keep
                 output_dir=None,      # root output directory (default "./output")
                 best_score=-1,        # best metric score seen so far (resume aid)
                 best_checkpoints=1,   # max number of best-scoring checkpoints to keep
                 ):
        if output_dir is None:
            output_dir = "./output"
        self.output_dir = output_dir
        # Best-scoring checkpoints live in a subdirectory of the main output dir.
        self.output_best_dir = os.path.join(output_dir, "best")
        # Saver for the rolling "latest" checkpoints.
        self.saver = tf.train.Saver(
            max_to_keep=checkpoints
        )
        # Recover saver bookkeeping after an interrupted run, so max_to_keep
        # pruning continues from the checkpoints already on disk.
        if tf.gfile.Exists(self.output_dir):
            ckpt = tf.train.get_checkpoint_state(self.output_dir)
            if ckpt and ckpt.all_model_checkpoint_paths:
                self.saver.recover_last_checkpoints(list(ckpt.all_model_checkpoint_paths))
        # Separate saver for the top-k best-scoring checkpoints.
        self.best_saver = tf.train.Saver(
            max_to_keep=best_checkpoints,
        )
        # Same interrupted-run recovery for the best-checkpoint directory.
        if tf.gfile.Exists(self.output_best_dir):
            ckpt = tf.train.get_checkpoint_state(self.output_best_dir)
            if ckpt and ckpt.all_model_checkpoint_paths:
                self.best_saver.recover_last_checkpoints(list(ckpt.all_model_checkpoint_paths))
        self.best_score = best_score
        # Resume the best score from the last line of metric.log, which save()
        # writes as "Steps {step}, Metric Score {score}" — the final
        # whitespace-separated token is the score.
        metric_dir = os.path.join(self.output_best_dir, "metric.log")
        if tf.gfile.Exists(metric_dir):
            # NOTE(review): plain open() here but tf.gfile elsewhere — works for
            # local paths only; the file handle is also never closed explicitly.
            metric_lines = open(metric_dir).readlines()
            if len(metric_lines) > 0:
                best_score_line = metric_lines[-1]
                self.best_score = float(best_score_line.strip().split()[-1])
        # Reload the (model_name, score) ranking of saved best checkpoints.
        self.topk_scores = []
        topk_dir = os.path.join(self.output_best_dir, "topk_checkpoint")
        ckpt_dir = os.path.join(self.output_best_dir, "checkpoint")
        # Preferred: read the tab-separated "model_name\tscore" index directly.
        if tf.gfile.Exists(topk_dir):
            with tf.gfile.Open(topk_dir) as reader:
                for line in reader:
                    model_name, score = line.strip().split("\t")
                    self.topk_scores.append((model_name, float(score)))
        # Fallback: no topk index — seed the list from the standard TF
        # "checkpoint" file's first line (model_checkpoint_path: "name")
        # paired with the best score recovered above.
        elif tf.gfile.Exists(ckpt_dir):
            latest_checkpoint = tf.gfile.Open(ckpt_dir).readline()
            # NOTE(review): split(":")[1] breaks if the quoted path itself
            # contains a ':' (e.g. Windows drive letters) — TODO confirm paths
            # are always colon-free here.
            model_name = latest_checkpoint.strip().split(":")[1].strip()
            model_name = model_name[1:-1]  # remove ""
            self.topk_scores.append((model_name, self.best_score))
        self.best_checkpoints = best_checkpoints
        # Append-mode handle kept open for the object's lifetime; save() writes
        # one line per new best score. NOTE(review): never closed — relies on
        # process exit; consider adding a close()/__del__ if handles matter.
        self.score_record = tf.gfile.Open(metric_dir, mode="a+")

    def save(self, session, step, metric_score=None):
        """Save a regular checkpoint at ``step``; if ``metric_score`` is given
        and good enough, also save/update the best-checkpoint set.

        Args:
            session: the TF session holding the variables to save.
            step: global step, used as the checkpoint filename suffix.
            metric_score: optional evaluation score; None skips best handling.
        """
        # Lazily create output directories on first save.
        # NOTE(review): MkDir is non-recursive; assumes the parent of
        # output_dir already exists — TODO confirm.
        if not tf.gfile.Exists(self.output_dir):
            tf.gfile.MkDir(self.output_dir)
        if not tf.gfile.Exists(self.output_best_dir):
            tf.gfile.MkDir(self.output_best_dir)
        # Always save into the rolling "latest" set.
        self.saver.save(session, os.path.join(self.output_dir, "model"), global_step=step)

        # Copy path -> new_path, overwriting any existing destination.
        # NOTE(review): despite the name this copies rather than moves —
        # the source file is left in place (apparently intentional, so the
        # main output dir keeps its own param/record files).
        def _move(path, new_path):
            if tf.gfile.Exists(path):
                if tf.gfile.Exists(new_path):
                    tf.gfile.Remove(new_path)
                tf.gfile.Copy(path, new_path)

        # New single best score: snapshot config/record files and log the score.
        if metric_score is not None and metric_score > self.best_score:
            self.best_score = metric_score
            _move(os.path.join(self.output_dir, "param.json"),
                  os.path.join(self.output_best_dir, "param.json"))
            _move(os.path.join(self.output_dir, "record.json"),
                  os.path.join(self.output_best_dir, "record.json"))
            # this recorder only record best scores
            self.score_record.write("Steps {}, Metric Score {}\n".format(step, metric_score))
            self.score_record.flush()

        # Save into the best set when the top-k list has room, or the new
        # score beats the current minimum of the list.
        if metric_score is not None and \
                (len(self.topk_scores) == 0 or len(self.topk_scores) < self.best_checkpoints or
                 metric_score > min([v[1] for v in self.topk_scores])):
            # Rewrite the TF "checkpoint" file so its entries are ordered by
            # score (best last as model_checkpoint_path), then resync
            # best_saver's internal list from it — this controls which old
            # checkpoint max_to_keep pruning will delete next.
            ckpt_dir = os.path.join(self.output_best_dir, "checkpoint")
            if len(self.topk_scores) > 0:
                sorted_topk_scores = sorted(self.topk_scores, key=lambda x: x[1])
                with tf.gfile.Open(ckpt_dir, mode='w') as writer:
                    best_ckpt = sorted_topk_scores[-1]
                    writer.write("model_checkpoint_path: \"{}\"\n".format(best_ckpt[0]))
                    for model_name, _ in sorted_topk_scores:
                        writer.write("all_model_checkpoint_paths: \"{}\"\n".format(model_name))
                    writer.flush()
                # update best_saver internal checkpoints status
                ckpt = tf.train.get_checkpoint_state(self.output_best_dir)
                if ckpt and ckpt.all_model_checkpoint_paths:
                    self.best_saver.recover_last_checkpoints(list(ckpt.all_model_checkpoint_paths))
            # this change mainly inspired by that sometimes for dataset,
            # the best performance is achieved by averaging top-k checkpoints
            self.best_saver.save(
                session, os.path.join(self.output_best_dir, "model"), global_step=step)
            # Record the new entry, keep only the k highest scores, and persist
            # the ranking so a restarted run can reload it.
            self.topk_scores.append(("model-{}".format(int(step)), float(metric_score)))
            sorted_topk_scores = sorted(self.topk_scores, key=lambda x: x[1])
            self.topk_scores = sorted_topk_scores[-self.best_checkpoints:]
            topk_dir = os.path.join(self.output_best_dir, "topk_checkpoint")
            with tf.gfile.Open(topk_dir, mode='w') as writer:
                for model_name, score in self.topk_scores:
                    writer.write("{}\t{}\n".format(model_name, score))
                writer.flush()

    def restore(self, session, path=None):
        """Restore variables into ``session`` from the latest checkpoint.

        Looks in ``path`` if given and existing, else in ``output_dir``. If the
        referenced model files are missing, falls back to the best-checkpoint
        directory; if some variables are absent from the checkpoint (e.g. a
        mode switch added replicas), restores the overlapping subset only.
        """
        if path is not None and tf.gfile.Exists(path):
            check_dir = path
        else:
            check_dir = self.output_dir
        checkpoint = os.path.join(check_dir, "checkpoint")
        if not tf.gfile.Exists(checkpoint):
            tf.logging.warn("No Existing Model detected")
        else:
            # Parse model_checkpoint_path: "name" from the checkpoint file.
            # NOTE(review): same colon-splitting fragility as in __init__.
            latest_checkpoint = tf.gfile.Open(checkpoint).readline()
            model_name = latest_checkpoint.strip().split(":")[1].strip()
            model_name = model_name[1:-1]  # remove ""
            model_path = os.path.join(check_dir, model_name)
            model_path = os.path.abspath(model_path)
            # A checkpoint's .meta file is the cheap existence probe for the
            # whole checkpoint file set.
            if not tf.gfile.Exists(model_path+".meta"):
                tf.logging.error("model '{}' does not exists"
                                 .format(model_path))
                # Try to fallback to best checkpoint — only when the caller did
                # not pin an explicit path.
                if path is None and check_dir == self.output_dir:
                    best_checkpoint = os.path.join(self.output_best_dir, "checkpoint")
                    if tf.gfile.Exists(best_checkpoint):
                        tf.logging.warn("Attempting to restore from best checkpoint directory")
                        best_checkpoint_line = tf.gfile.Open(best_checkpoint).readline()
                        best_model_name = best_checkpoint_line.strip().split(":")[1].strip()
                        best_model_name = best_model_name[1:-1]  # remove ""
                        best_model_path = os.path.join(self.output_best_dir, best_model_name)
                        best_model_path = os.path.abspath(best_model_path)
                        if tf.gfile.Exists(best_model_path+".meta"):
                            tf.logging.info("Found valid best checkpoint at '{}'".format(best_model_path))
                            try:
                                self.best_saver.restore(session, best_model_path)
                                tf.logging.info("Successfully restored from best checkpoint")
                            except Exception as e:
                                # Best-effort fallback: log and continue rather
                                # than abort the caller.
                                tf.logging.error("Failed to restore from best checkpoint: {}".format(e))
                        else:
                            tf.logging.error("Best checkpoint also corrupted")
            else:
                try:
                    self.saver.restore(session, model_path)
                except tf.errors.NotFoundError:
                    # In this case, we simply assume that the cycle part
                    # is mismatched, where the replicas are missing.
                    # This would happen if you switch from un-cycle mode
                    # to cycle mode.
                    tf.logging.warn("Starting Backup Restore")
                    # Partial restore: assign only the variables that exist in
                    # the checkpoint; warn about (and skip) the rest, leaving
                    # them at their current in-graph values.
                    ops = []
                    reader = tf.train.load_checkpoint(model_path)
                    for var in tf.global_variables():
                        name = var.op.name
                        if reader.has_tensor(name):
                            tf.logging.info('{} get initialization from {}'
                                            .format(name, name))
                            ops.append(
                                tf.assign(var, reader.get_tensor(name)))
                        else:
                            tf.logging.warn("{} is missed".format(name))
                    restore_op = tf.group(*ops, name="restore_global_vars")
                    session.run(restore_op)