|
|
|
|
|
|
|
|
from __future__ import absolute_import |
|
|
from __future__ import division |
|
|
from __future__ import print_function |
|
|
|
|
|
import time |
|
|
import os |
|
|
import codecs |
|
|
import random |
|
|
import socket |
|
|
import json |
|
|
|
|
|
import numpy as np |
|
|
import tensorflow as tf |
|
|
import tensorflow.contrib as tc |
|
|
|
|
|
import main as graph |
|
|
from utils.apply_bpe import BPE |
|
|
from models.vocab import Vocab |
|
|
from utils import dtype, util |
|
|
|
|
|
# Module-level TF logger; propagate=False prevents messages from also being
# emitted by ancestor (root) handlers, which would duplicate every log line.
logger = tf.get_logger()
logger.propagate = False
|
|
|
|
|
|
|
|
|
|
|
# Default hyper-parameter set for the whole training/evaluation pipeline.
# Values here can be refined at runtime via the --parameters / --config
# flags (see main()), and are persisted/restored through
# save_parameters()/load_parameters().
global_params = tc.training.HParams(
    # ---- embedding sharing ----
    # tie source and target embedding tables
    shared_source_target_embedding=False,
    # tie target embedding and output softmax weights
    shared_target_softmax_embedding=True,

    # ---- sign-language specific inputs ----
    # (semantics defined by consumers elsewhere in the project; all paths
    # default to empty / unset)
    sign_cfg='',
    gloss_path='',
    smkd_model_path='',

    # when True, attention weights are collected — presumably for
    # visualization/debugging; consumed outside this file (TODO confirm)
    collect_attention_weights=False,

    # video input used in `infer` mode; None means not provided
    inference_video_path=None,

    sep_layer=0,

    # ---- BPE (subword segmentation; see utils.apply_bpe.BPE) ----
    src_codes='',
    tgt_codes='',
    src_bpe_dropout=0.,
    tgt_bpe_dropout=0.,
    bpe_dropout_stochastic_rate=0.6,

    # ---- decoding / beam search ----
    decode_length=50,
    beam_size=4,
    # length-penalty exponent — presumably GNMT-style; verify in decoder
    decode_alpha=0.6,
    enable_noise_beam_search=False,
    beam_search_temperature=1.0,
    # number of hypotheses to output per input
    top_beams=1,
    # strip BPE separators from decoded output
    remove_bpe=False,

    # ---- CTC auxiliary objective ----
    ctc_repeated=False,
    ctc_enable=False,
    ctc_alpha=0.3,

    # ---- learning-rate schedule ----
    warmup_steps=400,
    lrate=1e-5,
    min_lrate=0.0,
    max_lrate=1.0,

    # ---- weight initialization ----
    initializer="uniform",
    initializer_gain=0.08,

    # ---- model dimensions ----
    hidden_size=512,
    embed_size=512,
    # dimensionality of pre-extracted image/video features
    img_feature_size=2048,
    img_aug_size=11,
    # feed-forward (filter) layer width
    filter_size=2048,

    # ---- dropout rates ----
    dropout=0.1,
    relu_dropout=0.1,
    residual_dropout=0.1,

    # variable scope name for the model graph
    scope_name="transformer",

    attention_dropout=0.1,

    # ---- transformer depth / heads ----
    num_encoder_layer=6,
    num_decoder_layer=6,
    num_heads=8,

    # ---- sequence-length limits ----
    max_len=100,
    max_img_len=512,
    eval_max_len=1000,

    # ---- batching ----
    # batch_size applies when batch_or_token == 'batch',
    # token_size when batch_or_token == 'token'
    batch_size=80,
    token_size=3000,
    batch_or_token='token',
    eval_batch_size=32,
    shuffle_batch=True,

    # ---- data-pipeline workers/queues ----
    process_num=1,
    buffer_size=100,
    input_queue_size=100,
    output_queue_size=100,
    data_leak_ratio=0.5,

    # ---- dataset / output file paths ----
    src_vocab_file="",
    tgt_vocab_file="",
    src_train_file="",
    tgt_train_file="",
    img_train_file="",
    src_dev_file="",
    tgt_dev_file="",
    img_dev_file="",
    src_test_file="",
    tgt_test_file="",
    img_test_file="",
    output_dir="",
    test_output="",
    pretrained_model="",

    # ---- optimizer (Adam-style betas/epsilon) ----
    beta1=0.9,
    beta2=0.999,
    epsilon=1e-9,

    # ---- gradient handling ----
    clip_grad_norm=5.0,
    # gradients with norm above this bound are presumably skipped — confirm
    gnorm_upper_bound=1e20,

    # early-stop after this many evaluations without improvement
    estop_patience=100,

    label_smooth=0.1,

    # maximum number of training epochs (spelling kept for compatibility)
    epoches=10,

    # gradient-accumulation steps per update
    update_cycle=1,

    # GPU device indices to use
    gpus=[0],

    safe_nan=False,

    deep_transformer_init=False,

    eval_task="sign2text",

    # ---- reporting / checkpoint frequencies (in steps) ----
    disp_freq=100,
    eval_freq=10000,
    save_freq=5000,
    sample_freq=1000,

    # number of recent / best checkpoints to keep
    checkpoints=5,
    best_checkpoints=1,

    max_training_steps=1000,

    nthreads=6,

    random_seed=1234,

    # resume from an existing checkpoint in output_dir if available
    train_continue=True,

    # ---- numeric dtype configuration (consumed by utils.dtype) ----
    default_dtype="float32",
    dtype_epsilon=1e-8,
    dtype_inf=1e8,
    loss_scale=1.0,
)
|
|
|
|
|
# Command-line flags; parsed by tf.app.run() before main() is invoked.
flags = tf.flags
# path to a python-literal dict merged into global_params (see main())
flags.DEFINE_string("config", "", "Additional Mergable Parameters")
# "key=value,key=value" string parsed via HParams.parse
flags.DEFINE_string("parameters", "", "Command Line Refinable Parameters")
flags.DEFINE_string("name", "model", "Description of the training process for distinguishing")
flags.DEFINE_string("mode", "train", "train or test or ensemble")
|
|
|
|
|
|
|
|
|
|
|
def save_parameters(params, output_dir):
    """Serialize *params* (an HParams object) to <output_dir>/param.json.

    Args:
        params: tf.contrib.training.HParams instance to persist.
        output_dir: directory receiving param.json; created if missing.
    """
    if not tf.gfile.Exists(output_dir):
        # MakeDirs creates missing parent directories as well; the previous
        # MkDir call failed whenever output_dir's parent did not exist.
        tf.gfile.MakeDirs(output_dir)

    param_name = os.path.join(output_dir, "param.json")
    with tf.gfile.Open(param_name, "w") as writer:
        tf.logging.info("Saving parameters into {}"
                        .format(param_name))
        writer.write(params.to_json())
|
|
|
|
|
|
|
|
|
|
|
def load_parameters(params, output_dir):
    """Overlay previously saved parameters from <output_dir>/param.json.

    Args:
        params: tf.contrib.training.HParams instance to update in place.
        output_dir: directory where save_parameters() wrote param.json.

    Returns:
        The (possibly updated) *params*; unchanged when no file exists.
    """
    param_name = os.path.join(output_dir, "param.json")
    param_name = os.path.abspath(param_name)

    if tf.gfile.Exists(param_name):
        tf.logging.info("Loading parameters from {}"
                        .format(param_name))
        with tf.gfile.Open(param_name, 'r') as reader:
            # read() instead of readline(): robust to multi-line /
            # pretty-printed JSON (readline silently dropped everything
            # after the first line of the file).
            json_str = reader.read()
        params.parse_json(json_str)

    return params
|
|
|
|
|
|
|
|
class Recorder(object):
    """Mutable bag of training-progress state (step, epoch, scores, ...).

    Attributes are assigned dynamically (see setup_recorder) and the whole
    __dict__ is persisted to / restored from a JSON file, which is why the
    class itself declares none.
    """

    def load_from_json(self, file_name):
        """Restore all recorded attributes from a JSON file."""
        # fixed typo in log message: "recoder" -> "recorder"
        tf.logging.info("Loading recorder file from {}".format(file_name))
        with open(file_name, 'r', encoding='utf-8') as fh:
            self.__dict__.update(json.load(fh))

    def save_to_json(self, file_name):
        """Dump all recorded attributes to a JSON file (pretty-printed)."""
        tf.logging.info("Saving recorder file into {}".format(file_name))
        with open(file_name, 'w', encoding='utf-8') as fh:
            json.dump(self.__dict__, fh, indent=2)
|
|
|
|
|
|
|
|
|
|
|
def setup_recorder(params):
    """Attach a fresh (or resumed) Recorder to *params* as 'recorder'.

    Initializes the bookkeeping state for a new run, then — if a
    record.json already exists in params.output_dir — overlays the saved
    state so training resumes where it left off.

    Args:
        params: HParams instance; must provide lrate and output_dir.

    Returns:
        The same *params*, now carrying a 'recorder' hparam.
    """
    recorder = Recorder()

    # Fresh-run defaults: early-stopping counters, progress markers,
    # learning rate and score histories.
    fresh_state = {
        'bad_counter': 0,            # evaluations without improvement
        'estop': False,              # early-stop flag
        'lidx': -1,
        'step': 0,
        'epoch': 1,
        'lrate': params.lrate,
        'history_scores': [],
        'valid_script_scores': [],
    }
    for attr_name, initial_value in fresh_state.items():
        setattr(recorder, attr_name, initial_value)

    # Resume from a previous run when its record file is present.
    record_path = os.path.abspath(
        os.path.join(params.output_dir, "record.json"))
    if tf.gfile.Exists(record_path):
        recorder.load_from_json(record_path)

    params.add_hparam('recorder', recorder)
    return params
|
|
|
|
|
|
|
|
|
|
|
def print_parameters(params):
    """Log every configured hyper-parameter as an aligned key/value table."""
    tf.logging.info("The Used Configuration:")
    for name, value in params.values().items():
        padded_name = name.ljust(20)
        padded_value = str(value).ljust(20)
        tf.logging.info("%s\t%s", padded_name, padded_value)
    tf.logging.info("")
|
|
|
|
|
|
|
|
def main(_):
    """Entry point: assemble parameters, seed RNGs, load vocab/BPE, then
    dispatch to train / evaluate / inference according to --mode."""
    tf.logging.set_verbosity(tf.logging.INFO)

    tf.logging.info("Welcome Using Zero :)")

    # Print the pid so a hung run can be force-killed from a shell.
    pid = os.getpid()
    tf.logging.info("Your pid is {0} and use the following command to force kill your running:\n"
                    "'pkill -9 -P {0}; kill -9 {0}'".format(pid))

    tf.logging.info("Your running machine name is {}".format(socket.gethostname()))

    params = global_params

    # Parameter layering (order is deliberate):
    #   1) apply command-line --parameters, 2) merge --config dict,
    #   3) overlay any param.json saved from a previous run,
    #   4) re-apply --config and --parameters so that the current
    #      invocation's explicit settings win over the saved ones.
    params.parse(flags.FLAGS.parameters)
    if os.path.exists(flags.FLAGS.config):
        # NOTE(review): eval() of the config file executes arbitrary code —
        # only use trusted config files (ast.literal_eval would be safer).
        params.override_from_dict(eval(open(flags.FLAGS.config).read()))
    params = load_parameters(params, params.output_dir)

    if os.path.exists(flags.FLAGS.config):
        params.override_from_dict(eval(open(flags.FLAGS.config).read()))
    params.parse(flags.FLAGS.parameters)

    # Seed every RNG in play for reproducibility.
    random.seed(params.random_seed)
    np.random.seed(params.random_seed)
    tf.set_random_seed(params.random_seed)

    # Load vocabularies and BPE codes; attached to params so that the
    # graph/evaluation code can reach them.
    tf.logging.info("Begin Loading Vocabulary")
    start_time = time.time()
    params.src_vocab = Vocab(params.src_vocab_file)
    params.tgt_vocab = Vocab(params.tgt_vocab_file)
    # BPE(codes, merges=-1, separator='@@', vocab=None, glossaries=None)
    params.src_bpe = BPE(codecs.open(params.src_codes, encoding='utf-8'), -1, '@@', None, None)
    params.tgt_bpe = BPE(codecs.open(params.tgt_codes, encoding='utf-8'), -1, '@@', None, None)
    tf.logging.info("End Loading Vocabulary, Source Vocab Size {}, "
                    "Target Vocab Size {}, within {} seconds"
                    .format(params.src_vocab.size(), params.tgt_vocab.size(), time.time() - start_time))

    print_parameters(params)

    # Debug aid: show whether attention-weight collection is enabled.
    collect_attn = getattr(params, 'collect_attention_weights', None)
    tf.logging.info(f"[DEBUG] params.collect_attention_weights = {collect_attn}")

    # Configure global float type behaviour (see utils.dtype).
    dtype.set_floatx(params.default_dtype)
    dtype.set_epsilon(params.dtype_epsilon)
    dtype.set_inf(params.dtype_inf)

    # Dispatch on --mode.
    mode = flags.FLAGS.mode
    if mode == "train":
        # Persist the fully-merged parameter set for later resumption.
        save_parameters(params, params.output_dir)

        # Attach training-progress bookkeeping (resumes if record exists).
        params = setup_recorder(params)

        graph.train(params)
    elif mode == "test":
        graph.evaluate(params)
    elif mode == "infer":
        graph.inference(params)
    else:
        tf.logging.error("Invalid mode: {}".format(mode))
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    # tf.app.run() parses the flags defined above, then calls main(argv).
    tf.app.run()
|
|
|