|
|
import os |
|
|
import pdb |
|
|
import sys |
|
|
import copy |
|
|
import torch |
|
|
import torch.distributed as dist |
|
|
import h5py |
|
|
import numpy as np |
|
|
import torch.nn as nn |
|
|
from tqdm import tqdm |
|
|
import torch.nn.functional as F |
|
|
import contextlib |
|
|
|
|
|
try: |
|
|
from torch.distributed.algorithms.join import Join |
|
|
except ImportError: |
|
|
Join = None |
|
|
|
|
|
try: |
|
|
from evaluation.slr_eval.wer_calculation import evaluate |
|
|
from utils.sample_utils import extract_sample_id |
|
|
except: |
|
|
from .evaluation.slr_eval.wer_calculation import evaluate |
|
|
from .utils.sample_utils import extract_sample_id |
|
|
|
|
|
|
|
|
def _unwrap_model(model): |
|
|
return model.module if hasattr(model, "module") else model |
|
|
|
|
|
|
|
|
def seq_train(loader, model, optimizer, device, epoch_idx, recoder,
              is_master=True, rank=0, log_bad_samples=True):
    """Run one training epoch over ``loader``.

    Args:
        loader: training DataLoader; each batch is ``(vid, vid_lgt, label, label_lgt, ..., info)``
            where ``data[-1]`` carries per-sample metadata (assumed iterable of
            sample-info records — TODO confirm against the dataset collate fn).
        model: possibly DDP/DataParallel-wrapped network.
        optimizer: project optimizer wrapper exposing ``.optimizer`` (the torch optim),
            ``.zero_grad()``, ``.step()`` and ``.scheduler`` (stepped once per epoch).
        device: project device helper with ``data_to_device``.
        epoch_idx: current epoch number (for logging only).
        recoder: project logger with ``print_log`` and ``log_interval``; may be None
            for non-finite-loss messages, which then fall back to ``print``.
        is_master: suppresses tqdm and routes logs on non-master ranks.
        rank: distributed rank, used only in log messages.
        log_bad_samples: when True, collect ids of samples that produced a
            non-finite loss.

    Returns:
        Tuple ``(total_loss_sum, total_loss_count, bad_sample_ids)`` where the
        sum/count cover only finite-loss batches and ``bad_sample_ids`` is None
        when ``log_bad_samples`` is False.
    """
    model.train()
    base_model = _unwrap_model(model)
    loss_value = []
    total_loss_sum = 0.0
    total_loss_count = 0
    bad_sample_ids = [] if log_bad_samples else None
    # Snapshot of learning rates; scheduler only steps after the epoch, so this
    # stays valid for per-batch logging.
    clr = [group['lr'] for group in optimizer.optimizer.param_groups]
    # Under DDP with uneven per-rank batch counts, Join keeps collectives from
    # deadlocking; otherwise a no-op context.
    join_context = contextlib.nullcontext()
    if Join is not None and dist.is_available() and dist.is_initialized():
        join_context = Join([model])
    with join_context:
        for batch_idx, data in enumerate(tqdm(loader, disable=not is_master)):
            vid = device.data_to_device(data[0])
            vid_lgt = device.data_to_device(data[1])
            label = device.data_to_device(data[2])
            label_lgt = device.data_to_device(data[3])
            ret_dict = model(vid, vid_lgt, label=label, label_lgt=label_lgt)
            loss = base_model.criterion_calculation(ret_dict, label, label_lgt)
            loss_item = loss.detach().item()
            finite = np.isfinite(loss_item)
            if not finite:
                # Identify offending samples (best effort) and neutralize the
                # loss so backward() still runs and DDP ranks stay in sync.
                batch_ids = []
                if log_bad_samples:
                    for info in data[-1]:
                        sid = extract_sample_id(info)
                        if sid:
                            batch_ids.append(sid)
                    if batch_ids and bad_sample_ids is not None:
                        bad_sample_ids.extend(batch_ids)
                msg = f"[rank {rank}] Non-finite loss at epoch {epoch_idx}, batch {batch_idx}, sample {batch_ids or data[-1]}"
                if recoder is not None and is_master:
                    recoder.print_log(msg)
                else:
                    print(msg)
                loss = torch.nan_to_num(loss, nan=0.0, posinf=0.0, neginf=0.0)
            else:
                total_loss_sum += float(loss_item)
                total_loss_count += 1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Record NaN (not 0) for skipped batches so the epoch mean below
            # can exclude them.
            loss_value.append(loss_item if finite else np.nan)
            if batch_idx % recoder.log_interval == 0:
                log_msg = '\tEpoch: {}, Batch({}/{}) done. Loss: {:.8f} lr:{:.6f}'.format(
                    epoch_idx, batch_idx, len(loader),
                    loss_item if finite else 0.0, clr[0])
                # Optional pose-assist diagnostics; logging must never break
                # training, but only swallow real errors, not interrupts.
                if hasattr(base_model, 'multimodal_pose_assist') and base_model.multimodal_pose_assist:
                    try:
                        current_weight = base_model.get_pose_assist_weight()
                        if isinstance(current_weight, torch.Tensor):
                            current_weight = current_weight.item()
                        weight_percentage = (current_weight / base_model.pose_assist_weight_init * 100) if base_model.pose_assist_weight_init > 0 else 0
                        log_msg += ' pose_w:{:.6f}({:.1f}%)'.format(current_weight, weight_percentage)
                    except Exception:  # was bare except: would hide KeyboardInterrupt
                        pass
                recoder.print_log(log_msg)
    optimizer.scheduler.step()
    # Epoch mean over finite losses only; NaN signals "no valid batch at all".
    valid_losses = np.array([lv for lv in loss_value if np.isfinite(lv)], dtype=np.float64)
    if valid_losses.size > 0:
        mean_loss = float(valid_losses.mean())
    else:
        mean_loss = float('nan')
    recoder.print_log('\tMean training loss: {:.10f}.'.format(mean_loss))
    # End-of-epoch pose-assist weight summary (best effort, same guard as above).
    if hasattr(base_model, 'multimodal_pose_assist') and base_model.multimodal_pose_assist:
        try:
            current_weight = base_model.get_pose_assist_weight()
            if isinstance(current_weight, torch.Tensor):
                current_weight = current_weight.item()
            weight_percentage = (current_weight / base_model.pose_assist_weight_init * 100) if base_model.pose_assist_weight_init > 0 else 0
            if base_model.pose_assist_learnable:
                param_value = base_model.pose_assist_weight_param.item()
                alpha = torch.sigmoid(torch.tensor(param_value)).item()
                recoder.print_log(
                    '\tPose Assist Weight: {:.6f} ({:.1f}% of max={:.6f}) [learnable_param={:.4f}, sigmoid={:.4f}]'.format(
                        current_weight, weight_percentage, base_model.pose_assist_weight_init, param_value, alpha))
            else:
                recoder.print_log(
                    '\tPose Assist Weight: {:.6f} (FIXED)'.format(current_weight))
        except Exception:  # was bare except: would hide KeyboardInterrupt
            pass
    return total_loss_sum, total_loss_count, bad_sample_ids if log_bad_samples else None
|
|
|
|
|
|
|
|
def seq_eval(cfg, loader, model, device, mode, epoch, work_dir, recoder,
             evaluate_tool="python", is_master=True):
    """Decode the whole ``loader`` split, dump CTM hypothesis files and score them.

    Args:
        cfg: config object exposing ``dataset_info`` with 'evaluation_dir',
            'dataset_root' and 'evaluation_prefix' keys.
        loader: evaluation DataLoader; ``data[-1]`` records are indexable and
            position 2 holds the file/sample name written to the CTM files.
        model: possibly wrapped network; its return dict must contain
            'recognized_sents' and 'conv_sents'.
        device: project device helper with ``data_to_device``.
        mode: split name used in output file names (e.g. 'dev'/'test').
        epoch: epoch number, used for the result directory name.
        work_dir: directory (with trailing separator) for the CTM outputs.
        recoder: project logger with ``record_timer`` / ``print_log``.
        evaluate_tool: 'python' selects the python WER evaluator.
        is_master: suppresses the tqdm bar on non-master ranks.

    Returns:
        WER (float) of the main branch, or 100.0 if evaluation failed.
    """
    model.eval()
    eval_model = _unwrap_model(model)
    total_sent = []
    total_info = []
    total_conv_sent = []
    stat = {i: [0, 0] for i in range(len(loader.dataset.dict))}
    for batch_idx, data in enumerate(tqdm(loader, disable=not is_master)):
        recoder.record_timer("device")
        vid = device.data_to_device(data[0])
        vid_lgt = device.data_to_device(data[1])
        label = device.data_to_device(data[2])
        label_lgt = device.data_to_device(data[3])
        with torch.no_grad():
            ret_dict = eval_model(vid, vid_lgt, label=label, label_lgt=label_lgt)
        total_info += [file_name[2] for file_name in data[-1]]
        total_sent += ret_dict['recognized_sents']
        total_conv_sent += ret_dict['conv_sents']
    try:
        python_eval = evaluate_tool == "python"
        write2file(work_dir + "output-hypothesis-{}.ctm".format(mode), total_info, total_sent)
        write2file(work_dir + "output-hypothesis-{}-conv.ctm".format(mode), total_info,
                   total_conv_sent)
        # conv branch is scored first; its return value is unused here but the
        # evaluate() call also materializes result files on disk.
        conv_ret = evaluate(
            prefix=work_dir, mode=mode, output_file="output-hypothesis-{}-conv.ctm".format(mode),
            evaluate_dir=cfg.dataset_info['evaluation_dir'],
            dataset_dir=cfg.dataset_info['dataset_root'],
            evaluate_prefix=cfg.dataset_info['evaluation_prefix'],
            output_dir="epoch_{}_result/".format(epoch),
            python_evaluate=python_eval,
        )
        lstm_ret = evaluate(
            prefix=work_dir, mode=mode, output_file="output-hypothesis-{}.ctm".format(mode),
            evaluate_dir=cfg.dataset_info['evaluation_dir'],
            dataset_dir=cfg.dataset_info['dataset_root'],
            evaluate_prefix=cfg.dataset_info['evaluation_prefix'],
            output_dir="epoch_{}_result/".format(epoch),
            python_evaluate=python_eval,
            triplet=True,
        )
    except Exception as err:  # was bare except: and only printed the type, hiding the cause
        print("Unexpected error:", repr(err))
        lstm_ret = 100.0  # worst-case WER so the caller never crashes on a scoring failure
    recoder.print_log(f"Epoch {epoch}, {mode} {lstm_ret: 2.2f}%", f"{work_dir}/{mode}.txt")
    return lstm_ret
|
|
|
|
|
|
|
|
def seq_feature_generation(loader, model, device, mode, recoder, is_master=True):
    """Extract per-sample visual features for a split and save them to ``./features/{mode}.h5``.

    Args:
        loader: DataLoader; ``data[-1][i][1]`` is assumed to be the sample key
            used to index the feature dict — TODO confirm against the collate fn.
        model: possibly wrapped network returning 'feat_len' and
            'visual_features' (time-major ``(T, B, C)``, permuted here to
            batch-major before slicing to each sample's true length).
        device: project device helper with ``data_to_device``.
        mode: split name, becomes the output file stem.
        recoder: project logger with ``record_timer``.
        is_master: suppresses the tqdm bar on non-master ranks.
    """
    model.eval()
    feature_model = _unwrap_model(model)
    tgt_path = os.path.abspath(f"./features/{mode}")
    # exist_ok avoids the check-then-create race of the old exists()+makedirs().
    os.makedirs("./features/", exist_ok=True)
    features = {}
    for batch_idx, data in tqdm(enumerate(loader), disable=not is_master):
        recoder.record_timer("device")
        vid = device.data_to_device(data[0])
        vid_lgt = device.data_to_device(data[1])
        with torch.no_grad():
            ret_dict = feature_model(vid, vid_lgt)
        feat_len = ret_dict['feat_len'].cpu().detach().numpy().astype(np.int32)
        # (T, B, C) -> (B, T, C) so each sample can be sliced to its length.
        visual_features = ret_dict['visual_features'].permute(1, 0, 2)
        for sample_idx in range(len(vid)):
            visual_feature = visual_features[sample_idx][:feat_len[sample_idx]].cpu().detach().numpy().astype(np.float32)
            features[data[-1][sample_idx][1]] = visual_feature
    # Context manager guarantees the HDF5 handle closes even if a write fails
    # (the old explicit close() leaked the handle on exceptions).
    with h5py.File(tgt_path + ".h5", 'w') as hf:
        # Datasets are numbered in sorted-key order for a deterministic layout.
        for i, dkey in enumerate(sorted(features.keys())):
            hf.create_dataset("%s" % i, data=features[dkey])
|
|
|
|
|
|
|
|
def write2file(path, info, output):
    """Write hypothesis sentences to ``path`` in CTM format.

    Each word becomes one line: ``<sample-id> 1 <start> <end> <word>`` with
    synthetic 10 ms word timings derived from the word index.

    Args:
        path: output file path (overwritten).
        info: sequence of sample identifiers, parallel to ``output``.
        output: sequence of sentences; each sentence is a sequence of words
            where ``word[0]`` is the token text (remaining elements, e.g. a
            score, are ignored).
    """
    # 'with' guarantees the handle is flushed and closed; the old version
    # opened the file and never closed it.
    with open(path, "w") as ctm_file:
        for sample_idx, sample in enumerate(output):
            for word_idx, word in enumerate(sample):
                ctm_file.write(
                    "{} 1 {:.2f} {:.2f} {}\n".format(info[sample_idx],
                                                     word_idx * 1.0 / 100,
                                                     (word_idx + 1) * 1.0 / 100,
                                                     word[0]))
|
|
|