import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
import pdb
import sys
import cv2
import yaml
import torch
import random
import importlib
import faulthandler
import numpy as np
import torch.nn as nn
import shutil
import inspect
import time
from collections import OrderedDict
faulthandler.enable()
import utils
from modules.sync_batchnorm import convert_model
from seq_scripts import seq_train, seq_eval, seq_feature_generation
from torch.cuda.amp import autocast
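# Processor drives the full pipeline: it prepares the work directory, builds the
# model, optimizer and data loaders, and then runs training, evaluation,
# feature extraction, or fine-tuning depending on the configured phase.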
class Processor():
def __init__(self, arg):
self.arg = arg
if os.path.exists(self.arg.work_dir):
# Auto-remove existing work dir (non-interactive mode)
print(f'Work dir {self.arg.work_dir} exists, removing...')
shutil.rmtree(self.arg.work_dir)
os.makedirs(self.arg.work_dir)
shutil.copy2(__file__, self.arg.work_dir)
shutil.copy2('./configs/baseline.yaml', self.arg.work_dir)
shutil.copy2('./modules/tconv.py', self.arg.work_dir)
shutil.copy2('./modules/resnet.py', self.arg.work_dir)
self.recoder = utils.Recorder(self.arg.work_dir, self.arg.print_log, self.arg.log_interval)
self.save_arg()
if self.arg.random_fix:
self.rng = utils.RandomState(seed=self.arg.random_seed)
self.device = utils.GpuDataParallel()
self.dataset = {}
self.data_loader = {}
self.gloss_dict = np.load(self.arg.dataset_info['dict_path'], allow_pickle=True).item()
# Check if gloss_dict contains blank token
has_blank = any('blank' in str(k).lower() for k in self.gloss_dict.keys())
# If blank is not in dict, add 1 for blank token (like Phoenix2014)
# If blank is in dict, use dict length as is (like ASLLRP)
self.arg.model_args['num_classes'] = len(self.gloss_dict) if has_blank else len(self.gloss_dict) + 1
self.model, self.optimizer = self.loading()
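# Dispatch on self.arg.phase: 'train', 'test', 'features', or 'finetune'.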
def start(self):
if self.arg.phase == 'train':
best_dev = 100.0
best_epoch = 0
total_time = 0
epoch_time = 0
self.recoder.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
seq_model_list = []
for epoch in range(self.arg.optimizer_args['start_epoch'], self.arg.num_epoch):
save_model = epoch % self.arg.save_interval == 0
eval_model = epoch % self.arg.eval_interval == 0
epoch_time = time.time()
# train end2end model
seq_train(self.data_loader['train'], self.model, self.optimizer,
self.device, epoch, self.recoder)
if eval_model:
dev_wer = seq_eval(self.arg, self.data_loader['dev'], self.model, self.device,
'dev', epoch, self.arg.work_dir, self.recoder, self.arg.evaluate_tool)
self.recoder.print_log("Dev WER: {:05.2f}%".format(dev_wer))
if dev_wer < best_dev:
best_dev = dev_wer
best_epoch = epoch
model_path = "{}_best_model.pt".format(self.arg.work_dir)
self.save_model(epoch, model_path)
self.recoder.print_log('Save best model')
self.recoder.print_log('Best_dev: {:05.2f}, Epoch : {}'.format(best_dev, best_epoch))
if save_model:
model_path = "{}dev_{:05.2f}_epoch{}_model.pt".format(self.arg.work_dir, dev_wer, epoch)
seq_model_list.append(model_path)
print("seq_model_list", seq_model_list)
self.save_model(epoch, model_path)
epoch_time = time.time() - epoch_time
total_time += epoch_time
torch.cuda.empty_cache()
self.recoder.print_log('Epoch {} costs {} mins {} seconds'.format(epoch, int(epoch_time)//60, int(epoch_time)%60))
self.recoder.print_log('Training costs {} hours {} mins {} seconds'.format(int(total_time)//60//60, int(total_time)//60%60, int(total_time)%60))
elif self.arg.phase == 'test':
if self.arg.load_weights is None and self.arg.load_checkpoints is None:
raise ValueError('Testing requires load_weights or load_checkpoints to be set.')
self.recoder.print_log('Model: {}.'.format(self.arg.model))
self.recoder.print_log('Weights: {}.'.format(self.arg.load_weights))
# train_wer = seq_eval(self.arg, self.data_loader["train_eval"], self.model, self.device,
# "train", 6667, self.arg.work_dir, self.recoder, self.arg.evaluate_tool)
dev_wer = seq_eval(self.arg, self.data_loader["dev"], self.model, self.device,
"dev", 6667, self.arg.work_dir, self.recoder, self.arg.evaluate_tool)
test_wer = seq_eval(self.arg, self.data_loader["test"], self.model, self.device,
"test", 6667, self.arg.work_dir, self.recoder, self.arg.evaluate_tool)
self.recoder.print_log('Evaluation Done.\n')
elif self.arg.phase == "features":
for mode in ["train", "dev", "test"]:
seq_feature_generation(
self.data_loader[(mode + "_eval") if mode == "train" else mode],
self.model, self.device, mode, self.arg.work_dir, self.recoder
)
elif self.arg.phase == 'finetune':
best_dev = 100.0
best_epoch = 0
total_time = 0
epoch_time = 0
self.recoder.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
seq_model_list = []
# Freeze the visual (conv2d), 1D-conv, and temporal branches; requires_grad must be
# set on parameters (not on modules) for the freeze to take effect.
for param in self.model.conv2d.parameters():
param.requires_grad = False
for name, param in self.model.conv1d.named_parameters():
if 'fc' not in name:
param.requires_grad = False
for param in self.model.temporal_model.parameters():
param.requires_grad = False
from slr_network import NormLinear
self.model.classifier = NormLinear(1024, len(self.gloss_dict) + 1).cuda()
self.model.conv1d.fc = self.model.classifier
for epoch in range(self.arg.optimizer_args['start_epoch'], self.arg.num_epoch):
save_model = epoch % self.arg.save_interval == 0
eval_model = epoch % self.arg.eval_interval == 0
epoch_time = time.time()
# train end2end model
seq_train(self.data_loader['train'], self.model, self.optimizer,
self.device, epoch, self.recoder)
if eval_model:
dev_wer = seq_eval(self.arg, self.data_loader['dev'], self.model, self.device,
'dev', epoch, self.arg.work_dir, self.recoder, self.arg.evaluate_tool)
self.recoder.print_log("Dev WER: {:05.2f}%".format(dev_wer))
if dev_wer < best_dev:
best_dev = dev_wer
best_epoch = epoch
model_path = "{}_best_model.pt".format(self.arg.work_dir)
self.save_model(epoch, model_path)
self.recoder.print_log('Save best model')
self.recoder.print_log('Best_dev: {:05.2f}, Epoch : {}'.format(best_dev, best_epoch))
if save_model:
model_path = "{}dev_{:05.2f}_epoch{}_model.pt".format(self.arg.work_dir, dev_wer, epoch)
seq_model_list.append(model_path)
print("seq_model_list", seq_model_list)
self.save_model(epoch, model_path)
epoch_time = time.time() - epoch_time
total_time += epoch_time
torch.cuda.empty_cache()
self.recoder.print_log('Epoch {} costs {} mins {} seconds'.format(epoch, int(epoch_time)//60, int(epoch_time)%60))
self.recoder.print_log('Training costs {} hours {} mins {} seconds'.format(int(total_time)//60//60, int(total_time)//60%60, int(total_time)%60))
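# Dump the parsed arguments to <work_dir>/config.yaml so the run can be reproduced.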
def save_arg(self):
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
yaml.dump(arg_dict, f)
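# A checkpoint stores the model, optimizer, scheduler, and RNG state so training can resume exactly.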
def save_model(self, epoch, save_path):
torch.save({
'epoch': epoch,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'scheduler_state_dict': self.optimizer.scheduler.state_dict(),
'rng_state': self.rng.save_rng_state(),
}, save_path)
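# Build the model and optimizer, optionally restore weights or a full checkpoint,
# move the model onto the configured device(s), then construct the data loaders.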
def loading(self):
self.device.set_device(self.arg.device)
print("Loading model")
model_class = import_class(self.arg.model)
model = model_class(
**self.arg.model_args,
gloss_dict=self.gloss_dict,
loss_weights=self.arg.loss_weights,
)
shutil.copy2(inspect.getfile(model_class), self.arg.work_dir)
optimizer = utils.Optimizer(model, self.arg.optimizer_args)
if self.arg.load_weights:
self.load_model_weights(model, self.arg.load_weights)
elif self.arg.load_checkpoints:
self.load_checkpoint_weights(model, optimizer)
model = self.model_to_device(model)
# Handle DataParallel wrapper
if isinstance(model, nn.DataParallel):
self.kernel_sizes = model.module.conv1d.kernel_size
else:
self.kernel_sizes = model.conv1d.kernel_size
print("Loading model finished.")
self.load_data()
return model, optimizer
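# Move the model to the output device; with several GPUs it is wrapped in DataParallel,
# and convert_model (from modules.sync_batchnorm) swaps BatchNorm layers for their synchronized variants.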
def model_to_device(self, model):
model = model.to(self.device.output_device)
if len(self.device.gpu_list) > 1:
# Use DataParallel for multi-GPU training
model = nn.DataParallel(model, device_ids=self.device.gpu_list, output_device=self.device.output_device)
print(f"Using DataParallel on GPUs: {self.device.gpu_list}")
model = convert_model(model)
model.cuda()
return model
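# Load a saved state dict, drop any entries listed in arg.ignore_weights, and apply it strictly.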
def load_model_weights(self, model, weight_path):
state_dict = torch.load(weight_path)
if len(self.arg.ignore_weights):
for w in self.arg.ignore_weights:
if state_dict.pop(w, None) is not None:
print('Successfully Remove Weights: {}.'.format(w))
else:
print('Can Not Remove Weights: {}.'.format(w))
weights = self.modified_weights(state_dict['model_state_dict'], False)
# weights = self.modified_weights(state_dict['model_state_dict'])
model.load_state_dict(weights, strict=True)
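# Strip the '.module' prefix that DataParallel adds to parameter names; the
# modified=True branch is currently a stub that returns an empty dict.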
@staticmethod
def modified_weights(state_dict, modified=False):
state_dict = OrderedDict([(k.replace('.module', ''), v) for k, v in state_dict.items()])
if not modified:
return state_dict
modified_dict = dict()
return modified_dict
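# Restore model weights plus optimizer, scheduler, and RNG state from a checkpoint,
# and advance start_epoch so training resumes where it stopped.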
def load_checkpoint_weights(self, model, optimizer):
self.load_model_weights(model, self.arg.load_checkpoints)
state_dict = torch.load(self.arg.load_checkpoints)
if len(torch.cuda.get_rng_state_all()) == len(state_dict['rng_state']['cuda']):
print("Loading random seeds...")
self.rng.set_rng_state(state_dict['rng_state'])
if "optimizer_state_dict" in state_dict.keys():
print("Loading optimizer parameters...")
optimizer.load_state_dict(state_dict["optimizer_state_dict"])
optimizer.to(self.device.output_device)
if "scheduler_state_dict" in state_dict.keys():
print("Loading scheduler parameters...")
optimizer.scheduler.load_state_dict(state_dict["scheduler_state_dict"])
self.arg.optimizer_args['start_epoch'] = state_dict["epoch"] + 1
self.recoder.print_log("Resuming from checkpoint: epoch {self.arg.optimizer_args['start_epoch']}")
def load_data(self):
print("Loading data")
from tqdm import tqdm
self.feeder = import_class(self.arg.feeder)
shutil.copy2(inspect.getfile(self.feeder), self.arg.work_dir)
if self.arg.dataset == 'CSL':
dataset_list = zip(["train", "dev"], [True, False])
elif 'phoenix' in self.arg.dataset:
dataset_list = zip(["train", "dev", "test"], [True, False, False])
elif self.arg.dataset == 'CSL-Daily':
dataset_list = zip(["train", "dev", "test"], [True, False, False])
elif self.arg.dataset == 'ASLLRP':
dataset_list = zip(["train", "dev", "test"], [True, False, False])
else:
raise ValueError('Unsupported dataset: {}'.format(self.arg.dataset))
dataset_list = list(dataset_list)
for idx, (mode, train_flag) in enumerate(tqdm(dataset_list, desc="Creating data loaders")):
arg = self.arg.feeder_args
arg["prefix"] = self.arg.dataset_info['dataset_root']
arg["mode"] = mode.split("_")[0]
arg["transform_mode"] = train_flag
self.dataset[mode] = self.feeder(gloss_dict=self.gloss_dict, kernel_size=self.kernel_sizes, dataset=self.arg.dataset, **arg)
print(f" Building DataLoader for {mode} set...")
self.data_loader[mode] = self.build_dataloader(self.dataset[mode], mode, train_flag)
print("Loading data finished.")
def init_fn(self, worker_id):
np.random.seed(int(self.arg.random_seed)+worker_id)
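# Wrap a dataset in a DataLoader; training splits are shuffled and drop the last
# incomplete batch, and one batch is pulled eagerly below to warm up the workers.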
def build_dataloader(self, dataset, mode, train_flag):
print(f" Initializing {self.arg.num_worker} workers for {mode} DataLoader...")
loader = torch.utils.data.DataLoader(
dataset,
batch_size=self.arg.batch_size if mode == "train" else self.arg.test_batch_size,
shuffle=train_flag,
drop_last=train_flag,
num_workers=self.arg.num_worker, # if train_flag else 0
collate_fn=self.feeder.collate_fn,
pin_memory=True,
worker_init_fn=self.init_fn,
persistent_workers=True if self.arg.num_worker > 0 else False, # Keep workers alive
prefetch_factor=2, # Prefetch batches
)
# Force worker initialization by accessing first batch
if self.arg.num_worker > 0:
print(f" Warming up workers...")
start_time = time.time()
try:
_ = next(iter(loader))
print(f" Workers initialized in {time.time() - start_time:.1f}s")
except StopIteration:
pass
return loader
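# Resolve a dotted name ('module.ClassName') to the attribute it refers to.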
def import_class(name):
components = name.rsplit('.', 1)
mod = importlib.import_module(components[0])
mod = getattr(mod, components[1])
return mod
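# Entry point: parse CLI arguments, overlay defaults from the YAML config, load the
# dataset-specific config, archive the code via utils.pack_code, and start the processor.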
if __name__ == '__main__':
sparser = utils.get_parser()
p = sparser.parse_args()
# p.config = "baseline_iter.yaml"
if p.config is not None:
with open(p.config, 'r') as f:
try:
default_arg = yaml.load(f, Loader=yaml.FullLoader)
except AttributeError:
default_arg = yaml.load(f)
key = vars(p).keys()
for k in default_arg.keys():
if k not in key:
print('WRONG ARG: {}'.format(k))
assert (k in key)
sparser.set_defaults(**default_arg)
args = sparser.parse_args()
with open(f"./configs/{args.dataset}.yaml", 'r') as f:
args.dataset_info = yaml.load(f, Loader=yaml.FullLoader)
processor = Processor(args)
utils.pack_code("./", args.work_dir)
processor.start()