import torch
from tqdm import tqdm

from trainer.build import TRAINER_REGISTRY, BaseTrainer


@TRAINER_REGISTRY.register()
class DefaultTrainer(BaseTrainer):
    def __init__(self, cfg):
        super().__init__(cfg)
        self.best_metric = -1

    def forward(self, data_dict, mode):
        return self.model(data_dict, mode)

    def backward(self, loss):
        self.accelerator.backward(loss)
        # clip only on steps where gradients are actually synchronized,
        # i.e. at the end of a gradient-accumulation cycle
        if self.grad_norm is not None and self.accelerator.sync_gradients:
            self.accelerator.clip_grad_norm_(self.model.parameters(), self.grad_norm)
        self.optimizer.step()
        self.scheduler.step()
        # zero the gradients after stepping: zeroing before backward() would
        # wipe gradients accumulated over earlier micro-batches whenever
        # gradient accumulation is enabled
        self.optimizer.zero_grad()
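
    # Note (assumes the optimizer and scheduler were wrapped by
    # `accelerator.prepare(...)`): under `accelerator.accumulate(...)`,
    # Accelerate skips `optimizer.step()` and `optimizer.zero_grad()` on
    # accumulation micro-steps, so `backward()` can safely be called once
    # per micro-batch without extra bookkeeping here.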

    def train_step(self, epoch):
        self.model.train()
        loader = self.data_loaders["train"]
        pbar = tqdm(
            range(len(loader)),
            disable=(not self.accelerator.is_main_process),
            desc=f"[Epoch {epoch + 1}/{self.epochs}]",
        )
        for i, data_dict in enumerate(loader):
            with self.accelerator.accumulate(self.model):
                data_dict['cur_step'] = epoch * len(loader) + i
                data_dict['total_steps'] = self.total_steps
                # forward
                data_dict = self.forward(data_dict, mode='qa')
                # calculate loss and update parameters
                loss, losses = self.loss(data_dict)
                self.backward(loss)
                # record
                self.global_step += 1
                log_dict = {'step': self.global_step}
                log_dict.update(losses)
                self.log(log_dict, mode="train")
                pbar.update(1)
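
    # NB: `global_step` above counts micro-batches; with gradient accumulation
    # enabled it runs ahead of the number of optimizer updates.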

    def _gather_for_metrics(self, data_dict):
        """
        Gather the minimal fields the evaluator needs across processes.
        Assumes these fields are tensors.
        """
        out = {}
        for k in ["answer_scores", "answer_label", "sqa_type"]:
            out[k] = self.accelerator.gather_for_metrics(data_dict[k])
        return out
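
    # For example, with 4 processes each holding a (B, num_answers)
    # `answer_scores` tensor, `gather_for_metrics` returns a
    # (4 * B, num_answers) tensor on every process and drops any samples the
    # distributed sampler duplicated to pad out the last batch.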

    @torch.no_grad()
    def eval_step(self, epoch):
        self.model.eval()
        loader = self.data_loaders["val"]
        pbar = tqdm(range(len(loader)), disable=(not self.accelerator.is_main_process))
        for data_dict in loader:
            data_dict = self.forward(data_dict, mode="qa")
            # gather the minimal fields needed for metrics across all GPUs
            gathered = self._gather_for_metrics(data_dict)
            # only the main process accumulates, to avoid double counting
            if self.accelerator.is_main_process:
                self.evaluator.update(gathered)
            pbar.update(1)
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process:
            is_best, results = self.evaluator.record()
            if is_best:
                self.best_metric = results["target_metric"]
            self.log(results, mode="val")
            self.evaluator.reset()
            return is_best
        # non-main processes never see the metrics, so they report False
        return False

    @torch.no_grad()
    def test_step(self):
        self.model.eval()
        loader = self.data_loaders["val"]
        pbar = tqdm(range(len(loader)), disable=(not self.accelerator.is_main_process))
        for data_dict in loader:
            data_dict = self.forward(data_dict, mode="qa")
            # gather the minimal fields needed for metrics across all GPUs
            gathered = self._gather_for_metrics(data_dict)
            # only the main process accumulates + records, to avoid double counting
            if self.accelerator.is_main_process:
                self.evaluator.update(gathered)
            pbar.update(1)
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process:
            _, results = self.evaluator.record(split="test")
            self.log(results, mode="test")
            self.evaluator.reset()
        else:
            results = None
        # results only exist on the main process; broadcast them here if
        # other ranks ever need them
        return results

    def run(self):
        if self.mode == "train":
            model = self.model.module if hasattr(self.model, 'module') else self.model
            model.set_downstream_mode()
            start_epoch = self.exp_tracker.epoch
            num_trainable_params = sum(
                param.numel() for param in self.model.parameters() if param.requires_grad
            )
            self.accelerator.print(f"Total number of trainable parameters: {num_trainable_params:,}")
            self.global_step = start_epoch * len(self.data_loaders["train"])
            for epoch in range(start_epoch, self.epochs):
                self.exp_tracker.step()
                self.train_step(epoch)
                if self.epochs_per_eval and (epoch + 1) % self.epochs_per_eval == 0:
                    is_best = self.eval_step(epoch)
                    self.accelerator.print(f"[Epoch {epoch + 1}/{self.epochs}] finished eval, is_best: {is_best}")
                else:
                    is_best = False
                self.accelerator.wait_for_everyone()
                # checkpointing happens on the main process only
                if self.accelerator.is_main_process:
                    self.save("latest.pth")
                    if is_best:
                        self.save("best.pth")
                    if self.epochs_per_save and (epoch + 1) % self.epochs_per_save == 0:
                        self.save(f"ckpt_{epoch + 1}.pth")
        self.test_step()
        if self.mode == "train":
            self.accelerator.end_training()
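

# A minimal usage sketch (hypothetical: the config object, its loader, and the
# registry lookup API are assumptions, not shown in this file):
#
#     cfg = load_config("configs/sqa.yaml")    # hypothetical helper
#     trainer = DefaultTrainer(cfg)            # or looked up via TRAINER_REGISTRY
#     trainer.run()                            # train + eval, then test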