File size: 4,851 Bytes
c94c8c9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
from tqdm import tqdm
import torch
from trainer.build import TRAINER_REGISTRY, BaseTrainer

@TRAINER_REGISTRY.register()
class OpenVocabTrainer(BaseTrainer):
    """Trainer for open-vocabulary models.

    Dispatches on ``self.mode`` (set by ``BaseTrainer``):
      * ``warmup`` / ``pretrain`` — train only, with periodic checkpoints;
      * ``train`` — train, periodically evaluate, save best/periodic
        checkpoints, then run a final test pass;
      * anything else — test only.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        # Best validation metric seen so far; -1 means "nothing evaluated yet".
        self.best_metric = -1

    def forward(self, data_dict, mode=None):
        """Run the model on one batch.

        ``mode`` defaults to None so call sites that do not distinguish a
        task (e.g. ``test_step``) may omit it — the previous required
        argument made ``self.forward(data_dict)`` a TypeError.
        """
        return self.model(data_dict, mode)

    def backward(self, loss, mode=None):
        """Backward pass plus one optimizer/scheduler step.

        ``mode`` is accepted for symmetry with ``forward`` but is unused.
        """
        self.accelerator.backward(loss)
        # Clip only when gradients are actually synced, i.e. at a real
        # optimizer step under gradient accumulation (accelerate contract).
        if self.grad_norm is not None and self.accelerator.sync_gradients:
            self.accelerator.clip_grad_norm_(self.model.parameters(), self.grad_norm)
        self.optimizer.step()
        self.scheduler.step()
        self.optimizer.zero_grad()

    def train_step(self, epoch, mode=None):
        """Train for one epoch over ``self.data_loaders[self.mode]``."""
        self.model.train()
        loader = self.data_loaders[self.mode]
        is_main = self.accelerator.is_main_process
        # Progress bar only on the main process to avoid interleaved output.
        pbar = tqdm(loader, disable=not is_main, desc=f"[Epoch {epoch + 1}/{self.epochs}]")

        for data_dict in pbar:
            # accumulate() makes backward() a no-op sync except on
            # accumulation boundaries.
            with self.accelerator.accumulate(self.model):
                # Forward pass
                data_dict = self.forward(data_dict, mode=mode)

                # Compute loss
                loss, losses = self.loss(data_dict)

                # Backward + optimizer step
                self.backward(loss, mode=mode)

                # Logging
                self.global_step += 1
                log_dict = {'step': self.global_step, **losses}
                if mode == 'qa':
                    # QA mode additionally tracks batch-level train metrics.
                    metrics = self.evaluator["train"].batch_metrics(data_dict)
                    log_dict.update(metrics)
                self.log(log_dict, mode="train")

    @torch.no_grad()
    def eval_step(self, epoch, mode=None):
        """Run one validation epoch and log the last batch's losses.

        ``mode`` defaults to None so ``run()`` may call
        ``self.eval_step(epoch)`` (the previous required argument made
        that call a TypeError).

        Returns:
            bool: always False — this method does not decide "best"
            (``test_step`` owns the evaluator bookkeeping); callers treat
            the return as an ``is_best`` flag.
        """
        self.model.eval()
        loader = self.data_loaders["val"]
        pbar = tqdm(range(len(loader)), disable=(not self.accelerator.is_main_process))
        log_dict = {}  # guard: stays empty if the val loader yields nothing
        for data_dict in loader:
            data_dict = self.forward(data_dict, mode=mode)
            loss, losses = self.loss(data_dict)
            # NOTE(review): only the *last* batch's losses survive the loop;
            # presumably intentional, but epoch-averaging may be wanted — confirm.
            log_dict = {'epoch': epoch, **losses}
            pbar.update(1)
        if log_dict:
            self.log(log_dict, mode="val")
        return False

    @torch.no_grad()
    def test_step(self):
        """Feed the test loader through the "val" evaluator and log its record.

        Returns:
            The evaluator's aggregated results dict.
        """
        self.model.eval()
        loader = self.data_loaders["test"]
        pbar = tqdm(range(len(loader)), disable=(not self.accelerator.is_main_process))
        for data_dict in loader:
            data_dict = self.forward(data_dict)
            self.evaluator["val"].update(data_dict)
            pbar.update(1)
        is_best, results = self.evaluator["val"].record()
        self.log(results, mode="test")
        self.evaluator["val"].reset()
        return results

    def run(self):
        """Entry point: report trainable parameters, then dispatch on mode."""
        num_trainable_params = 0
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                num_trainable_params += param.numel()
                print(name)

        print(f"Total number of trainable parameters: {num_trainable_params:,}")
        if self.mode in ['warmup', "pretrain"]:
            # Resume support: global_step is reconstructed from the epoch.
            start_epoch = self.exp_tracker.epoch
            self.global_step = start_epoch * len(self.data_loaders[self.mode])

            for epoch in range(start_epoch, self.epochs):
                self.exp_tracker.step()
                self.train_step(epoch, mode=self.mode)

                self.accelerator.wait_for_everyone()
                if self.accelerator.is_main_process:
                    if self.epochs_per_save and (epoch + 1) % self.epochs_per_save == 0:
                        self.save(f"ckpt_{epoch+1}.pth")

            # Final checkpoint: main-process only (consistent with the in-loop
            # saves) and only if the loop ran at all — the unguarded original
            # raised NameError on `epoch` when start_epoch >= self.epochs and
            # saved from every rank.
            if self.epochs > start_epoch and self.accelerator.is_main_process:
                self.save(f"ckpt_{epoch+1}.pth")
            self.accelerator.end_training()
        else:
            if self.mode == "train":
                start_epoch = self.exp_tracker.epoch
                self.global_step = start_epoch * len(self.data_loaders["train"])
                for epoch in range(start_epoch, self.epochs):
                    self.exp_tracker.step()
                    self.train_step(epoch)
                    if self.epochs_per_eval and (epoch + 1) % self.epochs_per_eval == 0:
                        is_best = self.eval_step(epoch)
                        self.accelerator.print(f"[Epoch {epoch + 1}/{self.epochs}] finished eval, is_best: {is_best}")
                    else:
                        is_best = False

                    self.accelerator.wait_for_everyone()
                    if self.accelerator.is_main_process:
                        if is_best:
                            self.save("best.pth")
                        if self.epochs_per_save and (epoch + 1) % self.epochs_per_save == 0:
                            self.save(f"ckpt_{epoch+1}.pth")

            self.test_step()
            if self.mode == "train":
                self.accelerator.end_training()