import glob
from datetime import timedelta
from pathlib import Path

import numpy as np
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs
from accelerate.logging import get_logger
from accelerate.utils import set_seed, InitProcessGroupKwargs
from fvcore.common.registry import Registry
from omegaconf import OmegaConf
from safetensors.torch import load_file

import common.misc as misc
from common.io_utils import make_dir
from data.build import build_dataloader
from evaluator.build import build_eval
from model.build import build_model
from optim.build import build_optim

TRAINER_REGISTRY = Registry("Trainer")


def _global_l2(tensors):
    """Compute sqrt(Σ‖t‖₂²) over an iterable of tensors, returned as a float."""
    total_sq = 0.0
    for t in tensors:
        # Accumulate on the host so CPU and CUDA tensors can be mixed freely
        total_sq += t.detach().float().pow(2).sum().item()
    return total_sq ** 0.5
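
# A quick sanity check (a sketch): the helper equals the norm of the stacked
# per-tensor norms, e.g.
#
#   a, b = torch.randn(3), torch.randn(4)
#   assert abs(_global_l2([a, b])
#              - torch.stack([a.norm(), b.norm()]).norm().item()) < 1e-5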


class Tracker:
    """Lightweight experiment state that survives checkpointing."""

    def __init__(self, cfg):
        self.reset(cfg)

    def step(self):
        self.epoch += 1

    def reset(self, cfg):
        self.exp_name = f"{cfg.exp_dir.parent.name.replace(cfg.name, '').lstrip('_')}/{cfg.exp_dir.name}"
        self.epoch = 0
        self.best_result = -np.inf

    def state_dict(self):
        return {k: v for k, v in self.__dict__.items() if not k.startswith('__')}

    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)
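
# Tracker exposes the `state_dict` / `load_state_dict` pair that
# `Accelerator.register_for_checkpointing` expects, so its fields round-trip
# through `save_state` / `load_state`. A minimal sketch:
#
#   tracker = Tracker(cfg)
#   tracker.step()                                  # epoch -> 1
#   restored = Tracker(cfg)
#   restored.load_state_dict(tracker.state_dict())
#   assert restored.epoch == 1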

@TRAINER_REGISTRY.register()
class BaseTrainer:
    def __init__(self, cfg):
        set_seed(cfg.rng_seed)
        self.debug = cfg.debug.get("flag", False)
        self.hard_debug = cfg.debug.get("hard_debug", False)
        self.epochs_per_eval = cfg.solver.get("epochs_per_eval", None)
        self.epochs_per_save = cfg.solver.get("epochs_per_save", None)
        self.global_step = 0
        
        # Experiment tracking and logging setup
        self.exp_tracker = Tracker(cfg)
        wandb_args = {"entity": cfg.logger.entity, "id": cfg.logger.run_id, "resume": cfg.resume}
        if not cfg.logger.get('autoname'):
            wandb_args["name"] = self.exp_tracker.exp_name
        # There is a bug in the logger setup; it needs a fix on the accelerate side
        self.logger = get_logger(__name__)
        self.mode = cfg.mode

        ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
        init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=5400))
        kwargs = ([ddp_kwargs] if cfg.num_gpu > 1 else []) + [init_kwargs]

        gradient_accumulation_steps = cfg.solver.get("gradient_accumulation_steps", 1)
   
        self.accelerator = Accelerator(
            gradient_accumulation_steps=gradient_accumulation_steps,
            log_with=cfg.logger.name,
            kwargs_handlers=kwargs
        )
        
        if not self.hard_debug:
            self.accelerator.init_trackers(
                    project_name=cfg.name if not self.debug else "Debug",
                    config=OmegaConf.to_container(cfg, resolve=True, throw_on_missing=True) if not cfg.resume else None,
                    init_kwargs={"wandb": wandb_args}
                )
            
        print(OmegaConf.to_yaml(cfg))

        if cfg.model.name == 'Query3D':
            # Choose whether to load multi-view or voxel features based on
            # model.memories for Query3D
            # TODO: a better way to do this?
            if 'mv' in cfg.model.memories or 'sem' in cfg.model.memories:
                cfg.data.load_multiview_info = True
            if 'voxel' in cfg.model.memories or 'sem' in cfg.model.memories:
                cfg.data.load_mask3d_voxel = True
            txt_model2tokenizer = {
                'BERTLanguageEncoder': 'bert-base-uncased',
                'CLIPLanguageEncoder': 'openai/clip-vit-large-patch14',
            }
            cfg.data_wrapper.tokenizer = txt_model2tokenizer[cfg.model.txt_encoder.name]
            
        if self.mode in ["warmup", "pretrain"]:
            keys = [self.mode]
        else:
            keys = ["train", "val", "test"]

        self.data_loaders = {key: build_dataloader(cfg, split=key) for key in keys}
        self.model = build_model(cfg)
        
        if self.mode == 'warmup':
            self.epochs = cfg.solver.warmup_epochs
        else:
            self.epochs = cfg.solver.epochs
            
        if self.mode == "test":
            total_steps = 1
        else:
            total_steps = (len(self.data_loaders[self.mode]) * self.epochs) // gradient_accumulation_steps
        self.loss, self.optimizer, self.scheduler = build_optim(cfg, self.model.get_opt_params(),
                                                                total_steps=total_steps, accelerator=self.accelerator)
        
        if misc.rgetattr(cfg, "eval.pass_kwargs", False):
            kwargs = {"dataloaders": self.data_loaders}
        else:
            kwargs = {}
        self.evaluator = build_eval(cfg, self.accelerator, **kwargs)

        # Training details
        self.total_steps = 1 if self.mode == "test" else len(self.data_loaders[self.mode]) * self.epochs
        self.grad_norm = cfg.solver.get("grad_norm")

        # Linear EMA-momentum ramp from ema[0] to ema[1] across training;
        # self.total_steps already counts every epoch, so it is not scaled
        # by self.epochs again here
        ema = [0.996, 1.0]
        ipe_scale = 1.0
        ramp_len = int(self.total_steps * ipe_scale)
        self.momentum_scheduler = (ema[0] + i * (ema[1] - ema[0]) / ramp_len
                                   for i in range(ramp_len + 1))
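        # A sketch of the ramp: with ramp_len = 4 the successive next() calls
        # yield m = 0.996, 0.997, 0.998, 0.999, 1.0.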
        
        # Load pretrain model weights
        if cfg.get('pretrain_ckpt_path'):
            self.pretrain_ckpt_path = Path(cfg.pretrain_ckpt_path)
            self.load_pretrain()
            if hasattr(self.model, "pm_encoder"):
                # NOTE: as written this reloads pm_encoder's own weights (a no-op)
                self.model.pm_encoder.load_state_dict(self.model.pm_encoder.state_dict())

        # Accelerator preparation
        self.model, self.loss, self.optimizer, self.scheduler = self.accelerator.prepare(
            self.model, self.loss, self.optimizer, self.scheduler
        )
        for name, loader in self.data_loaders.items():
            if isinstance(loader, list):
                loader = self.accelerator.prepare(*loader)
            else:
                loader = self.accelerator.prepare(loader)
            self.data_loaders[name] = loader
        self.accelerator.register_for_checkpointing(self.exp_tracker)

        # Check if resuming from previous checkpoint is needed
        self.ckpt_path = Path(cfg.ckpt_path) if cfg.get("ckpt_path") else Path(cfg.exp_dir) / "ckpt" / "best.pth"
        if cfg.resume:
            self.resume()

    def forward(self, data_dict):
        return self.model(data_dict)

    def update_ema(self):
        # Advance the momentum scheduler and update the target network as an
        # exponential moving average of the context network:
        #   param_k <- m * param_k + (1 - m) * param_q
        with torch.no_grad():
            m = next(self.momentum_scheduler)
            # Unwrap .module when the model is wrapped in DDP
            model_context = self.model.module.context_model if hasattr(self.model, 'module') else self.model.context_model
            model_target = self.model.module.target_model if hasattr(self.model, 'module') else self.model.target_model

            for param_q, param_k in zip(model_context.parameters(), model_target.parameters()):
                param_k.data.mul_(m).add_((1. - m) * param_q.detach().data)
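
    # A worked step (a sketch): with m = 0.996, a target weight of 1.0 paired
    # with a context weight of 0.0 becomes 0.996 * 1.0 + 0.004 * 0.0 = 0.996;
    # as m ramps toward 1.0, the target network effectively freezes.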
                
    def backward(self, loss):
        # Backprop (accelerate rescales the loss under gradient accumulation)
        self.accelerator.backward(loss)

        if self.debug:
            total_norm = _global_l2(p.grad for p in self.model.parameters() if p.grad is not None)
            print(f"grad_norm={total_norm:.2f}")

        # Gradient clipping (only when syncing gradients)
        if self.grad_norm is not None and self.accelerator.sync_gradients:
            self.accelerator.clip_grad_norm_(self.model.parameters(), self.grad_norm)

        # Optimizer step only when syncing gradients
        if self.accelerator.sync_gradients:
            self.optimizer.step()
            self.optimizer.zero_grad()
            self.scheduler.step()
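
    # A sketch of how an epoch loop is expected to drive `backward` (assumes
    # each step is wrapped in `accelerator.accumulate`, which is what makes
    # `sync_gradients` toggle under gradient accumulation; `compute_loss` is a
    # hypothetical stand-in for this repo's loss call):
    #
    #   for data_dict in self.data_loaders["train"]:
    #       with self.accelerator.accumulate(self.model):
    #           data_dict = self.forward(data_dict)
    #           self.backward(compute_loss(data_dict))
    #           self.global_step += 1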

    def log(self, results, mode="train"):
        if not self.hard_debug:
            log_dict = {}
            for key, val in results.items():
                if isinstance(val, torch.Tensor):
                    val = val.item()
                log_dict[f"{mode}/{key}"] = val
            if mode == "train":
                lrs = self.scheduler.get_last_lr()
                for i, lr in enumerate(lrs):
                    log_dict[f"{mode}/lr/group_{i}"] = lr
            self.accelerator.log(log_dict, step=self.global_step)

    def save(self, name):
        make_dir(self.ckpt_path.parent)
        self.save_func(str(self.ckpt_path.parent / name))

    def resume(self):
        if self.ckpt_path.exists():
            print(f"Resuming from {str(self.ckpt_path)}")
            self.accelerator.load_state(str(self.ckpt_path))
            print(f"Successfully resumed from {self.ckpt_path}")
        else:
            self.logger.info("training from scratch")
    
    def load_pretrain(self):
        print(f"📂 Loading pretrained weights from: {str(self.pretrain_ckpt_path)}")
        model_weight_path_pattern = str(self.pretrain_ckpt_path / "model*.safetensors")
        model_weight_paths = glob.glob(model_weight_path_pattern)

        if len(model_weight_paths) == 0:
            raise FileNotFoundError(f"❌ Cannot find any .safetensors file in {str(self.pretrain_ckpt_path)}")

        # Load and merge weights
        weights = {}
        for model_weight_path in model_weight_paths:
            weights.update(load_file(model_weight_path, device="cpu"))

        # Load weights with strict=False
        result = self.model.load_state_dict(weights, strict=False)
        
        model_keys = set(self.model.state_dict().keys())
        loaded_keys = model_keys.intersection(weights.keys())
        missing_keys = result.missing_keys
        unexpected_keys = result.unexpected_keys
        print(f"✅ Loaded keys:      {len(loaded_keys)} / {len(model_keys)}")
        print(f"❌ Missing keys:     {len(missing_keys)}")
        if missing_keys:
            print(f"   {missing_keys}")
        print(f"⚠️ Unexpected keys:  {len(unexpected_keys)}")
        
    def save_func(self, path):
        self.accelerator.save_state(path)
    
def build_trainer(cfg):
    return TRAINER_REGISTRY.get(cfg.trainer)(cfg)
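

# Example usage (a minimal sketch; assumes an OmegaConf config carrying the
# fields referenced above, and a caller or subclass that implements the actual
# train/eval loop -- `run` here is hypothetical):
#
#   cfg = OmegaConf.load("configs/default.yaml")  # hypothetical path
#   trainer = build_trainer(cfg)
#   trainer.run()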