import os
import time
import argparse
import json
import yaml
import math
import copy
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from safetensors.torch import save_file as safetensors_save
|
|
from model import ConditionalMDLM, apply_mask
from dataset import create_dataloaders
|
|
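# LR schedule: linear warmup to max_lr, then cosine decay toward max_lr * min_lr_ratio.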
def get_lr(step, warmup_steps, max_steps, max_lr, min_lr_ratio=0.0):
    if step < warmup_steps:
        return max_lr * step / warmup_steps
    progress = (step - warmup_steps) / max(1, max_steps - warmup_steps)
    min_lr = max_lr * min_lr_ratio
    return min_lr + (max_lr - min_lr) * 0.5 * (1 + math.cos(math.pi * progress))
|
|
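# safetensors metadata must be a flat str -> str mapping, so every field is stringified
# and the full config is embedded as one JSON string.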
def _meta(step, best_val_loss, config):
    mc = config.get("model", {})
    return {
        "step": str(step),
        "best_val_loss": f"{best_val_loss:.6f}",
        "encoder_model": str(mc.get("encoder_model", "unknown")),
        "decoder_tokenizer": str(mc.get("decoder_tokenizer", "unknown")),
        "vocab_size": str(mc.get("vocab_size", 0)),
        "hidden_dim": str(mc.get("hidden_dim", 0)),
        "config_json": json.dumps(config, default=str),
    }
|
|
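# Full resumable training state (.pt): model, EMA model, optimizer, and config.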
def save_checkpoint(path, step, best_val_loss, model, ema_model, optimizer, config):
    torch.save({
        "step": step,
        "best_val_loss": best_val_loss,
        "model": model.state_dict(),
        "ema_model": ema_model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "config": config,
    }, path)
|
|
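# EMA weights only, exported as .safetensors with the metadata above.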
def save_ema(path, step, best_val_loss, ema_model, config):
    st_path = path.replace(".pt", ".safetensors")
    safetensors_save(ema_model.state_dict(), st_path, metadata=_meta(step, best_val_loss, config))
|
|
def train(config, resume=False):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}\nGPU: {torch.cuda.get_device_name() if device.type == 'cuda' else 'N/A'}")
|
|
    mc, tc = config["model"], config["training"]
|
|
    model = ConditionalMDLM(config).to(device)
    total_params, trainable_params = model.count_params()
    print(f"Model params: {total_params:,} total, {trainable_params:,} trainable")
|
|
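    # Frozen EMA copy of the model, updated after every optimizer step.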
    ema_decay = tc.get("ema_decay", 0.9999)
    ema_model = copy.deepcopy(model)
    ema_model.eval()
    for p in ema_model.parameters():
        p.requires_grad_(False)
|
|
    batch_size = tc.get("batch_size", 128)
    print(f"Loading data... (Batch size: {batch_size})")
    train_loader, val_loader = create_dataloaders(config)
|
|
    optimizer = torch.optim.AdamW(model.parameters(), lr=tc["lr"], weight_decay=tc["weight_decay"])

    grad_accum = tc.get("grad_accum", 1)

    ckpt_dir = config.get("_ckpt_dir", "checkpoints")
    os.makedirs(ckpt_dir, exist_ok=True)
    start_step, best_val_loss = 0, float("inf")
|
|
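    # Resume from latest.pt (falling back to best.pt); the "_orig_mod." prefix is stripped
    # in case the checkpoint was saved from a torch.compile-wrapped model.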
    if resume:
        ckpt_path = f"{ckpt_dir}/latest.pt"
        if not os.path.exists(ckpt_path):
            ckpt_path = f"{ckpt_dir}/best.pt"
        if os.path.exists(ckpt_path):
            ckpt = torch.load(ckpt_path, map_location=device, weights_only=False)
            model.load_state_dict({k.replace("_orig_mod.", ""): v for k, v in ckpt["model"].items()})
            optimizer.load_state_dict(ckpt["optimizer"])
            start_step, best_val_loss = ckpt["step"], ckpt.get("best_val_loss", float("inf"))
            if "ema_model" in ckpt:
                ema_model.load_state_dict({k.replace("_orig_mod.", ""): v for k, v in ckpt["ema_model"].items()})
|
|
    model.train()
    step = start_step
    max_steps, log_every, eval_every = tc["max_steps"], tc["log_every"], tc.get("eval_every", 500)
    early_stop_patience = tc.get("early_stop_patience", 5000)
|
|
    running_loss, running_acc, running_count, total_samples = 0.0, 0.0, 0, 0
    micro_step = 0
    t0_global = time.time()
    data_iter = iter(train_loader)
|
|
    print(f"\n=== Training started (step {step}/{max_steps}) ===")
|
|
    while step < max_steps:
        try:
            batch = next(data_iter)
        except StopIteration:
            data_iter = iter(train_loader)
            batch = next(data_iter)
|
|
        token_ids = batch["token_ids"].to(device)
        embedding = batch["embedding"].to(device)
        padding_mask = batch["padding_mask"].to(device)
|
|
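        # apply_mask corrupts token_ids with the mask token and returns the masked ids,
        # the boolean mask of positions to predict, and the per-sample mask ratio.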
        masked_ids, target_mask, mask_ratio = apply_mask(token_ids, mc["mask_token_id"], padding_mask)
|
|
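        # New accumulation cycle: refresh the learning rate and clear accumulated gradients.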
        if micro_step == 0:
            lr = get_lr(step, tc["warmup_steps"], max_steps, tc["lr"], tc.get("min_lr_ratio", 0.0))
            for pg in optimizer.param_groups:
                pg["lr"] = lr
            optimizer.zero_grad()
|
|
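        # bf16 forward pass; the vocabulary projection and cross-entropy below are applied
        # in chunks over the flattened positions rather than materializing one
        # (batch * seq_len, vocab_size) logits tensor in a single call.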
        with torch.amp.autocast('cuda', dtype=torch.bfloat16):
            hidden = model.forward_hidden(masked_ids, embedding, padding_mask)

            mask_flat = target_mask.view(-1)
            pad_flat = padding_mask.view(-1)
            active_mask = mask_flat & (~pad_flat)
            total_active = active_mask.sum().item()

            chunk_size = 256
            total_positions = hidden.shape[0] * hidden.shape[1]
            hidden_flat = hidden.view(-1, hidden.shape[-1])
            targets_flat = token_ids.view(-1)

            total_loss = torch.tensor(0.0, device=device)
            total_correct = 0
|
|
            for i in range(0, total_positions, chunk_size):
                end = min(i + chunk_size, total_positions)
                h_chunk = hidden_flat[i:end]
                t_chunk = targets_flat[i:end]
                m_chunk = active_mask[i:end].float()

                w = model.output_proj.weight
                logits_chunk = F.linear(h_chunk, w)

                loss_chunk = F.cross_entropy(logits_chunk, t_chunk, reduction="none")
                total_loss = total_loss + (loss_chunk * m_chunk).sum()

                with torch.no_grad():
                    preds_chunk = logits_chunk.argmax(-1)
                    total_correct += ((preds_chunk == t_chunk) * m_chunk.bool()).sum().item()
|
|
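            # Average CE over the actively masked positions, apply the 1/mask_ratio
            # weighting of the masked-diffusion objective, and scale down by grad_accum.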
            loss = total_loss / max(total_active, 1)
            loss_weight = (1.0 / mask_ratio.squeeze(1)).mean()
            loss = (loss * loss_weight) / grad_accum
|
|
        loss.backward()
|
|
        running_loss += loss.item() * grad_accum
        running_acc += total_correct / max(total_active, 1)
        running_count += 1
        total_samples += token_ids.shape[0]
        micro_step += 1
|
|
        if micro_step < grad_accum:
            continue
|
|
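        # Accumulation window complete: clip gradients, step the optimizer, then update EMA.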
        micro_step = 0
        nn.utils.clip_grad_norm_(model.parameters(), tc["max_grad_norm"])
        optimizer.step()

        with torch.no_grad():
            for ep, mp in zip(ema_model.parameters(), model.parameters()):
                ep.mul_(ema_decay).add_(mp, alpha=1 - ema_decay)
        step += 1
|
|
        if step % log_every == 0:
            avg_loss = running_loss / running_count
            avg_acc = running_acc / running_count
            elapsed = (time.time() - t0_global) / 60
            rate = total_samples / (time.time() - t0_global)
            print(f"step {step} | loss {avg_loss:.4f} | acc {avg_acc:.3f} | lr {lr:.2e} | {rate:.0f} samp/s | {elapsed:.1f}m", flush=True)
            running_loss, running_acc, running_count = 0.0, 0.0, 0
|
|
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="configs/v2_qwen3.yaml")
    parser.add_argument("--resume", action="store_true")
    args = parser.parse_args()
|
|
    with open(args.config) as f:
        config = yaml.safe_load(f)
    config["_ckpt_dir"] = f"checkpoints_{os.path.splitext(os.path.basename(args.config))[0]}"
    train(config, resume=args.resume)
|
|
if __name__ == "__main__":
    main()