trojan0x committed
Commit 4e89fea · verified · 1 parent: 654abe6

Add ultron/train.py

Files changed (1)
  1. ultron/train.py +270 −0
ultron/train.py ADDED
@@ -0,0 +1,270 @@
"""
Ultron Training Script — FineWeb-Edu Pretraining

Based on Parcae's training recipe:
- AdamW with cosine decay
- Per-sequence depth sampling within micro-batches
- Backprop truncation: μ_bwd = ⌈μ_rec/2⌉ (an illustrative sketch appears below, before main())
- Linear warmup (2000 steps)
- bfloat16 mixed precision

Usage:
    Single GPU: python train.py
    Multi-GPU:  torchrun --nproc_per_node=N train.py
"""

import os
import math
import time
import json
import argparse
from dataclasses import asdict

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast, GradScaler

from ultron.model import Ultron, UltronConfig
from ultron.variants import ultron_small, ultron_base, ultron_medium, ultron_large


def get_args():
    parser = argparse.ArgumentParser(description="Ultron Pretraining")
    parser.add_argument("--variant", type=str, default="small",
                        choices=["small", "base", "medium", "large", "custom"])
    parser.add_argument("--dataset", type=str, default="HuggingFaceFW/fineweb-edu")
    parser.add_argument("--dataset_subset", type=str, default="sample-10BT")
    parser.add_argument("--seq_len", type=int, default=1024)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--grad_accum", type=int, default=4)
    parser.add_argument("--lr", type=float, default=3e-4)
    parser.add_argument("--min_lr", type=float, default=3e-5)
    parser.add_argument("--warmup_steps", type=int, default=2000)
    parser.add_argument("--max_steps", type=int, default=50000)
    parser.add_argument("--weight_decay", type=float, default=0.1)
    parser.add_argument("--grad_clip", type=float, default=1.0)
    parser.add_argument("--log_interval", type=int, default=10)
    parser.add_argument("--save_interval", type=int, default=5000)
    parser.add_argument("--eval_interval", type=int, default=500)
    parser.add_argument("--output_dir", type=str, default="./ultron_checkpoints")
    parser.add_argument("--push_to_hub", action="store_true")
    parser.add_argument("--hub_model_id", type=str, default=None)
    parser.add_argument("--use_moe", action="store_true")
    parser.add_argument("--gradient_checkpointing", action="store_true")
    # BooleanOptionalAction so this default-on flag can actually be disabled via
    # --no-per_seq_depth (a plain store_true with default=True can never be turned off).
    parser.add_argument("--per_seq_depth", action=argparse.BooleanOptionalAction, default=True,
                        help="Per-sequence depth sampling (Parcae)")
    return parser.parse_args()


def get_lr(step: int, warmup_steps: int, max_steps: int, lr: float, min_lr: float) -> float:
    """Cosine decay with linear warmup."""
    if step < warmup_steps:
        return lr * step / warmup_steps
    if step >= max_steps:
        return min_lr
    progress = (step - warmup_steps) / (max_steps - warmup_steps)
    return min_lr + 0.5 * (lr - min_lr) * (1 + math.cos(math.pi * progress))


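# Illustrative values of the default schedule (lr=3e-4, min_lr=3e-5, warmup_steps=2000,
# max_steps=50000); these are worked examples of the formula above, not measured output:
#   get_lr(1000,  2000, 50000, 3e-4, 3e-5) -> 1.5e-4   (halfway through linear warmup)
#   get_lr(2000,  2000, 50000, 3e-4, 3e-5) -> 3.0e-4   (peak, end of warmup)
#   get_lr(26000, 2000, 50000, 3e-4, 3e-5) -> 1.65e-4  (cosine midpoint)
#   get_lr(50000, 2000, 50000, 3e-4, 3e-5) -> 3.0e-5   (floor)

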
def sample_depth(mu_rec: int, per_sequence: bool = True, batch_size: int = 1) -> list[int]:
    """Sample recurrence depths.

    Per-sequence sampling (Parcae): each sequence in the batch gets a
    different depth, reducing loss spike variance.
    """
    if per_sequence:
        # Geometric distribution with mean ~mu_rec (Parcae's Λ)
        depths = []
        for _ in range(batch_size):
            # Geometric(p=1/mu_rec) counts failures before the first success, so after the
            # +1 shift its mean is ~mu_rec; the sample is clamped to [1, 2*mu_rec].
            d = max(1, min(2 * mu_rec, int(torch.distributions.Geometric(
                probs=1.0 / mu_rec
            ).sample().item()) + 1))
            depths.append(d)
        return depths
    else:
        return [mu_rec] * batch_size


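# Illustrative sketch (not used by this script): the "backprop truncation:
# mu_bwd = ceil(mu_rec/2)" item from the module docstring lives inside the model's
# recurrence, not in this file. Assuming a hypothetical `loop_step` callable that
# applies one recurrence iteration to a hidden state h, the idea is to detach the
# early iterations so gradients only flow through the last mu_bwd of them:
def truncated_recurrence(h, loop_step, mu_rec: int):
    mu_bwd = math.ceil(mu_rec / 2)
    for i in range(mu_rec):
        if i == mu_rec - mu_bwd:
            # Cut the autograd graph here: only the final mu_bwd iterations get gradients.
            h = h.detach()
        h = loop_step(h)
    return h

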
def main():
    args = get_args()

    # Setup
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    use_bf16 = device.type == "cuda" and torch.cuda.is_bf16_supported()

    # Model config
    # Note: the "custom" choice accepted by --variant has no entry here and would
    # need to be added to variant_map before it can be used.
    variant_map = {
        "small": ultron_small,
        "base": ultron_base,
        "medium": ultron_medium,
        "large": ultron_large,
    }
    cfg = variant_map[args.variant]()

    if args.use_moe:
        cfg.use_moe = True
    if args.gradient_checkpointing:
        cfg.gradient_checkpointing = True
    cfg.max_seq_len = args.seq_len

    # Build model
    model = Ultron(cfg).to(device)
    total_params = model.get_num_params(non_embedding=False)
    print(f"Ultron {args.variant} | {total_params:,} params | device: {device}")
    print(f"Config: {json.dumps(asdict(cfg), indent=2)}")
    print(f"Spectral radius ρ(A): {model.get_spectral_radius():.6f}")

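    # The docstring's torchrun/multi-GPU path is not wired up in this script; it would
    # additionally need process-group initialization and a DistributedDataParallel
    # wrapper, e.g. (illustrative sketch only):
    #
    #     torch.distributed.init_process_group(backend="nccl")
    #     local_rank = int(os.environ["LOCAL_RANK"])
    #     torch.cuda.set_device(local_rank)
    #     model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
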
    # Optimizer (Parcae recipe)
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=args.lr,
        betas=(0.9, 0.95),
        weight_decay=args.weight_decay,
    )

    # Dataset
    try:
        from datasets import load_dataset
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        print(f"Loading dataset: {args.dataset}/{args.dataset_subset}...")
        ds = load_dataset(args.dataset, args.dataset_subset, split="train", streaming=True)
        # Keep one persistent iterator so the training loop advances through the stream
        # instead of re-reading the first few documents every step.
        ds_iter = iter(ds)

        def tokenize_batch(examples):
            return tokenizer(examples["text"], truncation=True, max_length=args.seq_len + 1,
                             padding="max_length", return_tensors="pt")

    except ImportError:
        print("WARNING: datasets/transformers not installed. Using random data for testing.")
        ds = None

    # Training loop
    os.makedirs(args.output_dir, exist_ok=True)
    scaler = GradScaler() if device.type == "cuda" and not use_bf16 else None

    model.train()
    step = 0
    start_time = time.time()
    running_loss = 0.0

    print(f"\nTraining for {args.max_steps} steps...")
    print(f"  Effective batch size: {args.batch_size * args.grad_accum}")
    print(f"  Sequence length: {args.seq_len}")
    print(f"  Per-sequence depth sampling: {args.per_seq_depth}")
    print(f"  Mixed precision: {'bf16' if use_bf16 else 'fp16' if scaler else 'fp32'}")
    print()

    while step < args.max_steps:
        # Get batch
        if ds is not None:
            # Real data path: pull the next batch_size documents from the stream
            batch_texts = []
            while len(batch_texts) < args.batch_size:
                try:
                    batch_texts.append(next(ds_iter)["text"])
                except StopIteration:
                    # Restart the stream if it is exhausted before max_steps
                    ds_iter = iter(ds)
            tokens = tokenizer(batch_texts, truncation=True, max_length=args.seq_len + 1,
                               padding="max_length", return_tensors="pt")
            input_ids = tokens["input_ids"].to(device)
        else:
            # Random data for testing
            input_ids = torch.randint(0, cfg.vocab_size, (args.batch_size, args.seq_len + 1), device=device)

        x = input_ids[:, :-1]
        y = input_ids[:, 1:]

        # Sample depths (Parcae per-sequence sampling)
        depths = sample_depth(cfg.max_loop_iters, args.per_seq_depth, x.shape[0])

        # Update learning rate, then forward pass
        lr = get_lr(step, args.warmup_steps, args.max_steps, args.lr, args.min_lr)
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr

        with autocast(dtype=torch.bfloat16 if use_bf16 else torch.float16, enabled=device.type == "cuda"):
            # A depth is sampled per sequence, but in practice the whole batch is run
            # at the mean depth (true per-sequence depth would need custom loss
            # masking; see the commented sketch below).
            mean_depth = max(1, sum(depths) // len(depths))
            logits = model(x, n_loops=mean_depth)
            loss = F.cross_entropy(logits.reshape(-1, cfg.vocab_size), y.reshape(-1))
            loss = loss / args.grad_accum

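        # A true per-sequence variant (illustrative sketch only, not used here) would
        # group sequences by sampled depth and accumulate a weighted loss, e.g.:
        #
        #     loss = 0.0
        #     for d in set(depths):
        #         idx = [i for i, di in enumerate(depths) if di == d]
        #         logits_d = model(x[idx], n_loops=d)
        #         group_loss = F.cross_entropy(logits_d.reshape(-1, cfg.vocab_size),
        #                                      y[idx].reshape(-1))
        #         loss = loss + group_loss * len(idx) / len(depths)
        #     loss = loss / args.grad_accum
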
        # Backward
        if scaler:
            scaler.scale(loss).backward()
        else:
            loss.backward()

        running_loss += loss.item() * args.grad_accum

        # Gradient accumulation step
        if (step + 1) % args.grad_accum == 0:
            if scaler:
                scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
            if scaler:
                scaler.step(optimizer)
                scaler.update()
            else:
                optimizer.step()
            optimizer.zero_grad()

        step += 1

        # Logging
        if step % args.log_interval == 0:
            avg_loss = running_loss / args.log_interval
            elapsed = time.time() - start_time
            tokens_per_sec = (step * args.batch_size * args.seq_len) / elapsed
            rho = model.get_spectral_radius()
            print(f"step {step:>6d} | loss {avg_loss:.4f} | lr {lr:.2e} | "
                  f"ρ(A) {rho:.4f} | depth {mean_depth} | "
                  f"tok/s {tokens_per_sec:.0f} | elapsed {elapsed:.0f}s")
            running_loss = 0.0

        # Save checkpoint
        if step % args.save_interval == 0:
            ckpt_path = os.path.join(args.output_dir, f"step_{step}.pt")
            torch.save({
                "step": step,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "config": asdict(cfg),
                "loss": avg_loss if step >= args.log_interval else float("inf"),
            }, ckpt_path)
            print(f" Saved checkpoint: {ckpt_path}")

            if args.push_to_hub and args.hub_model_id:
                try:
                    from huggingface_hub import HfApi
                    api = HfApi()
                    api.upload_file(
                        path_or_fileobj=ckpt_path,
                        path_in_repo=f"checkpoints/step_{step}.pt",
                        repo_id=args.hub_model_id,
                    )
                    print(f" Pushed to hub: {args.hub_model_id}")
                except Exception as e:
                    print(f" Hub push failed: {e}")

    print(f"\nTraining complete! {step} steps in {time.time() - start_time:.0f}s")
    print(f"Final ρ(A): {model.get_spectral_radius():.6f}")

    # Save final
    final_path = os.path.join(args.output_dir, "final.pt")
    torch.save({
        "model_state_dict": model.state_dict(),
        "config": asdict(cfg),
    }, final_path)
    print(f"Saved final model: {final_path}")


if __name__ == "__main__":
    main()
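
# Loading a saved checkpoint later (illustrative sketch; assumes UltronConfig is a
# dataclass whose fields match the dict saved via asdict(cfg) above):
#
#     ckpt = torch.load("ultron_checkpoints/final.pt", map_location="cpu")
#     cfg = UltronConfig(**ckpt["config"])
#     model = Ultron(cfg)
#     model.load_state_dict(ckpt["model_state_dict"])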