import math
import os
import time
from contextlib import nullcontext
from datetime import datetime
from functools import partial

import torch
from model import Transformer, ModelArgs
from torch.distributed import destroy_process_group, init_process_group
from torch.nn.parallel import DistributedDataParallel as DDP

from pre_training_script import Task
from export import model_export

# -----------------------------------------------------------------------------
# I/O
out_dir = "out"
eval_interval = 200
log_interval = 1
eval_iters = 100
eval_only = False  # if True, script exits right after the first eval
always_save_checkpoint = True  # if True, always save a checkpoint after each eval
init_from = "scratch"  # 'scratch' or 'resume'
# wandb logging
wandb_log = False  # disabled by default
wandb_project = "llamac"
wandb_run_name = "run" + datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
# data
batch_size = 32  # if gradient_accumulation_steps > 1, this is the micro-batch size
max_seq_len = 384
vocab_source = "custom"  # llama2|custom; must be "custom" for a non-32K vocab (see assert below)
vocab_size = 12000
# model
dim = 192
n_layers = 6
n_heads = 6
n_kv_heads = 6
multiple_of = 16
dropout = 0.1
# adamw optimizer
gradient_accumulation_steps = 4  # used to simulate a larger batch size
learning_rate = 1e-3
max_iters = 20000
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0  # clip gradients at this value, or disable if == 0.0
# learning rate decay settings
decay_lr = True
warmup_iters = 1000
# system
device = "mps"  # set this for Apple Silicon
dtype = "bfloat16"  # float32|bfloat16|float16
compile = True
# -----------------------------------------------------------------------------
config_keys = [
    k
    for k, v in globals().items()
    if not k.startswith("_") and isinstance(v, (int, float, bool, str))
]
exec(open("configurator.py").read())  # overrides from command line or config file
config = {k: globals()[k] for k in config_keys}  # will be useful for logging
# -----------------------------------------------------------------------------

# fixing some hyperparams
lr_decay_iters = max_iters  # decay over the full training run
min_lr = 0.0

# validation checks
assert vocab_source in ["llama2", "custom"]
assert vocab_source == "custom" or vocab_size == 32000, "The vocab from Meta has 32K tokens"

# DDP setup
ddp = int(os.environ.get("RANK", -1)) != -1  # is this a ddp run?
if ddp:
    init_process_group(backend="nccl")
    ddp_rank = int(os.environ["RANK"])
    ddp_local_rank = int(os.environ["LOCAL_RANK"])
    ddp_world_size = int(os.environ["WORLD_SIZE"])
    device = f"cuda:{ddp_local_rank}"
    torch.cuda.set_device(device)
    master_process = ddp_rank == 0  # this process will do logging, checkpointing etc.
    seed_offset = ddp_rank  # each process gets a different seed
    # scale down the gradient accumulation steps per process proportionally
    assert gradient_accumulation_steps % ddp_world_size == 0
    gradient_accumulation_steps //= ddp_world_size
else:
    # if not ddp, we are running on a single process
    master_process = True
    seed_offset = 0
    ddp_world_size = 1

tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * max_seq_len
if master_process:
    print(f"tokens per iteration will be: {tokens_per_iter:,}")
    print(f"breaks down as: {gradient_accumulation_steps} grad accum steps * {ddp_world_size} processes * {batch_size} batch size * {max_seq_len} max seq len")

if master_process:
    os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
# allow TF32 (only relevant for CUDA, ignored on MPS/CPU)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# -------------------- FIXED: recognize MPS --------------------
if "cuda" in device:
    device_type = "cuda"
elif "mps" in device:
    device_type = "mps"
else:
    device_type = "cpu"
ptdtype = {"float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16}[dtype]
ctx = (
    nullcontext()
    if device_type == "cpu"
    else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
)
# --------------------------------------------------------------
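# Optional guard (not in the original script, a minimal sketch): verify that the
# requested backend actually exists on this machine and fall back to CPU
# otherwise, so a stale `device` setting fails softly here rather than deep
# inside the training loop. Single-process runs only; DDP always uses CUDA.
if not ddp:
    if device_type == "mps" and not torch.backends.mps.is_available():
        print("WARNING: MPS requested but unavailable, falling back to CPU")
        device, device_type, ctx = "cpu", "cpu", nullcontext()
    elif device_type == "cuda" and not torch.cuda.is_available():
        print("WARNING: CUDA requested but unavailable, falling back to CPU")
        device, device_type, ctx = "cpu", "cpu", nullcontext()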
# task-specific setup
iter_batches = partial(
    Task.iter_batches,
    batch_size=batch_size,
    max_seq_len=max_seq_len,
    vocab_size=vocab_size,
    vocab_source=vocab_source,
    device=device,
    num_workers=0,
)

iter_num = 0
best_val_loss = 1e9

# model init
model_args = dict(
    dim=dim,
    n_layers=n_layers,
    n_heads=n_heads,
    n_kv_heads=n_kv_heads,
    vocab_size=vocab_size,
    multiple_of=multiple_of,
    max_seq_len=max_seq_len,
    dropout=dropout,
)
if init_from == "scratch":
    print("Initializing a new model from scratch")
    gptconf = ModelArgs(**model_args)
    model = Transformer(gptconf)
elif init_from == "resume":
    print(f"Resuming training from {out_dir}")
    ckpt_path = os.path.join(out_dir, "ckpt.pt")
    checkpoint = torch.load(ckpt_path, map_location=device)
    # force these config attributes to match the checkpoint, or the model won't load
    checkpoint_model_args = checkpoint["model_args"]
    for k in ["dim", "n_layers", "n_heads", "n_kv_heads", "vocab_size", "multiple_of", "max_seq_len"]:
        model_args[k] = checkpoint_model_args[k]
    gptconf = ModelArgs(**model_args)
    model = Transformer(gptconf)
    state_dict = checkpoint["model"]
    # strip the torch.compile wrapper prefix from state dict keys, if present
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
    model.load_state_dict(state_dict)
    iter_num = checkpoint["iter_num"]
    best_val_loss = checkpoint["best_val_loss"]
model.to(device)

# -------------------- FIXED: GradScaler for CUDA only --------------------
if device_type == "cuda":
    scaler = torch.cuda.amp.GradScaler(enabled=(dtype == "float16"))
else:
    scaler = None  # no grad scaler on MPS/CPU
# --------------------------------------------------------------------------

# optimizer
optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)
if init_from == "resume" and "optimizer" in checkpoint:
    optimizer.load_state_dict(checkpoint["optimizer"])
checkpoint = None  # free up memory

# compile the model
# note: torch.compile support on the MPS backend varies by PyTorch version;
# set compile = False if this step errors out
if compile:
    print("compiling the model... (takes a ~minute)")
    unoptimized_model = model
    model = torch.compile(model)
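# Optional (an addition, not in the original script): report the parameter
# count as a quick sanity check that the config above builds the model you expect.
if master_process:
    n_params = sum(p.numel() for p in model.parameters())
    print(f"model has {n_params/1e6:.2f}M parameters")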
if compile else "" model._ddp_params_and_buffers_to_ignore = {prefix + "freqs_cis"} model = DDP(model, device_ids=[ddp_local_rank]) # helps estimate loss @torch.no_grad() def estimate_loss(): out = {} model.eval() for split in ["train", "val"]: batch_iter = iter_batches(split=split) losses = torch.zeros(eval_iters) for k in range(eval_iters): X, Y = next(batch_iter) with ctx: logits = model(X, Y) loss = raw_model.last_loss losses[k] = loss.item() out[split] = losses.mean() model.train() return out def get_lr(it): if it < warmup_iters: return learning_rate * it / warmup_iters if it > lr_decay_iters: return min_lr decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters) coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) return min_lr + coeff * (learning_rate - min_lr) if wandb_log and master_process: import wandb wandb.init(project=wandb_project, name=wandb_run_name, config=config) train_batch_iter = iter_batches(split="train") X, Y = next(train_batch_iter) t0 = time.time() local_iter_num = 0 raw_model = model.module if ddp else model running_mfu = -1.0 while True: lr = get_lr(iter_num) if decay_lr else learning_rate for param_group in optimizer.param_groups: param_group["lr"] = lr if iter_num % eval_interval == 0 and master_process: losses = estimate_loss() print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}") if wandb_log: try: wandb.log( { "iter": iter_num, "tokens": iter_num * tokens_per_iter, "loss/train": losses["train"], "loss/val": losses["val"], "lr": lr, "mfu": running_mfu * 100, }, step=iter_num ) except Exception as e: print(f"logging to wandb failed: {e}") if always_save_checkpoint: best_val_loss = losses["val"] if iter_num > 0: checkpoint = { "model": raw_model.state_dict(), "optimizer": optimizer.state_dict(), "model_args": model_args, "iter_num": iter_num, "best_val_loss": best_val_loss, "config": config, } print(f"saving checkpoint to {out_dir}") torch.save(checkpoint, os.path.join(out_dir, "ckpt.pt")) model_export(raw_model, os.path.join(out_dir, "model.bin"), version=0) if iter_num == 0 and eval_only: break for micro_step in range(gradient_accumulation_steps): if ddp: model.require_backward_grad_sync = micro_step == gradient_accumulation_steps - 1 with ctx: logits = model(X, Y) loss = raw_model.last_loss loss = loss / gradient_accumulation_steps X, Y = next(train_batch_iter) if scaler is not None: scaler.scale(loss).backward() else: loss.backward() if grad_clip != 0.0: if scaler is not None: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip) if scaler is not None: scaler.step(optimizer) scaler.update() else: optimizer.step() optimizer.zero_grad(set_to_none=True) t1 = time.time() dt = t1 - t0 t0 = t1 if iter_num % log_interval == 0 and master_process: lossf = loss.item() * gradient_accumulation_steps if local_iter_num >= 5: mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt) running_mfu = mfu if running_mfu == -1.0 else 0.9 * running_mfu + 0.1 * mfu print( f"{iter_num} | loss {lossf:.4f} | lr {lr:e} | {dt*1000:.2f}ms | mfu {running_mfu*100:.2f}%" ) iter_num += 1 local_iter_num += 1 if iter_num > max_iters: break if ddp: destroy_process_group()