{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "A100" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU" }, "cells": [ { "cell_type": "code", "source": [ "!pip install -q x-transformers\n", "!pip install -q flash-attn --no-build-isolation" ], "metadata": { "id": "6q9RTvlf5IiS" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torch.optim as optim\n", "import math\n", "import os\n", "import sys\n", "import subprocess\n", "import hashlib\n", "import gc\n", "import platform\n", "from datetime import datetime\n", "from tqdm.auto import tqdm\n", "from torch.utils.data import DataLoader\n", "from torch.utils.tensorboard import SummaryWriter\n", "from transformers import RobertaTokenizerFast, get_cosine_schedule_with_warmup, DataCollatorForLanguageModeling\n", "from datasets import load_dataset\n", "from x_transformers import Encoder\n", "\n", "# ==========================================\n", "# 1. CONFIGURATION\n", "# ==========================================\n", "# YOUR REPO ID (Created in previous step)\n", "HF_ID = \"prism-lab/wikitext-103-prism-32k-seq4k\"\n", "\n", "# Hyperparameters\n", "VOCAB_SIZE = 32768\n", "SEQ_LEN = 4096\n", "BATCH_SIZE = 8\n", "EPOCHS = 40\n", "LR = 1e-3\n", "D_MODEL = 512\n", "RESUME_PATH = None\n", "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "torch.set_float32_matmul_precision(\"high\")\n", "\n", "# ==========================================\n", "# 2. DATA PIPELINE (The \"Pro\" Way)\n", "# ==========================================\n", "def prepare_data_from_hub():\n", " print(f\"โฌ‡๏ธ Pulling Pre-Tokenized Data from {HF_ID}...\")\n", "\n", " # 1. Load Tokenizer (Instant)\n", " # This pulls the exact tokenizer you uploaded\n", " tokenizer = RobertaTokenizerFast.from_pretrained(HF_ID)\n", "\n", " # 2. Load Dataset (Instant)\n", " # This pulls the already chunked/tokenized data\n", " dataset = load_dataset(HF_ID)\n", "\n", " print(f\"โœ… Loaded {len(dataset['train'])} training chunks.\")\n", "\n", " # 3. Collator\n", " data_collator = DataCollatorForLanguageModeling(\n", " tokenizer=tokenizer,\n", " mlm=True,\n", " mlm_probability=0.15\n", " )\n", "\n", " return dataset, data_collator\n", "\n", "\n", "class FNetBlock(nn.Module):\n", " def __init__(self, d_model, d_ff, dropout):\n", " super().__init__()\n", " self.norm_mix = nn.LayerNorm(d_model) # LayerNorm is safer for FNet than RMSNorm\n", " self.norm_ff = nn.LayerNorm(d_model)\n", "\n", " self.mix_dropout = nn.Dropout(dropout)\n", "\n", " self.ff = nn.Sequential(\n", " nn.Linear(d_model, d_ff),\n", " nn.GELU(),\n", " nn.Dropout(dropout),\n", " nn.Linear(d_ff, d_model),\n", " nn.Dropout(dropout)\n", " )\n", "\n", " def forward(self, x):\n", " # 1. Fourier Mixing Branch\n", " residual = x\n", " x = self.norm_mix(x)\n", "\n", " # --- THE FIX ---\n", " with torch.cuda.amp.autocast(enabled=False):\n", " x = x.float()\n", " # norm='ortho' makes the FFT energy-preserving.\n", " # Output magnitude will match input magnitude (~1).\n", " x = torch.fft.fftn(x, dim=(-2, -1), norm='ortho').real\n", " x = x.to(dtype=residual.dtype)\n", " # ---------------\n", "\n", " # Now 'x' and 'residual' have roughly same magnitude.\n", " # The skip connection works again.\n", " x = self.mix_dropout(x)\n", " x = x + residual\n", "\n", " # 2. 
Feed Forward Branch\n", " residual = x\n", " x = self.norm_ff(x)\n", " x = self.ff(x)\n", " return x + residual\n", "\n", "\n", "class FNetEncoder(nn.Module):\n", " def __init__(self, depth, d_model, d_ff, dropout):\n", " super().__init__()\n", " self.layers = nn.ModuleList([\n", " FNetBlock(d_model, d_ff, dropout) for _ in range(depth)\n", " ])\n", " # [FIX] Use LayerNorm here to match the blocks\n", " self.norm_out = nn.LayerNorm(d_model)\n", "\n", " def forward(self, x):\n", " for layer in self.layers:\n", " x = layer(x)\n", " return self.norm_out(x)\n", "\n", "\n", "class HybridFNetMLM(nn.Module):\n", " def __init__(self, vocab_size, d_model, seq_len, d_ff, dropout):\n", " super().__init__()\n", "\n", " # 1. Standard Embeddings + Absolute Positions\n", " # (FNet NEEDS these because FFT is position-blind)\n", " self.token_emb = nn.Embedding(vocab_size, d_model)\n", " self.pos_emb = nn.Parameter(torch.randn(1, seq_len, d_model) * 0.02)\n", " self.dropout = nn.Dropout(dropout)\n", "\n", " self.fnet_encoder = FNetEncoder(\n", " depth=6,\n", " d_model=d_model,\n", " d_ff=d_ff,\n", " dropout=dropout\n", " )\n", "\n", " # 3. The Attention Cap (1 Layer) -> YOUR CONFIGURATION\n", " self.transformer_cap = Encoder(\n", " dim=d_model,\n", " depth=1, # Just 1 layer\n", " heads=8,\n", " rotary_pos_emb=True, # RoPE (Hybrid Positioning: Absolute for FNet, Rotary for Attn)\n", " attn_flash=True,\n", " attn_dropout=dropout,\n", " ff_dropout=dropout\n", " # Removed 'dim_head' (fixes your error)\n", " # Removed 'use_rmsnorm' (matches your snippet)\n", " # Removed 'ff_glu' (matches your snippet)\n", " )\n", "\n", " # 4. MLM Head\n", " self.final_norm = nn.LayerNorm(d_model)\n", " self.to_logits = nn.Linear(d_model, vocab_size)\n", "\n", " # Weight Tying\n", " self.to_logits.weight = self.token_emb.weight\n", "\n", " def forward(self, input_ids):\n", " # A. Embedding\n", " x = self.token_emb(input_ids)\n", " b, n, d = x.shape\n", "\n", " # Add Absolute Positions (Crucial for FNet layers)\n", " x = x + self.pos_emb[:, :n, :]\n", " x = self.dropout(x)\n", "\n", " x = self.fnet_encoder(x)\n", "\n", " # C. Attention Refinement (1 Layer)\n", " # Note: This layer will internally apply RoPE to Q/K\n", " x = self.transformer_cap(x)\n", "\n", " # D. Output\n", " x = self.final_norm(x)\n", " return self.to_logits(x)\n", "\n", "# ==========================================\n", "# INSTANTIATE MODEL\n", "# ==========================================\n", "print(\"๐Ÿ—๏ธ Constructing Hybrid FNet (6-Spectral + 1-Attention)...\")\n", "\n", "def count_active_parameters(model):\n", " print(f\"\\n{'='*60}\")\n", " print(f\"๐Ÿงฉ DETAILED PARAMETER BREAKDOWN\")\n", " print(f\"{'='*60}\")\n", "\n", " # 1. 
Identify Parameter Groups\n", " # ----------------------------\n", " embedding_ids = set()\n", " active_ids = set()\n", " unique_params = set()\n", "\n", " # --- MEMORY (Embeddings & Encodings) ---\n", " embedding_params = 0\n", " for p in model.token_emb.parameters():\n", " embedding_params += p.numel()\n", " embedding_ids.add(id(p))\n", " unique_params.add(id(p))\n", "\n", " pos_params = model.pos_emb.numel()\n", " embedding_ids.add(id(model.pos_emb))\n", " unique_params.add(id(model.pos_emb))\n", "\n", " total_memory = embedding_params + pos_params\n", "\n", " # --- LOGIC (Active Processing) ---\n", " active_count = 0\n", " for name, param in model.named_parameters():\n", " if id(param) in embedding_ids:\n", " continue\n", " if id(param) not in active_ids:\n", " active_count += param.numel()\n", " active_ids.add(id(param))\n", " unique_params.add(id(param))\n", "\n", " # 2. Calculate Totals\n", " # -------------------\n", " total_physical_params = total_memory + active_count\n", "\n", " # 3. Print Report (FIXED SYNTAX)\n", " # -------------------\n", " print(f\"{'Component':<25} | {'Count':<15} | {'% of Model':<10}\")\n", " print(f\"{'-'*60}\")\n", "\n", " print(f\"{'Token Embeddings':<25} | {embedding_params:<15,} | {embedding_params/total_physical_params:.1%}\")\n", " print(f\"{'Positional Encodings':<25} | {pos_params:<15,} | {pos_params/total_physical_params:.1%}\")\n", " print(f\"{'[MEMORY TOTAL]':<25} | {total_memory:<15,} | {total_memory/total_physical_params:.1%}\")\n", " print(f\"{'-'*60}\")\n", "\n", " fnet_params = sum(p.numel() for p in model.fnet_encoder.parameters())\n", " cap_params = sum(p.numel() for p in model.transformer_cap.parameters())\n", " misc_params = active_count - fnet_params - cap_params\n", "\n", " print(f\"{'FNet Encoder (6 Layers)':<25} | {fnet_params:<15,} | {fnet_params/total_physical_params:.1%}\")\n", " print(f\"{'Transformer Cap (1 Layer)':<25}| {cap_params:<15,} | {cap_params/total_physical_params:.1%}\")\n", " print(f\"{'Norms & Biases':<25} | {misc_params:<15,} | {misc_params/total_physical_params:.1%}\")\n", " print(f\"{'[ACTIVE LOGIC TOTAL]':<25} | {active_count:<15,} | {active_count/total_physical_params:.1%}\")\n", "\n", " print(f\"{'='*60}\")\n", " print(f\"๐Ÿ“ข FINAL ACTIVE PARAMETERS: {active_count / 1_000_000:.2f} M\")\n", " print(f\"{'='*60}\\n\")\n", "\n", " return active_count\n", "\n", "\n" ], "metadata": { "id": "V7DOwmmUjyin" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# ==========================================\n", "# 3. INITIALIZATION FUNCTION (FNet Specific)\n", "# ==========================================\n", "def init_fnet_weights(model):\n", " print(\"โœจ Applying BERT-Style Initialization (N(0, 0.02))...\")\n", "\n", " for name, module in model.named_modules():\n", " # A. Linear Layers (Projections, FFNs)\n", " if isinstance(module, nn.Linear):\n", " module.weight.data.normal_(mean=0.0, std=0.02)\n", " if module.bias is not None:\n", " module.bias.data.zero_()\n", "\n", " # B. Embeddings (Tokens)\n", " elif isinstance(module, nn.Embedding):\n", " module.weight.data.normal_(mean=0.0, std=0.02)\n", " if module.padding_idx is not None:\n", " module.weight.data[module.padding_idx].zero_()\n", "\n", " # C. LayerNorms (Stability)\n", " elif isinstance(module, nn.LayerNorm):\n", " if module.bias is not None: # <--- FIX IS HERE\n", " module.bias.data.zero_()\n", " if module.weight is not None:\n", " module.weight.data.fill_(1.0)\n", "\n", " # D. 
Positional Embeddings (Manually handle the nn.Parameter)\n", " if hasattr(model, 'pos_emb') and model.pos_emb is not None:\n", " model.pos_emb.data.normal_(mean=0.0, std=0.02)\n", "\n", " print(\"โœ… Initialization Complete.\")\n", "\n", "\n", "# ==========================================\n", "# 4. LOGGING UTILITIES\n", "# ==========================================\n", "def generate_run_id():\n", " raw = datetime.now().strftime(\"%Y%m%d%H%M%S%f\")\n", " return hashlib.md5(raw.encode()).hexdigest()[:8]\n", "\n", "def log_full_environment(save_dir, run_id, config):\n", " log_path = os.path.join(save_dir, f\"env_metadata_{run_id}.txt\")\n", "\n", " # 1. Gather System Info\n", " sys_info = {\n", " \"Python Version\": sys.version.split()[0],\n", " \"OS\": platform.platform(),\n", " \"PyTorch Version\": torch.__version__,\n", " \"CUDA Available\": torch.cuda.is_available(),\n", " \"CUDNN Version\": torch.backends.cudnn.version() if torch.cuda.is_available() else \"N/A\"\n", " }\n", "\n", " # 2. Gather GPU Info\n", " gpu_info = []\n", " if torch.cuda.is_available():\n", " for i in range(torch.cuda.device_count()):\n", " props = torch.cuda.get_device_properties(i)\n", " gpu_info.append(f\"GPU {i}: {props.name} | VRAM: {props.total_memory / 1e9:.2f} GB\")\n", " else:\n", " gpu_info.append(\"No GPU Detected\")\n", "\n", " # 3. Gather Pip Freeze\n", " try:\n", " pip_packages = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).decode('utf-8')\n", " except Exception as e:\n", " pip_packages = f\"Could not retrieve pip packages: {e}\"\n", "\n", " # 4. Write to File\n", " with open(log_path, \"w\") as f:\n", " f.write(f\"๐Ÿงช EXPERIMENT METADATA | Run ID: {run_id}\\n\")\n", " f.write(f\"{'='*60}\\n\\n\")\n", "\n", " f.write(f\"--- [1] CONFIGURATION ---\\n\")\n", " for k, v in config.items():\n", " f.write(f\"{k}: {v}\\n\")\n", " f.write(\"\\n\")\n", "\n", " f.write(f\"--- [2] SYSTEM HARDWARE ---\\n\")\n", " for k, v in sys_info.items():\n", " f.write(f\"{k}: {v}\\n\")\n", " for g in gpu_info:\n", " f.write(f\"{g}\\n\")\n", " f.write(\"\\n\")\n", "\n", " f.write(f\"--- [3] INSTALLED PACKAGES (pip freeze) ---\\n\")\n", " f.write(pip_packages)\n", "\n", " print(f\"๐Ÿ“ Full Environment Snapshot (GPU + Pip) saved to: {log_path}\")\n", "\n", "\n", "def save_checkpoint(path, model, optimizer, scheduler, epoch, best_loss, config):\n", " torch.save({\n", " 'epoch': epoch,\n", " 'model_state_dict': model.state_dict(),\n", " 'optimizer_state_dict': optimizer.state_dict(),\n", " 'scheduler_state_dict': scheduler.state_dict(),\n", " 'best_val_loss': best_loss,\n", " 'config': config\n", " }, path)\n", "\n", "# ==========================================\n", "# 5. 
TRAINING LOOP\n",
"# ==========================================\n",
"def run_wikitext_training(experiment_name=\"FNet_Encoder\"):\n",
"    from google.colab import drive\n",
"    if not os.path.exists('/content/drive'): drive.mount('/content/drive')\n",
"\n",
"    # --- SETUP DIRS ---\n",
"    if RESUME_PATH and os.path.exists(RESUME_PATH):\n",
"        print(f\"🔄 RESUMING FROM: {RESUME_PATH}\")\n",
"        checkpoint = torch.load(RESUME_PATH, map_location=DEVICE)\n",
"        SAVE_DIR = os.path.dirname(RESUME_PATH)\n",
"        run_id = checkpoint.get('config', {}).get('run_id', 'resumed')\n",
"    else:\n",
"        run_id = generate_run_id()\n",
"        timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n",
"        folder_name = f\"{experiment_name}_{timestamp}_{run_id}\"\n",
"        SAVE_DIR = os.path.join(\"/content/drive/My Drive/PRISM_Experiments\", folder_name)\n",
"    os.makedirs(SAVE_DIR, exist_ok=True)\n",
"    print(f\"💾 Checkpoints: {SAVE_DIR}\")\n",
"\n",
"    writer = SummaryWriter(log_dir=SAVE_DIR)\n",
"    GRAD_ACCUM = 4  # Effective batch = BATCH_SIZE * GRAD_ACCUM = 32 sequences of 4,096 tokens per optimizer step\n",
"\n",
"    # Load Data\n",
"    lm_datasets, data_collator = prepare_data_from_hub()\n",
"\n",
"    # Create Loaders\n",
"    train_loader = DataLoader(\n",
"        lm_datasets[\"train\"], batch_size=BATCH_SIZE, shuffle=True,\n",
"        collate_fn=data_collator, num_workers=2, pin_memory=True,\n",
"        prefetch_factor=2, persistent_workers=True\n",
"    )\n",
"    valid_loader = DataLoader(\n",
"        lm_datasets[\"validation\"], batch_size=BATCH_SIZE,\n",
"        collate_fn=data_collator, num_workers=2, pin_memory=True\n",
"    )\n",
"    test_loader = DataLoader(\n",
"        lm_datasets[\"test\"], batch_size=BATCH_SIZE,\n",
"        collate_fn=data_collator, num_workers=2, pin_memory=True\n",
"    )\n",
"\n",
"    print(\"\\n⚡ INITIALIZING HYBRID FNET MODEL...\")\n",
"\n",
"    # INSTANTIATE\n",
"    model = HybridFNetMLM(\n",
"        vocab_size=VOCAB_SIZE,\n",
"        d_model=D_MODEL,\n",
"        seq_len=SEQ_LEN,\n",
"        d_ff=D_MODEL * 4,\n",
"        dropout=0.1\n",
"    ).to(DEVICE)\n",
"\n",
"    # OPTIMIZER\n",
"    optimizer = optim.AdamW(model.parameters(), lr=LR, weight_decay=0.01)\n",
"\n",
"    total_steps = (len(train_loader) // GRAD_ACCUM) * EPOCHS\n",
"    scheduler = get_cosine_schedule_with_warmup(\n",
"        optimizer, num_warmup_steps=int(0.05 * total_steps), num_training_steps=total_steps\n",
"    )\n",
"    criterion = nn.CrossEntropyLoss()\n",
"\n",
"    start_epoch = 0\n",
"    best_val_loss = float('inf')\n",
"\n",
"    # RESUME OR INIT\n",
"    if RESUME_PATH and os.path.exists(RESUME_PATH):\n",
"        model.load_state_dict(checkpoint['model_state_dict'])\n",
"        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n",
"        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n",
"        start_epoch = checkpoint['epoch'] + 1\n",
"        best_val_loss = checkpoint['best_val_loss']\n",
"        del checkpoint\n",
"        torch.cuda.empty_cache()\n",
"    else:\n",
"        # Fresh run: apply BERT-style initialization\n",
"        init_fnet_weights(model)\n",
"\n",
"    # METRICS\n",
"    try:\n",
"        active_params = count_active_parameters(model)\n",
"    except Exception as e:\n",
"        print(f\"⚠️ Detailed parameter breakdown failed ({e}); using a raw parameter count instead.\")\n",
"        active_params = sum(p.numel() for p in model.parameters())\n",
"\n",
"    total_params = sum(p.numel() for p in model.parameters())\n",
"    print(f\"✅ Model Ready. 
Total Raw Params: {total_params/1e6:.2f}M\")\n", "\n", " log_full_environment(SAVE_DIR, run_id, {\n", " \"model\": \"HybridFNetMLM\",\n", " \"d_model\": D_MODEL,\n", " \"depth\": \"6+1\",\n", " \"vocab\": VOCAB_SIZE,\n", " \"batch\": BATCH_SIZE,\n", " \"lr\": LR,\n", " \"active_params\": f\"{active_params/1e6:.2f}M\"\n", " })\n", "\n", " print(f\"\\n๐Ÿš€ STARTING (Ep {start_epoch+1} to {EPOCHS})\")\n", " global_step = (len(train_loader) // GRAD_ACCUM) * start_epoch\n", "\n", " for epoch in range(start_epoch, EPOCHS):\n", " model.train()\n", " pbar = tqdm(train_loader, desc=f\"Ep {epoch+1}/{EPOCHS}\")\n", "\n", " for step, batch in enumerate(pbar):\n", " x, y = batch['input_ids'].to(DEVICE), batch['labels'].to(DEVICE)\n", "\n", " # FNet Forward Pass\n", " logits = model(x)\n", "\n", " # Loss Calculation\n", " loss = criterion(logits.view(-1, VOCAB_SIZE), y.view(-1)) / GRAD_ACCUM\n", " loss.backward()\n", "\n", " if (step + 1) % GRAD_ACCUM == 0:\n", " # 1. Calc Norm\n", " grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n", "\n", " # 2. Step\n", " optimizer.step()\n", " scheduler.step()\n", " optimizer.zero_grad()\n", " global_step += 1\n", "\n", " # 3. LOGGING\n", " actual_loss = loss.item() * GRAD_ACCUM\n", " writer.add_scalar('Train/Loss', actual_loss, global_step)\n", " writer.add_scalar('Train/GradNorm', grad_norm.item(), global_step)\n", " writer.add_scalar('Train/LR', scheduler.get_last_lr()[0], global_step)\n", "\n", " # 4. Progress Bar\n", " pbar.set_postfix({\n", " 'loss': f\"{actual_loss:.4f}\",\n", " 'gnorm': f\"{grad_norm.item():.2f}\"\n", " })\n", "\n", " # VALIDATION\n", " model.eval()\n", " val_loss = 0\n", " with torch.no_grad():\n", " for batch in valid_loader:\n", " x, y = batch['input_ids'].to(DEVICE), batch['labels'].to(DEVICE)\n", " val_loss += criterion(model(x).view(-1, VOCAB_SIZE), y.view(-1)).item()\n", "\n", " avg_val_loss = val_loss / len(valid_loader)\n", " ppl = math.exp(avg_val_loss) if avg_val_loss < 100 else float('inf')\n", "\n", " print(f\"โœจ Epoch {epoch+1} | Val Loss: {avg_val_loss:.4f} | PPL: {ppl:.2f}\")\n", " writer.add_scalar('Val/PPL', ppl, epoch+1)\n", " writer.add_scalar('Val/Loss', avg_val_loss, epoch+1)\n", "\n", " config_dump = {\"epoch\": epoch, \"run_id\": run_id}\n", " save_checkpoint(os.path.join(SAVE_DIR, \"last.pt\"), model, optimizer, scheduler, epoch, best_val_loss, config_dump)\n", "\n", " if avg_val_loss < best_val_loss:\n", " best_val_loss = avg_val_loss\n", " torch.save(model.state_dict(), os.path.join(SAVE_DIR, \"best.pt\"))\n", " print(\" ๐Ÿ† New Best Model Saved!\")\n", "\n", " # FINAL TEST\n", " best_path = os.path.join(SAVE_DIR, \"best.pt\")\n", " if os.path.exists(best_path):\n", " model.load_state_dict(torch.load(best_path))\n", " model.eval()\n", " test_loss = 0\n", " with torch.no_grad():\n", " for batch in tqdm(test_loader, desc=\"Testing\"):\n", " x, y = batch['input_ids'].to(DEVICE), batch['labels'].to(DEVICE)\n", " test_loss += criterion(model(x).view(-1, VOCAB_SIZE), y.view(-1)).item()\n", " print(f\"๐Ÿ† FINAL TEST PPL: {math.exp(test_loss/len(test_loader)):.2f}\")\n", "\n", " writer.close()\n", " return model" ], "metadata": { "id": "-TNEv89gkS1k" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "if __name__ == \"__main__\":\n", "\n", "\n", " # 1. Run the Training Routine\n", " # This handles Model Creation -> Analysis -> Training -> Saving\n", " trained_prism = run_wikitext_training()\n", "\n", " # 2. 
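Optional sanity check (a minimal sketch, not part of the original pipeline): fill one masked\n",
"    # token with the freshly trained model. Assumes the tokenizer pushed to HF_ID defines the\n",
"    # standard RoBERTa <mask> token; the example sentence and the variable names are arbitrary.\n",
"    try:\n",
"        sanity_tok = RobertaTokenizerFast.from_pretrained(HF_ID)\n",
"        sanity_batch = sanity_tok(\"The quick brown <mask> jumps over the lazy dog.\", return_tensors=\"pt\").to(DEVICE)\n",
"        trained_prism.eval()\n",
"        with torch.no_grad():\n",
"            sanity_logits = trained_prism(sanity_batch[\"input_ids\"])\n",
"        mask_positions = (sanity_batch[\"input_ids\"] == sanity_tok.mask_token_id).nonzero(as_tuple=True)\n",
"        predicted_ids = sanity_logits[mask_positions].argmax(dim=-1)\n",
"        print(\"Masked-token guess:\", sanity_tok.decode(predicted_ids))\n",
"    except Exception as e:\n",
"        print(f\"⚠️ Sanity check skipped: {e}\")\n",
"\n",
"    # 3. 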
Cleanup & Shutdown\n" ], "metadata": { "id": "KaiJU0tPkVp-" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "from google.colab import runtime\n", "runtime.unassign()" ], "metadata": { "id": "bxFTYWHVqcSI" }, "execution_count": null, "outputs": [] } ] }