{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [], "gpuType": "T4" }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU" }, "cells": [ { "cell_type": "markdown", "metadata": { "id": "GlQt0wJw149J" }, "source": [ "# Ablation: GCN vs MPNN Edge Features (v3)\n", "\n", "**Question:** Does explicit edge-feature message passing improve preconditioner quality over implicit GCN convolution?\n", "\n", "- GCN (`ContextResGCN`) vs MPNN (`ContextResMPNN`) with matched hyperparameters\n", "- 5 training seeds for statistical significance\n", "- 3 evaluation domains: diffusion, advection (in-distribution), graph Laplacian (OOD domain)\n", "- Jacobi baseline for experiment verification\n", "- Primary metric: average FGMRES iterations\n", "- Incremental save/resume: results saved after each seed" ] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "0mx9uK5u149L", "outputId": "08c589c6-981b-4dd2-ed6c-a45c57c156cb" }, "source": [ "!pip install matrixpfn" ], "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting matrixpfn\n", " Using cached matrixpfn-0.1.12-py3-none-any.whl.metadata (4.5 kB)\n", "Collecting huggingface-hub>=1.6.0 (from matrixpfn)\n", " Using cached huggingface_hub-1.6.0-py3-none-any.whl.metadata (13 kB)\n", "Requirement already satisfied: matplotlib>=3.7 in /usr/local/lib/python3.12/dist-packages (from matrixpfn) (3.10.0)\n", "Requirement already satisfied: numpy>=1.26 in /usr/local/lib/python3.12/dist-packages (from matrixpfn) (2.0.2)\n", "Collecting pyamg>=5.0 (from matrixpfn)\n", " Using cached pyamg-5.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl.metadata (8.1 kB)\n", "Collecting pyarrow>=23.0.1 (from matrixpfn)\n", " Using cached pyarrow-23.0.1-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (3.1 kB)\n", "Collecting python-igraph>=1.0 (from matrixpfn)\n", " Using cached python_igraph-1.0.0-py3-none-any.whl.metadata (3.1 kB)\n", "Requirement already satisfied: scipy>=1.11 in /usr/local/lib/python3.12/dist-packages (from matrixpfn) (1.16.3)\n", "Requirement already satisfied: torch>=2.0 in /usr/local/lib/python3.12/dist-packages (from matrixpfn) (2.10.0+cu128)\n", "Requirement already satisfied: tqdm>=4.60 in /usr/local/lib/python3.12/dist-packages (from matrixpfn) (4.67.3)\n", "Requirement already satisfied: filelock>=3.10.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub>=1.6.0->matrixpfn) (3.24.3)\n", "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub>=1.6.0->matrixpfn) (2025.3.0)\n", "Collecting hf-xet<2.0.0,>=1.3.2 (from huggingface-hub>=1.6.0->matrixpfn)\n", " Using cached hf_xet-1.3.2-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl.metadata (4.9 kB)\n", "Requirement already satisfied: httpx<1,>=0.23.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub>=1.6.0->matrixpfn) (0.28.1)\n", "Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub>=1.6.0->matrixpfn) (26.0)\n", "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub>=1.6.0->matrixpfn) (6.0.3)\n", "Requirement already satisfied: typer in /usr/local/lib/python3.12/dist-packages (from huggingface-hub>=1.6.0->matrixpfn) (0.24.1)\n", "Requirement already satisfied: typing-extensions>=4.1.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub>=1.6.0->matrixpfn) (4.15.0)\n", "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.7->matrixpfn) (1.3.3)\n", "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.7->matrixpfn) (0.12.1)\n", "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.7->matrixpfn) (4.61.1)\n", "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.7->matrixpfn) (1.4.9)\n", "Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.7->matrixpfn) (11.3.0)\n", "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.7->matrixpfn) (3.3.2)\n", "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.12/dist-packages (from matplotlib>=3.7->matrixpfn) (2.9.0.post0)\n", "Collecting igraph==1.0.0 (from python-igraph>=1.0->matrixpfn)\n", " Using cached igraph-1.0.0-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (4.4 kB)\n", "Collecting texttable>=1.6.2 (from igraph==1.0.0->python-igraph>=1.0->matrixpfn)\n", " Using cached texttable-1.7.0-py2.py3-none-any.whl.metadata (9.8 kB)\n", "Requirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (75.2.0)\n", "Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (1.14.0)\n", "Requirement already satisfied: networkx>=2.5.1 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (3.6.1)\n", "Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (3.1.6)\n", "Requirement already satisfied: cuda-bindings==12.9.4 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.9.4)\n", "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.8.93 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.8.93)\n", "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.8.90 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.8.90)\n", "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.8.90 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.8.90)\n", "Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (9.10.2.21)\n", "Requirement already satisfied: nvidia-cublas-cu12==12.8.4.1 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.8.4.1)\n", "Requirement already satisfied: nvidia-cufft-cu12==11.3.3.83 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (11.3.3.83)\n", "Requirement already satisfied: nvidia-curand-cu12==10.3.9.90 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (10.3.9.90)\n", "Requirement already satisfied: nvidia-cusolver-cu12==11.7.3.90 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (11.7.3.90)\n", "Requirement already satisfied: nvidia-cusparse-cu12==12.5.8.93 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.5.8.93)\n", "Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (0.7.1)\n", "Requirement already satisfied: nvidia-nccl-cu12==2.27.5 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (2.27.5)\n", "Requirement already satisfied: nvidia-nvshmem-cu12==3.4.5 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (3.4.5)\n", "Requirement already satisfied: nvidia-nvtx-cu12==12.8.90 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.8.90)\n", "Requirement already satisfied: nvidia-nvjitlink-cu12==12.8.93 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (12.8.93)\n", "Requirement already satisfied: nvidia-cufile-cu12==1.13.1.3 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (1.13.1.3)\n", "Requirement already satisfied: triton==3.6.0 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0->matrixpfn) (3.6.0)\n", "Requirement already satisfied: cuda-pathfinder~=1.1 in /usr/local/lib/python3.12/dist-packages (from cuda-bindings==12.9.4->torch>=2.0->matrixpfn) (1.4.0)\n", "Requirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1,>=0.23.0->huggingface-hub>=1.6.0->matrixpfn) (4.12.1)\n", "Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1,>=0.23.0->huggingface-hub>=1.6.0->matrixpfn) (2026.2.25)\n", "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1,>=0.23.0->huggingface-hub>=1.6.0->matrixpfn) (1.0.9)\n", "Requirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1,>=0.23.0->huggingface-hub>=1.6.0->matrixpfn) (3.11)\n", "Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1,>=0.23.0->huggingface-hub>=1.6.0->matrixpfn) (0.16.0)\n", "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.7->matplotlib>=3.7->matrixpfn) (1.17.0)\n", "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch>=2.0->matrixpfn) (1.3.0)\n", "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch>=2.0->matrixpfn) (3.0.3)\n", "Requirement already satisfied: click>=8.2.1 in /usr/local/lib/python3.12/dist-packages (from typer->huggingface-hub>=1.6.0->matrixpfn) (8.3.1)\n", "Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer->huggingface-hub>=1.6.0->matrixpfn) (1.5.4)\n", "Requirement already satisfied: rich>=12.3.0 in /usr/local/lib/python3.12/dist-packages (from typer->huggingface-hub>=1.6.0->matrixpfn) (13.9.4)\n", "Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from typer->huggingface-hub>=1.6.0->matrixpfn) (0.0.4)\n", "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=12.3.0->typer->huggingface-hub>=1.6.0->matrixpfn) (4.0.0)\n", "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.12/dist-packages (from rich>=12.3.0->typer->huggingface-hub>=1.6.0->matrixpfn) (2.19.2)\n", "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=12.3.0->typer->huggingface-hub>=1.6.0->matrixpfn) (0.1.2)\n", "Using cached matrixpfn-0.1.12-py3-none-any.whl (47 kB)\n", "Using cached huggingface_hub-1.6.0-py3-none-any.whl (612 kB)\n", "Using cached pyamg-5.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl (1.9 MB)\n", "Using cached pyarrow-23.0.1-cp312-cp312-manylinux_2_28_x86_64.whl (47.6 MB)\n", "Using cached python_igraph-1.0.0-py3-none-any.whl (9.2 kB)\n", "Using cached igraph-1.0.0-cp39-abi3-manylinux_2_28_x86_64.whl (5.7 MB)\n", "Downloading hf_xet-1.3.2-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (4.2 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.2/4.2 MB\u001b[0m \u001b[31m43.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hDownloading texttable-1.7.0-py2.py3-none-any.whl (10 kB)\n", "Installing collected packages: texttable, pyarrow, igraph, hf-xet, python-igraph, pyamg, huggingface-hub, matrixpfn\n", " Attempting uninstall: pyarrow\n", " Found existing installation: pyarrow 18.1.0\n", " Uninstalling pyarrow-18.1.0:\n", " Successfully uninstalled pyarrow-18.1.0\n", " Attempting uninstall: hf-xet\n", " Found existing installation: hf-xet 1.3.1\n", " Uninstalling hf-xet-1.3.1:\n", " Successfully uninstalled hf-xet-1.3.1\n", " Attempting uninstall: huggingface-hub\n", " Found existing installation: huggingface_hub 1.5.0\n", " Uninstalling huggingface_hub-1.5.0:\n", " Successfully uninstalled huggingface_hub-1.5.0\n", "Successfully installed hf-xet-1.3.2 huggingface-hub-1.6.0 igraph-1.0.0 matrixpfn-0.1.12 pyamg-5.3.0 pyarrow-23.0.1 python-igraph-1.0.0 texttable-1.7.0\n" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "ZfJO3eDm149L" }, "source": [ "## Imports & Device Setup" ] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "UdqQg8ob149L", "outputId": "9edb9124-ce12-4805-864e-40ce04b41ad1" }, "source": [ "import json\n", "import time\n", "import random\n", "from pathlib import Path\n", "from dataclasses import dataclass, field\n", "\n", "import numpy as np\n", "import torch\n", "\n", "from matrixpfn.generator.base import MatrixDomain\n", "from matrixpfn.generator.domains.diffusion import DiffusionGenerator\n", "from matrixpfn.generator.domains.diffusion_advection import DiffusionAdvectionGenerator\n", "from matrixpfn.generator.domains.fast_graph_laplacian import FastGraphLaplacianGenerator\n", "from matrixpfn.generator.online import OnlineMatrixDataset\n", "from matrixpfn.generator.registry import MatrixGeneratorRegistry\n", "from matrixpfn.nn.context_resgcn import ContextResGCN, ContextResMPNN\n", "from matrixpfn.precond.jacobi import Jacobi\n", "from matrixpfn.precond.matrix_pfn import MatrixPFN, TrainingConfig\n", "from matrixpfn.solver.fgmres import FGMRES, Preconditioner\n", "\n", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "print(f\"Device: {device}\")" ], "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Device: cuda\n" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "1WtJ8Rlt149M" }, "source": [ "## Configuration" ] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "sSIXV7nv149M", "outputId": "9248bb04-7bad-439b-e432-612e58866b40" }, "source": [ "RESULTS_FILE = Path(\"/content/ablation_edge_features_v3.json\")\n", "\n", "NUM_LAYERS = 8\n", "EMBED_DIM = 16\n", "HIDDEN_DIM = 32\n", "NUM_CONTEXT_PAIRS = 5\n", "\n", "TRAINING_GRID_SIZES = (16, 24, 32)\n", "EVAL_GRID_SIZES = (16, 24, 32, 48)\n", "\n", "TRAINING_EPOCHS = 1000\n", "MATRICES_PER_EPOCH = 4\n", "BATCH_SIZE = 16\n", "LEARNING_RATE = 1e-3\n", "\n", "GMRES_RESTART = 30\n", "GMRES_MAX_ITERS = 300\n", "GMRES_RTOL = 1e-6\n", "NUM_TEST_MATRICES = 20\n", "\n", "SEEDS = [42, 123, 456, 789, 1337]\n", "\n", "TRAIN_DOMAINS = [\"diffusion\", \"advection\"]\n", "EVAL_DOMAINS = [\"diffusion\", \"advection\", \"graph_laplacian\"]\n", "\n", "print(f\"Seeds: {SEEDS}\")\n", "print(f\"Training: {TRAIN_DOMAINS} on grids {TRAINING_GRID_SIZES}, {TRAINING_EPOCHS} epochs\")\n", "print(f\"Eval: {EVAL_DOMAINS} on grids {EVAL_GRID_SIZES}\")" ], "execution_count": 3, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Seeds: [42, 123, 456, 789, 1337]\n", "Training: ['diffusion', 'advection'] on grids (16, 24, 32), 1000 epochs\n", "Eval: ['diffusion', 'advection', 'graph_laplacian'] on grids (16, 24, 32, 48)\n" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "NusZgIcf149M" }, "source": [ "## Utilities" ] }, { "cell_type": "code", "metadata": { "id": "idXLCklG149M" }, "source": [ "@dataclass\n", "class EvalResult:\n", " iterations: list[int] = field(default_factory=list)\n", " converged: list[bool] = field(default_factory=list)\n", " final_residuals: list[float] = field(default_factory=list)\n", " solve_times: list[float] = field(default_factory=list)\n", "\n", "\n", "def count_parameters(model: torch.nn.Module) -> int:\n", " return sum(p.numel() for p in model.parameters() if p.requires_grad)\n", "\n", "\n", "def make_train_dataset(grid_sizes: tuple[int, ...], device: torch.device) -> OnlineMatrixDataset:\n", " diff_gen = DiffusionGenerator(grid_sizes, device)\n", " adv_gen = DiffusionAdvectionGenerator(diff_gen)\n", " registry = MatrixGeneratorRegistry({\n", " MatrixDomain.DIFFUSION: diff_gen,\n", " MatrixDomain.DIFFUSION_ADVECTION: adv_gen,\n", " })\n", " return OnlineMatrixDataset(registry, NUM_CONTEXT_PAIRS)\n", "\n", "\n", "def train_model(name: str, model: torch.nn.Module, device: torch.device, seed: int) -> dict:\n", " print(f\"\\n Training {name} ({count_parameters(model):,} params) seed={seed}\")\n", "\n", " optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n", " precond = MatrixPFN(model, model_device=device)\n", " dataset = make_train_dataset(TRAINING_GRID_SIZES, device)\n", "\n", " config = TrainingConfig(\n", " batch_size=BATCH_SIZE,\n", " epochs=TRAINING_EPOCHS,\n", " matrices_per_epoch=MATRICES_PER_EPOCH,\n", " num_context_pairs=NUM_CONTEXT_PAIRS,\n", " learning_rate=LEARNING_RATE,\n", " )\n", "\n", " tic = time.perf_counter()\n", " history = precond.train(dataset, config=config, optimizer=optimizer)\n", " train_time = time.perf_counter() - tic\n", "\n", " best_loss = history[\"best_loss\"]\n", " best_epoch = history[\"best_epoch\"]\n", " final_loss = history[\"loss\"][-1]\n", " print(f\" best={best_loss:.4e} @{best_epoch}, final={final_loss:.4e}, {train_time:.1f}s\")\n", "\n", " return {\n", " \"name\": name,\n", " \"param_count\": count_parameters(model),\n", " \"train_time\": train_time,\n", " \"train_history\": history[\"loss\"],\n", " \"best_loss\": best_loss,\n", " \"best_epoch\": best_epoch,\n", " }\n", "\n", "\n", "def generate_test_matrix(gen, num_context_pairs: int) -> tuple[torch.Tensor, torch.Tensor]:\n", " data = gen.generate_batch(1, num_context_pairs)\n", " A = torch.sparse_coo_tensor(\n", " data.indices, data.values[0], (data.n, data.n)\n", " ).coalesce().to_sparse_csc()\n", " b = torch.randn(data.n, dtype=torch.float64, device=A.device)\n", " return A, b\n", "\n", "\n", "def make_test_generator(domain: str, grid_size: int, device: torch.device):\n", " if domain == \"diffusion\":\n", " return DiffusionGenerator(grid_size, device)\n", " if domain == \"advection\":\n", " return DiffusionAdvectionGenerator(DiffusionGenerator(grid_size, device))\n", " if domain == \"graph_laplacian\":\n", " return FastGraphLaplacianGenerator(grid_size * grid_size, device)\n", " raise ValueError(f\"Unknown domain: {domain}\")\n", "\n", "\n", "def evaluate_preconditioner(\n", " precond_factory, test_matrices: list[tuple[torch.Tensor, torch.Tensor]],\n", ") -> dict:\n", " solver = FGMRES(restart=GMRES_RESTART, max_iters=GMRES_MAX_ITERS, rtol=GMRES_RTOL)\n", " er = EvalResult()\n", "\n", " for A, b in test_matrices:\n", " tic = time.perf_counter()\n", " M = precond_factory(A)\n", " sr = solver.solve(A, b, M=M, progress_bar=False)\n", " elapsed = time.perf_counter() - tic\n", "\n", " er.iterations.append(sr.iterations)\n", " er.converged.append(sr.converged)\n", " er.final_residuals.append(sr.final_residual)\n", " er.solve_times.append(elapsed)\n", "\n", " return {\n", " \"iterations\": er.iterations,\n", " \"converged\": er.converged,\n", " \"final_residuals\": er.final_residuals,\n", " \"solve_times\": er.solve_times,\n", " }\n", "\n", "\n", "def make_neural_factory(model: torch.nn.Module, device: torch.device):\n", " def factory(A: torch.Tensor):\n", " precond = MatrixPFN(model, model_device=device)\n", " precond.prepare_for_solve(A, num_context_pairs=NUM_CONTEXT_PAIRS)\n", " return precond\n", " return factory" ], "execution_count": 4, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "Iz6UY3O1149M" }, "source": [ "## Save / Resume Logic\n", "\n", "Results are saved incrementally after each seed completes. If the notebook is interrupted, re-running will skip already-completed seeds." ] }, { "cell_type": "code", "metadata": { "id": "szi5rxpl149M" }, "source": [ "def load_existing_results() -> dict:\n", " if RESULTS_FILE.exists():\n", " with open(RESULTS_FILE) as f:\n", " return json.load(f)\n", " return {\n", " \"config\": {\n", " \"num_layers\": NUM_LAYERS,\n", " \"embed_dim\": EMBED_DIM,\n", " \"hidden_dim\": HIDDEN_DIM,\n", " \"num_context_pairs\": NUM_CONTEXT_PAIRS,\n", " \"training_grid_sizes\": list(TRAINING_GRID_SIZES),\n", " \"eval_grid_sizes\": list(EVAL_GRID_SIZES),\n", " \"training_epochs\": TRAINING_EPOCHS,\n", " \"matrices_per_epoch\": MATRICES_PER_EPOCH,\n", " \"batch_size\": BATCH_SIZE,\n", " \"learning_rate\": LEARNING_RATE,\n", " \"gmres_restart\": GMRES_RESTART,\n", " \"gmres_max_iters\": GMRES_MAX_ITERS,\n", " \"gmres_rtol\": GMRES_RTOL,\n", " \"num_test_matrices\": NUM_TEST_MATRICES,\n", " \"seeds\": SEEDS,\n", " \"train_domains\": TRAIN_DOMAINS,\n", " \"eval_domains\": EVAL_DOMAINS,\n", " },\n", " \"seeds\": [],\n", " }\n", "\n", "\n", "def save_results(data: dict):\n", " RESULTS_FILE.parent.mkdir(parents=True, exist_ok=True)\n", " with open(RESULTS_FILE, \"w\") as f:\n", " json.dump(data, f, indent=2)\n", "\n", "\n", "def completed_seeds(data: dict) -> set[int]:\n", " return {s[\"seed\"] for s in data[\"seeds\"]}\n", "\n", "\n", "def set_all_seeds(seed: int):\n", " torch.manual_seed(seed)\n", " random.seed(seed)\n", " np.random.seed(seed)\n", " if torch.cuda.is_available():\n", " torch.cuda.manual_seed(seed)" ], "execution_count": 5, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "IDoC_Red149M" }, "source": [ "## Per-Seed Training & Evaluation" ] }, { "cell_type": "code", "metadata": { "id": "hR2HLdoi149M" }, "source": [ "def run_seed(seed: int, device: torch.device) -> dict:\n", " set_all_seeds(seed)\n", "\n", " print(f\"\\n{'='*70}\")\n", " print(f\"SEED {seed}\")\n", " print(f\"{'='*70}\")\n", "\n", " gcn = ContextResGCN(\n", " num_layers=NUM_LAYERS, embed=EMBED_DIM, hidden=HIDDEN_DIM,\n", " drop_rate=0.0, num_context_pairs=NUM_CONTEXT_PAIRS, dtype=torch.float32,\n", " ).to(device)\n", "\n", " mpnn = ContextResMPNN(\n", " num_layers=NUM_LAYERS, embed=EMBED_DIM, hidden=HIDDEN_DIM,\n", " drop_rate=0.0, num_context_pairs=NUM_CONTEXT_PAIRS, dtype=torch.float32,\n", " ).to(device)\n", "\n", " gcn_train = train_model(\"GCN\", gcn, device, seed)\n", " mpnn_train = train_model(\"MPNN\", mpnn, device, seed)\n", "\n", " test_seed = seed + 99999\n", " set_all_seeds(test_seed)\n", "\n", " models = [\n", " {\"name\": \"No Preconditioner\", \"param_count\": 0, \"train_time\": 0.0,\n", " \"train_history\": [], \"best_loss\": None, \"best_epoch\": None,\n", " \"factory\": lambda A: None},\n", " {\"name\": \"Jacobi\", \"param_count\": 0, \"train_time\": 0.0,\n", " \"train_history\": [], \"best_loss\": None, \"best_epoch\": None,\n", " \"factory\": lambda A: Jacobi(A)},\n", " {**gcn_train, \"factory\": make_neural_factory(gcn, device)},\n", " {**mpnn_train, \"factory\": make_neural_factory(mpnn, device)},\n", " ]\n", "\n", " for domain in EVAL_DOMAINS:\n", " for gs in EVAL_GRID_SIZES:\n", " key = f\"{domain}_{gs}\"\n", " ood = gs not in TRAINING_GRID_SIZES\n", " ood_domain = domain == \"graph_laplacian\"\n", " tags = []\n", " if ood:\n", " tags.append(\"OOD-size\")\n", " if ood_domain:\n", " tags.append(\"OOD-domain\")\n", " tag_str = f\" ({', '.join(tags)})\" if tags else \"\"\n", " print(f\"\\n Eval: {domain} {gs}x{gs}{tag_str}\")\n", "\n", " gen = make_test_generator(domain, gs, device)\n", " test_matrices = [generate_test_matrix(gen, NUM_CONTEXT_PAIRS) for _ in range(NUM_TEST_MATRICES)]\n", "\n", " for m in models:\n", " eval_data = evaluate_preconditioner(m[\"factory\"], test_matrices)\n", " conv = sum(eval_data[\"converged\"])\n", " avg_iter = sum(eval_data[\"iterations\"]) / NUM_TEST_MATRICES\n", " print(f\" {m['name']:<20s} conv={conv}/{NUM_TEST_MATRICES} avg_iter={avg_iter:.1f}\")\n", "\n", " if \"eval\" not in m:\n", " m[\"eval\"] = {}\n", " m[\"eval\"][key] = eval_data\n", "\n", " seed_result = {\"seed\": seed, \"models\": []}\n", " for m in models:\n", " model_data = {\n", " \"name\": m[\"name\"],\n", " \"param_count\": m[\"param_count\"],\n", " \"train_time\": m[\"train_time\"],\n", " \"train_history\": m[\"train_history\"],\n", " \"best_loss\": m.get(\"best_loss\"),\n", " \"best_epoch\": m.get(\"best_epoch\"),\n", " \"eval\": m.get(\"eval\", {}),\n", " }\n", " seed_result[\"models\"].append(model_data)\n", "\n", " del gcn, mpnn\n", " if torch.cuda.is_available():\n", " torch.cuda.empty_cache()\n", "\n", " return seed_result" ], "execution_count": 6, "outputs": [] }, { "cell_type": "markdown", "metadata": { "id": "NFrRBXRL149N" }, "source": [ "## Run All Seeds\n", "\n", "Completed seeds are skipped automatically. Results are saved to disk after each seed finishes." ] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "gvgRzx1K149N", "outputId": "0eb50eb0-3713-4a73-bd43-3297c8fc278f" }, "source": [ "data = load_existing_results()\n", "done = completed_seeds(data)\n", "\n", "for seed in SEEDS:\n", " if seed in done:\n", " print(f\"\\nSeed {seed} already completed, skipping.\")\n", " continue\n", "\n", " seed_result = run_seed(seed, device)\n", " data[\"seeds\"].append(seed_result)\n", " save_results(data)\n", " print(f\"\\n Seed {seed} saved to {RESULTS_FILE}\")\n", " done.add(seed)\n", "\n", "print(f\"\\nAll seeds complete. Results at {RESULTS_FILE}\")" ], "execution_count": 7, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "======================================================================\n", "SEED 42\n", "======================================================================\n", "\n", " Training GCN (11,074 params) seed=42\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "\rTraining: 0%| | 0/1000 [00:005s} \", end=\"\")\n", "for name in model_names:\n", " print(f\"{'':>2s}{name:>18s}\", end=\"\")\n", "print()\n", "print(\"-\" * 100)\n", "\n", "for domain in EVAL_DOMAINS:\n", " for gs in EVAL_GRID_SIZES:\n", " key = f\"{domain}_{gs}\"\n", " ood = gs not in TRAINING_GRID_SIZES\n", " tag = \" *\" if ood else \"\"\n", "\n", " print(f\"{domain:<20s} {gs:>3d}x{gs}{tag:>2s} \", end=\"\")\n", "\n", " for model_name in model_names:\n", " all_conv = []\n", " all_iter = []\n", " for seed_data in data[\"seeds\"]:\n", " for m in seed_data[\"models\"]:\n", " if m[\"name\"] == model_name and key in m.get(\"eval\", {}):\n", " ev = m[\"eval\"][key]\n", " all_conv.extend(ev[\"converged\"])\n", " all_iter.extend(ev[\"iterations\"])\n", "\n", " if all_conv:\n", " conv_rate = sum(all_conv) / len(all_conv) * 100\n", " avg_iter = sum(all_iter) / len(all_iter)\n", " print(f\" {conv_rate:5.1f}% {avg_iter:5.1f}it\", end=\"\")\n", " else:\n", " print(f\" {'N/A':>13s}\", end=\"\")\n", "\n", " print()\n", "\n", "print(f\"\\n--- Training (avg over {len(data['seeds'])} seeds) ---\")\n", "for model_name in [\"GCN\", \"MPNN\"]:\n", " losses = []\n", " times = []\n", " for seed_data in data[\"seeds\"]:\n", " for m in seed_data[\"models\"]:\n", " if m[\"name\"] == model_name and m.get(\"best_loss\") is not None:\n", " losses.append(m[\"best_loss\"])\n", " times.append(m[\"train_time\"])\n", " if losses:\n", " print(f\" {model_name}: best_loss={np.mean(losses):.4e} +/- {np.std(losses):.4e}, \"\n", " f\"time={np.mean(times):.1f}s\")\n", "\n", "print(f\"\\n* = OOD grid size (not in training set)\")\n", "print(f\"graph_laplacian = OOD domain (not in training set)\")" ], "execution_count": 8, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "==========================================================================================\n", "FINAL SUMMARY\n", "==========================================================================================\n", "\n", "Domain Grid No Preconditioner Jacobi GCN MPNN\n", "----------------------------------------------------------------------------------------------------\n", "diffusion 16x16 100.0% 85.2it 100.0% 61.8it 100.0% 22.4it 100.0% 24.1it\n", "diffusion 24x24 100.0% 140.1it 100.0% 101.3it 100.0% 33.2it 100.0% 35.3it\n", "diffusion 32x32 100.0% 210.8it 100.0% 136.7it 100.0% 46.7it 100.0% 44.0it\n", "diffusion 48x48 * 2.0% 299.8it 98.0% 231.4it 100.0% 73.2it 100.0% 59.2it\n", "advection 16x16 100.0% 85.1it 100.0% 61.7it 100.0% 22.5it 100.0% 24.9it\n", "advection 24x24 100.0% 139.7it 100.0% 102.0it 100.0% 33.2it 100.0% 34.9it\n", "advection 32x32 100.0% 212.7it 100.0% 137.5it 100.0% 46.2it 100.0% 44.2it\n", "advection 48x48 * 5.0% 298.3it 97.0% 230.6it 100.0% 73.2it 100.0% 60.1it\n", "graph_laplacian 16x16 100.0% 14.5it 100.0% 8.8it 100.0% 6.0it 0.0% 300.0it\n", "graph_laplacian 24x24 100.0% 14.7it 100.0% 8.1it 100.0% 6.4it 0.0% 300.0it\n", "graph_laplacian 32x32 100.0% 14.4it 100.0% 7.4it 100.0% 6.5it 0.0% 300.0it\n", "graph_laplacian 48x48 * 100.0% 14.3it 100.0% 6.9it 100.0% 6.7it 0.0% 300.0it\n", "\n", "--- Training (avg over 5 seeds) ---\n", " GCN: best_loss=9.0607e-02 +/- 2.7669e-03, time=100.8s\n", " MPNN: best_loss=9.2091e-02 +/- 2.8662e-03, time=106.2s\n", "\n", "* = OOD grid size (not in training set)\n", "graph_laplacian = OOD domain (not in training set)\n" ] } ] }, { "cell_type": "markdown", "metadata": { "id": "lQ7I7SCp149N" }, "source": [ "## Download Results" ] }, { "cell_type": "code", "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 17 }, "id": "kENoYRAw149N", "outputId": "487860a3-1e67-4797-c369-8576c0330309" }, "source": [ "from google.colab import files\n", "files.download(str(RESULTS_FILE))" ], "execution_count": 9, "outputs": [ { "output_type": "display_data", "data": { "text/plain": [ "" ], "application/javascript": [ "\n", " async function download(id, filename, size) {\n", " if (!google.colab.kernel.accessAllowed) {\n", " return;\n", " }\n", " const div = document.createElement('div');\n", " const label = document.createElement('label');\n", " label.textContent = `Downloading \"${filename}\": `;\n", " div.appendChild(label);\n", " const progress = document.createElement('progress');\n", " progress.max = size;\n", " div.appendChild(progress);\n", " document.body.appendChild(div);\n", "\n", " const buffers = [];\n", " let downloaded = 0;\n", "\n", " const channel = await google.colab.kernel.comms.open(id);\n", " // Send a message to notify the kernel that we're ready.\n", " channel.send({})\n", "\n", " for await (const message of channel.messages) {\n", " // Send a message to notify the kernel that we're ready.\n", " channel.send({})\n", " if (message.buffers) {\n", " for (const buffer of message.buffers) {\n", " buffers.push(buffer);\n", " downloaded += buffer.byteLength;\n", " progress.value = downloaded;\n", " }\n", " }\n", " }\n", " const blob = new Blob(buffers, {type: 'application/binary'});\n", " const a = document.createElement('a');\n", " a.href = window.URL.createObjectURL(blob);\n", " a.download = filename;\n", " div.appendChild(a);\n", " a.click();\n", " div.remove();\n", " }\n", " " ] }, "metadata": {} }, { "output_type": "display_data", "data": { "text/plain": [ "" ], "application/javascript": [ "download(\"download_b376d41a-0d96-402a-a2c9-fa9b462b4e8e\", \"ablation_edge_features_v3.json\", 953198)" ] }, "metadata": {} } ] } ] }