Spaces:
Sleeping
Sleeping
First baseline for project OptiQ. Contains research resources, a first baseline using GNNs + QC, and benchmarks against current industry standards, while addressing the challenges that prevent better practices from being adopted in industry.
55e3496 | """ | |
| Inference — Use trained GNN to predict voltage profiles for a given topology. | |
| """ | |
| from __future__ import annotations | |
| import os | |
| import time | |
| import torch | |
| import pandapower as pp | |
| from config import CFG | |
| from src.ai.model import build_model | |
| from src.ai.dataset import net_to_pyg | |
| from src.grid.loader import clone_network | |
| from src.grid.power_flow import run_power_flow, extract_results | |
# Cache of loaded models, keyed by checkpoint path. Stores (model, device)
# pairs so a cache hit returns the device the model actually lives on.
_model_cache: dict[str, tuple] = {}


def _load_model(checkpoint_path: str | None = None, device: str | None = None):
    """Load the trained GNN model, caching it per checkpoint path.

    Parameters
    ----------
    checkpoint_path : str | None
        Path to the saved state dict; defaults to ``CFG.ai.checkpoint_path``.
    device : str | None
        Target device; defaults to CUDA when available, else CPU.

    Returns
    -------
    tuple
        ``(model, device)`` — the model in eval mode on *device*.

    Raises
    ------
    FileNotFoundError
        If no checkpoint exists at *checkpoint_path*.
    """
    checkpoint_path = checkpoint_path or CFG.ai.checkpoint_path
    device = device or ("cuda" if torch.cuda.is_available() else "cpu")

    # BUGFIX: cache the (model, device) pair together. Previously a cache hit
    # returned the freshly computed `device` string even though the cached
    # model had been moved to a (possibly different) device on first load.
    if checkpoint_path in _model_cache:
        return _model_cache[checkpoint_path]

    model = build_model()
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(f"No model checkpoint at {checkpoint_path}")
    # weights_only=True avoids unpickling arbitrary objects from the file.
    model.load_state_dict(
        torch.load(checkpoint_path, map_location=device, weights_only=True)
    )
    model = model.to(device)
    model.eval()

    _model_cache[checkpoint_path] = (model, device)
    return model, device
def predict_voltage(
    net: pp.pandapowerNet,
    checkpoint_path: str | None = None,
) -> dict:
    """Predict voltage magnitudes for the given network using the GNN.

    Parameters
    ----------
    net : pp.pandapowerNet
        Network with topology already set (lines in/out of service).

    Returns
    -------
    dict with "vm_predicted" (list of floats) and "inference_time_ms".
    """
    model, device = _load_model(checkpoint_path)

    # Build the graph representation without solved results — the model's
    # job is to predict them.
    graph = net_to_pyg(net, include_results=False).to(device)

    start = time.perf_counter()
    with torch.no_grad():
        prediction = model(graph)
    elapsed_ms = (time.perf_counter() - start) * 1000

    voltages = prediction["vm"].cpu().numpy()
    return {
        "vm_predicted": [round(float(value), 4) for value in voltages],
        "inference_time_ms": round(elapsed_ms, 2),
    }
def ai_warm_start_power_flow(
    net: pp.pandapowerNet,
    open_lines: list[int],
    checkpoint_path: str | None = None,
) -> dict:
    """Use GNN to warm-start the Newton-Raphson power flow solver.

    1. Apply topology (open specified lines)
    2. Predict voltage magnitudes with GNN
    3. Initialise pandapower with predicted voltages
    4. Run power flow (should converge faster with good initial guess)

    Parameters
    ----------
    net : pp.pandapowerNet
        Base network; it is cloned via ``apply_topology``, not mutated.
    open_lines : list[int]
        Indices of lines to take out of service.
    checkpoint_path : str | None
        Optional checkpoint override forwarded to the GNN loader.

    Returns
    -------
    dict with full power flow results + AI inference time. On failure,
    a dict with ``converged: False``. Both shapes include
    ``open_lines``, ``ai_inference_ms`` and ``total_time_ms``.
    """
    # apply_topology is only needed here; extract_results is already
    # imported at module level.
    from src.grid.power_flow import apply_topology

    # Apply topology
    net_new = apply_topology(net, open_lines)

    t_total_start = time.perf_counter()

    # Predict voltages. This is best-effort: any failure (missing
    # checkpoint, model error, conversion error) falls back to a cold run.
    # BUGFIX: the old clause `except (FileNotFoundError, Exception)` was
    # redundant — FileNotFoundError is already an Exception subclass.
    ai_time_ms = 0.0
    vm_pred = None
    try:
        prediction = predict_voltage(net_new, checkpoint_path)
        vm_pred = prediction["vm_predicted"]
        ai_time_ms = prediction["inference_time_ms"]
    except Exception:
        pass

    # Try power flow with AI warm start, fall back to flat start
    converged = False
    if vm_pred is not None:
        try:
            # First run with flat start to populate res tables
            pp.runpp(net_new, init="flat")
            # Now set predicted Vm and re-run with results init
            for i, vm in enumerate(vm_pred):
                if i < len(net_new.res_bus):
                    net_new.res_bus.at[i, "vm_pu"] = vm
            pp.runpp(net_new, init="results")
            converged = True
        except pp.LoadflowNotConverged:
            # Fall back to flat start on a fresh clone (the failed warm
            # start may have left net_new in a partially solved state).
            try:
                net_fallback = apply_topology(net, open_lines)
                pp.runpp(net_fallback, init="flat")
                net_new = net_fallback
                converged = True
            except pp.LoadflowNotConverged:
                converged = False
    else:
        # No prediction available — plain cold-start power flow.
        try:
            pp.runpp(net_new)
            converged = True
        except pp.LoadflowNotConverged:
            converged = False

    t_total_ms = (time.perf_counter() - t_total_start) * 1000

    if converged:
        result = extract_results(net_new)
        result["open_lines"] = open_lines
        result["ai_inference_ms"] = ai_time_ms
        result["total_time_ms"] = round(t_total_ms, 2)
        return result

    # BUGFIX: the failure dict previously omitted total_time_ms, giving
    # callers an inconsistent result schema between success and failure.
    return {
        "converged": False,
        "open_lines": open_lines,
        "ai_inference_ms": ai_time_ms,
        "total_time_ms": round(t_total_ms, 2),
    }