# OptiQ / src/ai/model.py
# Author: AhmedSamir1598
# First baseline for project OptiQ. Contains research resources, a first
# baseline using GNNs + QC, and benchmarks against current industry
# standards, while addressing the challenges that prevent better practices
# from being used in industry.
# Commit: 55e3496
"""
Physics-Informed GNN — Predicts optimal power flow variables for a given topology.
Architecture (inspired by PINCO, PDF 1 Section 4.1):
- Input: graph with node features [Pd, Qd, Vm_init, is_slack, is_gen]
and edge features [R, X, in_service]
- Layers: 3 SAGEConv message-passing layers
- Output: per-bus voltage magnitude (Vm) + per-generator active/reactive power (Pg, Qg)
- Projection: clamp outputs to physical bounds
Training (inspired by DeepOPF-NGT, PDF 1 Section 4.3):
- Unsupervised: no ground-truth labels needed
- Loss = generation_cost + λ_p * |P_mismatch| + λ_q * |Q_mismatch| + λ_v * |V_violation|
"""
from __future__ import annotations
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv, global_mean_pool
from torch_geometric.data import Data
class OptiQGNN(nn.Module):
    """GNN that predicts optimal-power-flow voltage magnitudes.

    Nodes are buses and edges are in-service lines. Raw node features are
    projected into a hidden space, refined by a stack of SAGEConv rounds
    with residual connections, and finally mapped to a per-bus voltage
    magnitude bounded to ``[vm_min, vm_max]`` via a sigmoid rescaling.
    """

    def __init__(
        self,
        node_in_dim: int = 5,
        edge_in_dim: int = 3,
        hidden_dim: int = 64,
        num_layers: int = 3,
        dropout: float = 0.1,
        vm_min: float = 0.90,
        vm_max: float = 1.10,
    ):
        super().__init__()
        # Physical voltage bounds used to rescale the sigmoid output.
        self.vm_min = vm_min
        self.vm_max = vm_max

        # Project raw node features into the hidden space.
        self.node_encoder = nn.Sequential(
            nn.Linear(node_in_dim, hidden_dim),
            nn.ReLU(),
        )
        # NOTE(review): edge_encoder is constructed but never called in
        # forward() (SAGEConv takes no edge attributes) — confirm whether
        # edge features are meant to feed message passing; kept for
        # checkpoint compatibility.
        self.edge_encoder = nn.Sequential(
            nn.Linear(edge_in_dim, hidden_dim),
            nn.ReLU(),
        )

        # One SAGEConv + LayerNorm pair per message-passing round.
        self.convs = nn.ModuleList(
            SAGEConv(hidden_dim, hidden_dim) for _ in range(num_layers)
        )
        self.norms = nn.ModuleList(
            nn.LayerNorm(hidden_dim) for _ in range(num_layers)
        )
        self.dropout = nn.Dropout(dropout)

        # Per-bus voltage-magnitude head; final sigmoid keeps the raw
        # prediction in [0, 1] before rescaling to [vm_min, vm_max].
        self.vm_head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid(),
        )

    def forward(self, data: Data) -> dict[str, torch.Tensor]:
        """Predict per-bus voltage magnitudes.

        Parameters
        ----------
        data : torch_geometric.data.Data
            Graph carrying ``x`` (node features), ``edge_index`` and
            ``edge_attr``.

        Returns
        -------
        dict
            ``{"vm": Tensor}`` with shape ``(n_buses,)``.
        """
        h = self.node_encoder(data.x)
        for mp_layer, layer_norm in zip(self.convs, self.norms):
            skip = h
            h = self.dropout(F.relu(layer_norm(mp_layer(h, data.edge_index))))
            h = h + skip  # residual connection around each round

        # Map hidden state to [0, 1], then rescale to the physical band.
        unit_vm = self.vm_head(h).squeeze(-1)  # (n_buses,)
        vm = self.vm_min + (self.vm_max - self.vm_min) * unit_vm
        return {"vm": vm}
def build_model(config=None) -> OptiQGNN:
    """Instantiate :class:`OptiQGNN` from configuration.

    Parameters
    ----------
    config : object, optional
        Must expose ``hidden_dim``, ``num_layers`` and ``dropout``.
        When omitted, the project-wide ``CFG.ai`` settings are used.
    """
    if config is None:
        # Deferred import: only needed when no explicit config is supplied.
        from config import CFG
        config = CFG.ai
    kwargs = {
        "hidden_dim": config.hidden_dim,
        "num_layers": config.num_layers,
        "dropout": config.dropout,
    }
    return OptiQGNN(**kwargs)