Spaces:
Sleeping
Sleeping
first baseline for project OptiQ. Contains research resources, a first baseline using GNNs + QC, and benchmarks against current industry standards, while addressing the challenges that prevent better practices from being used in industry.
"""Physics-Informed Loss — unsupervised training via power flow equations.

Inspired by DeepOPF-NGT (PDF 1, Section 4.3):

    L = supervised_vm_loss + λ_v * voltage_violation_penalty

For the MVP, we use a hybrid supervised + physics-penalty approach:

- Supervised: MSE between predicted and solved Vm
- Physics penalty: voltage bound violations (< 0.95 or > 1.05 p.u.)
"""
| from __future__ import annotations | |
| import torch | |
| import torch.nn as nn | |
class PhysicsInformedLoss(nn.Module):
    """Combined supervised + physics-penalty loss for GNN training.

    Components:
        1. VM MSE: ||Vm_pred - Vm_true||^2 (supervised from pandapower solutions)
        2. Voltage bounds: penalty for Vm outside [v_min, v_max]

    The voltage penalty is a soft hinge:
        max(0, v_min - Vm)^2 + max(0, Vm - v_max)^2
    """

    def __init__(
        self,
        lambda_v: float = 10.0,
        v_min: float = 0.95,
        v_max: float = 1.05,
    ):
        super().__init__()
        # Weight of the physics penalty relative to the supervised MSE term.
        self.lambda_v = lambda_v
        self.v_min = v_min
        self.v_max = v_max

    def forward(
        self,
        vm_pred: torch.Tensor,
        vm_true: torch.Tensor,
    ) -> dict[str, torch.Tensor]:
        """Compute the combined loss.

        Parameters
        ----------
        vm_pred : (n_buses,) predicted voltage magnitudes
        vm_true : (n_buses,) ground-truth voltage magnitudes from pandapower

        Returns
        -------
        dict with "total", "mse", "voltage_penalty" losses
        """
        # Supervised term: mean squared error against the solved voltages.
        residual = vm_pred - vm_true
        mse = (residual * residual).mean()

        # Physics term: quadratic hinge on voltages outside [v_min, v_max].
        under = torch.clamp(self.v_min - vm_pred, min=0)
        over = torch.clamp(vm_pred - self.v_max, min=0)
        voltage_penalty = (under.pow(2) + over.pow(2)).mean()

        total = mse + self.lambda_v * voltage_penalty
        # Components are detached so logging them cannot hold the graph alive.
        return {
            "total": total,
            "mse": mse.detach(),
            "voltage_penalty": voltage_penalty.detach(),
        }
| class DynamicLagrangeLoss(nn.Module): | |
| """Physics loss with dynamic Lagrange multiplier adaptation. | |
| The multiplier λ_v is increased when violations are high and decreased | |
| when they are low (dual gradient ascent). This is the DeepOPF-NGT approach. | |
| """ | |
| def __init__( | |
| self, | |
| lambda_v_init: float = 10.0, | |
| dual_lr: float = 0.01, | |
| v_min: float = 0.95, | |
| v_max: float = 1.05, | |
| ): | |
| super().__init__() | |
| self.lambda_v = lambda_v_init | |
| self.dual_lr = dual_lr | |
| self.v_min = v_min | |
| self.v_max = v_max | |
| def forward( | |
| self, | |
| vm_pred: torch.Tensor, | |
| vm_true: torch.Tensor, | |
| ) -> dict[str, torch.Tensor]: | |
| mse = torch.mean((vm_pred - vm_true) ** 2) | |
| low_violation = torch.clamp(self.v_min - vm_pred, min=0) ** 2 | |
| high_violation = torch.clamp(vm_pred - self.v_max, min=0) ** 2 | |
| voltage_penalty = torch.mean(low_violation + high_violation) | |
| total = mse + self.lambda_v * voltage_penalty | |
| # Update multiplier (dual gradient ascent) | |
| with torch.no_grad(): | |
| self.lambda_v = max(0.0, self.lambda_v + self.dual_lr * voltage_penalty.item()) | |
| return { | |
| "total": total, | |
| "mse": mse.detach(), | |
| "voltage_penalty": voltage_penalty.detach(), | |
| "lambda_v": self.lambda_v, | |
| } | |