File size: 4,500 Bytes
55e3496
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
"""
Inference — Use trained GNN to predict voltage profiles for a given topology.
"""
from __future__ import annotations

import os
import time

import torch
import pandapower as pp

from config import CFG
from src.ai.model import build_model
from src.ai.dataset import net_to_pyg
from src.grid.loader import clone_network
from src.grid.power_flow import run_power_flow, extract_results


# Process-wide cache of loaded GNN models, keyed by checkpoint path, so
# repeated inference calls don't re-read the checkpoint from disk.
_model_cache = {}


def _load_model(checkpoint_path: str | None = None, device: str | None = None):
    """Load the trained GNN model (with caching).

    Parameters
    ----------
    checkpoint_path : str | None
        Path to the saved ``state_dict``; defaults to ``CFG.ai.checkpoint_path``.
    device : str | None
        Torch device string; defaults to "cuda" when available, else "cpu".

    Returns
    -------
    tuple of (model, device) — the model in eval mode on *device*.

    Raises
    ------
    FileNotFoundError
        If no checkpoint exists at *checkpoint_path*.
    """
    checkpoint_path = checkpoint_path or CFG.ai.checkpoint_path

    # Cache the (model, device) pair together: the model was moved to a
    # specific device at load time, and a cache hit must report that same
    # device rather than one recomputed from the current call's arguments.
    if checkpoint_path in _model_cache:
        return _model_cache[checkpoint_path]

    device = device or ("cuda" if torch.cuda.is_available() else "cpu")

    # Guard clause: fail fast before constructing the model.
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(f"No model checkpoint at {checkpoint_path}")

    model = build_model()
    # weights_only=True restricts torch.load to tensor data (safe loading).
    model.load_state_dict(torch.load(checkpoint_path, map_location=device, weights_only=True))

    model = model.to(device)
    model.eval()
    _model_cache[checkpoint_path] = (model, device)
    return model, device


def predict_voltage(
    net: pp.pandapowerNet,
    checkpoint_path: str | None = None,
) -> dict:
    """Run the trained GNN on *net* and return predicted bus voltage magnitudes.

    Parameters
    ----------
    net : pp.pandapowerNet
        Network with topology already set (lines in/out of service).
    checkpoint_path : str | None
        Optional override for the model checkpoint to load.

    Returns
    -------
    dict with "vm_predicted" (list of floats) and "inference_time_ms".
    """
    model, device = _load_model(checkpoint_path)

    # Build the graph representation without solved results — the solved
    # quantities are exactly what the model is asked to produce.
    graph = net_to_pyg(net, include_results=False).to(device)

    start = time.perf_counter()
    with torch.no_grad():
        prediction = model(graph)
    elapsed_ms = (time.perf_counter() - start) * 1000

    voltages = prediction["vm"].cpu().numpy()

    return {
        "vm_predicted": [round(float(value), 4) for value in voltages],
        "inference_time_ms": round(elapsed_ms, 2),
    }


def ai_warm_start_power_flow(
    net: pp.pandapowerNet,
    open_lines: list[int],
    checkpoint_path: str | None = None,
) -> dict:
    """Use GNN to warm-start the Newton-Raphson power flow solver.

    1. Apply topology (open specified lines)
    2. Predict voltage magnitudes with GNN
    3. Initialise pandapower with predicted voltages
    4. Run power flow (should converge faster with good initial guess)

    Parameters
    ----------
    net : pp.pandapowerNet
        Base network; a modified copy is solved, *net* itself is untouched.
    open_lines : list[int]
        Indices of lines to take out of service.
    checkpoint_path : str | None
        Optional override for the model checkpoint.

    Returns
    -------
    dict with full power flow results + AI inference time, or
    {"converged": False, ...} when no solve converged.
    """
    # Local import kept for apply_topology only; extract_results is already
    # imported at module level.
    from src.grid.power_flow import apply_topology

    # Apply topology
    net_new = apply_topology(net, open_lines)

    t_total_start = time.perf_counter()

    # Predict voltages (best effort — any failure, e.g. a missing
    # checkpoint, just means we fall back to a plain flat-start solve).
    ai_time_ms = 0.0
    vm_pred = None
    try:
        prediction = predict_voltage(net_new, checkpoint_path)
        vm_pred = prediction["vm_predicted"]
        ai_time_ms = prediction["inference_time_ms"]
    except Exception:
        # Deliberate broad catch: the AI warm start is optional; the
        # power flow below still runs without it.
        pass

    # Try power flow with AI warm start, fall back to flat start
    converged = False
    if vm_pred is not None:
        try:
            # First run with flat start to populate res tables
            pp.runpp(net_new, init="flat")
            # Now set predicted Vm and re-run with results init
            for i, vm in enumerate(vm_pred):
                if i < len(net_new.res_bus):
                    net_new.res_bus.at[i, "vm_pu"] = vm
            pp.runpp(net_new, init="results")
            converged = True
        except pp.LoadflowNotConverged:
            # Warm start hurt convergence — retry on a fresh copy
            # without the AI initial guess.
            try:
                net_retry = apply_topology(net, open_lines)
                pp.runpp(net_retry, init="flat")
                net_new = net_retry
                converged = True
            except pp.LoadflowNotConverged:
                converged = False
    else:
        try:
            pp.runpp(net_new)
            converged = True
        except pp.LoadflowNotConverged:
            converged = False

    t_total_ms = (time.perf_counter() - t_total_start) * 1000

    if converged:
        result = extract_results(net_new)
        result["open_lines"] = open_lines
        result["ai_inference_ms"] = ai_time_ms
        result["total_time_ms"] = round(t_total_ms, 2)
        return result

    # Failure path mirrors the success path's timing keys for a
    # consistent result schema.
    return {
        "converged": False,
        "open_lines": open_lines,
        "ai_inference_ms": ai_time_ms,
        "total_time_ms": round(t_total_ms, 2),
    }