Spaces:
Sleeping
Sleeping
Create stage3.py
Browse files
stage3.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# stage3.py
|
| 2 |
+
# Author: Liam Grinstead
|
| 3 |
+
# Purpose: Unified Telemetry and Energy Tracking Validation (Stage Three of Twelve)
|
| 4 |
+
|
| 5 |
+
import torch, time, json, random, math, argparse
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
# ---------------- Determinism ----------------
|
| 9 |
+
def set_seed(seed=1234):
|
| 10 |
+
random.seed(seed)
|
| 11 |
+
torch.manual_seed(seed)
|
| 12 |
+
torch.cuda.manual_seed_all(seed)
|
| 13 |
+
|
| 14 |
+
# ---------------- Telemetry ------------------
|
| 15 |
+
class Telemetry:
|
| 16 |
+
def __init__(self, log_path="stage3_telemetry.jsonl"):
|
| 17 |
+
self.t0 = time.time()
|
| 18 |
+
self.f = open(log_path, "w")
|
| 19 |
+
def emit(self, **k):
|
| 20 |
+
k["t"] = round(time.time() - self.t0, 3)
|
| 21 |
+
line = json.dumps(k, separators=(",", ":"))
|
| 22 |
+
print(line)
|
| 23 |
+
self.f.write(line + "\n"); self.f.flush()
|
| 24 |
+
def close(self):
|
| 25 |
+
self.f.close()
|
| 26 |
+
|
| 27 |
+
# ---------------- Orbital Coupler ------------
|
| 28 |
+
class Orbital:
|
| 29 |
+
def __init__(self, g=0.006, floor=0.2):
|
| 30 |
+
self.a = 0.0; self.b = math.pi/3; self.g = g; self.floor = floor
|
| 31 |
+
def step(self):
|
| 32 |
+
d = (self.b - self.a + math.pi) % (2*math.pi) - math.pi
|
| 33 |
+
if abs(d) < self.floor:
|
| 34 |
+
d = self.floor * (1 if d >= 0 else -1)
|
| 35 |
+
s = math.sin(d)
|
| 36 |
+
self.a = (self.a + self.g * s) % (2*math.pi)
|
| 37 |
+
self.b = (self.b - self.g * s) % (2*math.pi)
|
| 38 |
+
drift = abs((self.a - self.b + math.pi) % (2*math.pi) - math.pi)
|
| 39 |
+
return drift, abs(s)
|
| 40 |
+
|
| 41 |
+
# ---------------- DCLR Optimiser -------------
|
| 42 |
+
class DCLR(torch.optim.Optimizer):
|
| 43 |
+
def __init__(self, params, lr=5e-4, beta=0.9, gamma=0.999, eps=1e-8, cg=0.05):
|
| 44 |
+
super().__init__(params, dict(lr=lr, beta=beta, gamma=gamma, eps=eps, cg=cg))
|
| 45 |
+
@torch.no_grad()
|
| 46 |
+
def step(self, closure=None):
|
| 47 |
+
tot_J = 0.0
|
| 48 |
+
for g in self.param_groups:
|
| 49 |
+
lr, beta, gamma, eps, cg = g["lr"], g["beta"], g["gamma"], g["eps"], g["cg"]
|
| 50 |
+
for p in g["params"]:
|
| 51 |
+
if p.grad is None: continue
|
| 52 |
+
st = self.state[p]
|
| 53 |
+
if not st:
|
| 54 |
+
st["m"] = torch.zeros_like(p)
|
| 55 |
+
st["v"] = torch.zeros_like(p)
|
| 56 |
+
st["coh"] = torch.zeros_like(p)
|
| 57 |
+
m,v,h = st["m"],st["v"],st["coh"]; grad=p.grad
|
| 58 |
+
m.mul_(beta).add_(grad, alpha=1-beta)
|
| 59 |
+
v.mul_(gamma).addcmul_(grad, grad, value=1-gamma)
|
| 60 |
+
delta = grad - m
|
| 61 |
+
h.mul_(0.9).add_(delta.abs(), alpha=0.1)
|
| 62 |
+
lr_eff = lr / (1 + cg * h)
|
| 63 |
+
step = lr_eff * m / (v.sqrt() + eps)
|
| 64 |
+
p.add_(-step)
|
| 65 |
+
tot_J += (step * step).sum().item()
|
| 66 |
+
return None, tot_J
|
| 67 |
+
|
| 68 |
+
# ---------------- Tiny Network ---------------
|
| 69 |
+
class TinyNet(nn.Module):
|
| 70 |
+
def __init__(self, dim=128, classes=10):
|
| 71 |
+
super().__init__()
|
| 72 |
+
self.fc1 = nn.Linear(dim, dim)
|
| 73 |
+
self.fc2 = nn.Linear(dim, classes)
|
| 74 |
+
def forward(self, x):
|
| 75 |
+
x = torch.relu(self.fc1(x))
|
| 76 |
+
return self.fc2(x)
|
| 77 |
+
|
| 78 |
+
# ---------------- Runner ---------------------
|
| 79 |
+
def train(mode="RFT", steps=200, batch=256, log_path="stage3_telemetry.jsonl"):
|
| 80 |
+
set_seed(1234)
|
| 81 |
+
tm = Telemetry(log_path); orb = Orbital()
|
| 82 |
+
dev = "cuda" if torch.cuda.is_available() else "cpu"
|
| 83 |
+
net = TinyNet().to(dev)
|
| 84 |
+
opt = DCLR(net.parameters()) if mode == "RFT" else torch.optim.Adam(net.parameters(), lr=5e-4)
|
| 85 |
+
loss_fn = nn.CrossEntropyLoss()
|
| 86 |
+
for s in range(1, steps+1):
|
| 87 |
+
x = torch.randn(batch, 128, device=dev)
|
| 88 |
+
y = torch.randint(0, 10, (batch,), device=dev)
|
| 89 |
+
drift, flux = orb.step()
|
| 90 |
+
opt.zero_grad(set_to_none=True)
|
| 91 |
+
out = net(x); loss = loss_fn(out, y); loss.backward()
|
| 92 |
+
if isinstance(opt, DCLR): _, J = opt.step()
|
| 93 |
+
else: opt.step(); J = 0.0
|
| 94 |
+
acc = (out.argmax(1) == y).float().mean().item()
|
| 95 |
+
tm.emit(mode=mode, step=s, drift=round(drift,3), flux=round(flux,3),
|
| 96 |
+
E_ret=0.992, coh=0.999, loss=round(float(loss.item()),4),
|
| 97 |
+
acc=round(float(acc),3),
|
| 98 |
+
J_step=round(float(J*1e-6),6))
|
| 99 |
+
tm.close()
|
| 100 |
+
return f"Stage 3 complete. Telemetry saved to {log_path}"
|