""" agent-learn — FORGE Persistent Learning Layer Owns: Q-table (persistent), reward scoring pipeline, RLHF data store. Reads traces from agent-trace, writes rewards back, updates Q-values. Agents query here for best actions; NEXUS replaces its /tmp Q-table with this. """ import asyncio, hashlib, json, math, os, sqlite3, time, uuid from contextlib import asynccontextmanager from pathlib import Path import uvicorn from fastapi import FastAPI, HTTPException, Query, Request from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse # --------------------------------------------------------------------------- # Config # --------------------------------------------------------------------------- DB_PATH = Path(os.getenv("LEARN_DB", "/tmp/learn.db")) PORT = int(os.getenv("PORT", "7860")) LEARN_KEY = os.getenv("LEARN_KEY", "") TRACE_URL = os.getenv("TRACE_URL", "https://chris4k-agent-trace.hf.space") TRACE_KEY = os.getenv("TRACE_KEY", "") LEARN_RATE = float(os.getenv("LEARN_RATE", "0.1")) # α DISCOUNT = float(os.getenv("DISCOUNT", "0.9")) # γ EPSILON = float(os.getenv("EPSILON", "0.15")) # exploration rate SYNC_INTERVAL= int(os.getenv("SYNC_INTERVAL", "120")) # seconds between trace pulls # --------------------------------------------------------------------------- # Database # --------------------------------------------------------------------------- def get_db(): conn = sqlite3.connect(str(DB_PATH), check_same_thread=False) conn.row_factory = sqlite3.Row conn.execute("PRAGMA journal_mode=WAL") conn.execute("PRAGMA synchronous=NORMAL") return conn def init_db(): conn = get_db() conn.executescript(""" -- Q-table: one row per (agent, state_hash, action) CREATE TABLE IF NOT EXISTS qtable ( id TEXT PRIMARY KEY, agent TEXT NOT NULL, state_hash TEXT NOT NULL, state_json TEXT NOT NULL DEFAULT '{}', action TEXT NOT NULL, q_value REAL NOT NULL DEFAULT 0.0, visits INTEGER NOT NULL DEFAULT 0, last_reward REAL, updated_at REAL NOT NULL ); CREATE UNIQUE INDEX IF NOT EXISTS idx_qt_key ON qtable(agent, state_hash, action); CREATE INDEX IF NOT EXISTS idx_qt_agent ON qtable(agent); CREATE INDEX IF NOT EXISTS idx_qt_action ON qtable(action); -- Reward log: every scored trace event CREATE TABLE IF NOT EXISTS rewards ( id TEXT PRIMARY KEY, trace_id TEXT NOT NULL, agent TEXT NOT NULL, event_type TEXT NOT NULL, raw_score REAL NOT NULL, components TEXT NOT NULL DEFAULT '{}', ts REAL NOT NULL ); CREATE INDEX IF NOT EXISTS idx_rw_agent ON rewards(agent); CREATE INDEX IF NOT EXISTS idx_rw_ts ON rewards(ts DESC); -- RLHF store: labeled completions for future fine-tuning CREATE TABLE IF NOT EXISTS rlhf ( id TEXT PRIMARY KEY, agent TEXT NOT NULL DEFAULT 'unknown', prompt TEXT NOT NULL, completion TEXT NOT NULL, label TEXT NOT NULL DEFAULT 'unlabeled', -- approved|rejected|unlabeled reward REAL, source TEXT NOT NULL DEFAULT 'human', -- human|auto|model meta TEXT NOT NULL DEFAULT '{}', created_at REAL NOT NULL ); CREATE INDEX IF NOT EXISTS idx_rlhf_agent ON rlhf(agent); CREATE INDEX IF NOT EXISTS idx_rlhf_label ON rlhf(label); -- Cursor: last ts pulled from agent-trace per agent CREATE TABLE IF NOT EXISTS sync_cursor ( agent TEXT PRIMARY KEY, last_ts REAL NOT NULL DEFAULT 0.0 ); -- Skill candidates surfaced from traces CREATE TABLE IF NOT EXISTS skill_candidates ( id TEXT PRIMARY KEY, description TEXT NOT NULL, agent TEXT NOT NULL, frequency INTEGER NOT NULL DEFAULT 1, status TEXT NOT NULL DEFAULT 'pending', -- pending|promoted|rejected created_at REAL NOT NULL, updated_at REAL NOT NULL ); """) conn.commit(); conn.close() # --------------------------------------------------------------------------- # Q-table operations # --------------------------------------------------------------------------- def _state_hash(state: dict) -> str: canonical = json.dumps(state, sort_keys=True, separators=(',',':')) return hashlib.sha256(canonical.encode()).hexdigest()[:16] def q_get(agent: str, state: dict) -> list: """Return all (action, q_value, visits) rows for this agent+state.""" sh = _state_hash(state) conn = get_db() rows = conn.execute( "SELECT action, q_value, visits, last_reward FROM qtable WHERE agent=? AND state_hash=? ORDER BY q_value DESC", (agent, sh)).fetchall() conn.close() return [dict(r) for r in rows] def q_best_action(agent: str, state: dict, actions: list) -> dict: """ Epsilon-greedy action selection. Returns {"action": str, "q_value": float, "strategy": "exploit"|"explore"|"init"} """ import random sh = _state_hash(state) conn = get_db() rows = conn.execute( "SELECT action, q_value, visits FROM qtable WHERE agent=? AND state_hash=? ORDER BY q_value DESC", (agent, sh)).fetchall() conn.close() known = {r["action"]: (r["q_value"], r["visits"]) for r in rows} # Filter to valid actions valid = [a for a in actions if a] if not valid: return {"action": None, "q_value": 0.0, "strategy": "no_actions"} # Explore: random action if random.random() < EPSILON: a = random.choice(valid) return {"action": a, "q_value": known.get(a, (0.0, 0))[0], "strategy": "explore"} # Exploit: best known, or init with 0 for unknowns best_a, best_q = None, float('-inf') for a in valid: q = known.get(a, (0.0, 0))[0] if q > best_q: best_q, best_a = q, a strategy = "exploit" if best_a in known else "init" return {"action": best_a or valid[0], "q_value": best_q if best_q > float('-inf') else 0.0, "strategy": strategy} def q_update(agent: str, state: dict, action: str, reward: float, next_state: dict = None) -> dict: """ Q-learning update: Q(s,a) ← Q(s,a) + α[r + γ·max_Q(s') - Q(s,a)] """ sh = _state_hash(state) now = time.time() conn = get_db() # Current Q(s,a) row = conn.execute( "SELECT q_value, visits FROM qtable WHERE agent=? AND state_hash=? AND action=?", (agent, sh, action)).fetchone() q_old = row["q_value"] if row else 0.0 visits = (row["visits"] if row else 0) + 1 # max Q(s') if next_state provided max_q_next = 0.0 if next_state: nsh = _state_hash(next_state) best_next = conn.execute( "SELECT MAX(q_value) FROM qtable WHERE agent=? AND state_hash=?", (agent, nsh)).fetchone()[0] max_q_next = best_next or 0.0 q_new = q_old + LEARN_RATE * (reward + DISCOUNT * max_q_next - q_old) row_id = str(uuid.uuid4()) conn.execute(""" INSERT INTO qtable (id,agent,state_hash,state_json,action,q_value,visits,last_reward,updated_at) VALUES (?,?,?,?,?,?,?,?,?) ON CONFLICT(agent,state_hash,action) DO UPDATE SET q_value=excluded.q_value, visits=excluded.visits, last_reward=excluded.last_reward, updated_at=excluded.updated_at """, (row_id, agent, sh, json.dumps(state), action, q_new, visits, reward, now)) conn.commit(); conn.close() return {"agent": agent, "action": action, "q_old": round(q_old, 5), "q_new": round(q_new, 5), "reward": reward, "visits": visits} def q_hint(agent: str, state: dict, action: str, nudge: float) -> dict: """Manual Q-value nudge (bias from operator). Additive.""" sh = _state_hash(state) now = time.time() conn = get_db() row = conn.execute( "SELECT q_value, visits FROM qtable WHERE agent=? AND state_hash=? AND action=?", (agent, sh, action)).fetchone() q_old = row["q_value"] if row else 0.0 visits = (row["visits"] if row else 0) q_new = q_old + nudge conn.execute(""" INSERT INTO qtable (id,agent,state_hash,state_json,action,q_value,visits,last_reward,updated_at) VALUES (?,?,?,?,?,?,?,?,?) ON CONFLICT(agent,state_hash,action) DO UPDATE SET q_value=excluded.q_value, updated_at=excluded.updated_at """, (str(uuid.uuid4()), agent, sh, json.dumps(state), action, q_new, visits, None, now)) conn.commit(); conn.close() return {"agent": agent, "action": action, "q_old": round(q_old,5), "q_new": round(q_new,5), "nudge": nudge} def q_stats() -> dict: conn = get_db() total = conn.execute("SELECT COUNT(*) FROM qtable").fetchone()[0] agents = conn.execute("SELECT agent, COUNT(*) as n, AVG(q_value) as avg_q, MAX(q_value) as max_q " "FROM qtable GROUP BY agent ORDER BY n DESC").fetchall() top = conn.execute("SELECT agent, action, q_value, visits FROM qtable " "ORDER BY q_value DESC LIMIT 10").fetchall() worst = conn.execute("SELECT agent, action, q_value, visits FROM qtable " "ORDER BY q_value ASC LIMIT 10").fetchall() conn.close() return { "total_entries": total, "by_agent": [dict(r) for r in agents], "top_actions": [dict(r) for r in top], "worst_actions": [dict(r) for r in worst], } # --------------------------------------------------------------------------- # Reward scoring — 0–10 float scale # --------------------------------------------------------------------------- # Scale semantics: # 0–1 catastrophic (PII leak, injection, critical safety failure) # 2–3 failure (error, hallucinated tool, unrecoverable) # 4–5 partial (slow, compensated saga, incomplete) # 6 acceptable (baseline — completed without issues) # 7 good (fast, used skill, memory stored) # 8 excellent (all bonuses, fast, clean) # 9 exceptional (auto ceiling — reserved for near-perfect) # 10 human-only (PATCH /api/traces/{id}/rate override only) # # Auto-score is capped at 9.0. # Human rating via PATCH /api/rlhf/{id} can set 10. # RLHF auto-collection: score>=8 → preferred, score<=3 → rejected SCORE_BASELINE = 6.0 SCORE_AUTO_CEILING = 9.0 SCORE_HUMAN_MAX = 10.0 def score_trace_event(ev: dict) -> tuple[float, dict]: """ Score a trace event on a 0–10 float scale. Returns (score, components). """ components: dict = {} score = SCORE_BASELINE # ── Deductions ──────────────────────────────────────────────── if ev.get("status") == "error": components["error"] = -3.0 score -= 3.0 if ev.get("injection_detected"): components["injection_detected"] = -4.0 score -= 4.0 if ev.get("pii_leaked"): components["pii_leaked"] = -4.0 score -= 4.0 if ev.get("hallucinated_tool"): components["hallucinated_tool"] = -3.0 score -= 3.0 if ev.get("saga_compensated"): components["saga_compensated"] = -1.0 score -= 1.0 lat = ev.get("latency_ms") if lat is not None and lat > 8000: components["latency_over_8s"] = -1.5 score -= 1.5 # ── Bonuses ─────────────────────────────────────────────────── if ev.get("event_type") == "skill_load": components["skill_load"] = +0.5 score += 0.5 if ev.get("skill_candidate"): components["skill_candidate"] = +1.0 score += 1.0 if ev.get("memory_stored"): components["memory_stored"] = +0.3 score += 0.3 if lat is not None and lat < 1000 and ev.get("event_type") == "llm_call": components["latency_under_1s"] = +0.5 score += 0.5 if ev.get("saga_clean"): components["saga_clean"] = +0.5 score += 0.5 # Clamp 0–AUTO_CEILING (10 is human-only) score = max(0.0, min(SCORE_AUTO_CEILING, score)) return round(score, 2), components # --------------------------------------------------------------------------- # Trace sync pipeline # --------------------------------------------------------------------------- _http_client = None def _get_http(): global _http_client if _http_client is None: try: import httpx _http_client = httpx.Client(timeout=10.0) except ImportError: import urllib.request as _ur _http_client = "urllib" return _http_client def _http_get(url, params=None) -> dict: client = _get_http() if hasattr(client, "get"): r = client.get(url, params=params) return r.json() else: import urllib.request, urllib.parse if params: url = url + "?" + urllib.parse.urlencode(params) with urllib.request.urlopen(url, timeout=10) as resp: return json.loads(resp.read()) def _http_patch(url, data: dict) -> bool: client = _get_http() if hasattr(client, "patch"): r = client.patch(url, json=data) return r.status_code < 300 else: import urllib.request req = urllib.request.Request(url, data=json.dumps(data).encode(), headers={"Content-Type":"application/json"}, method="PATCH") try: urllib.request.urlopen(req, timeout=5) return True except Exception: return False def pull_and_score_traces() -> dict: """ Pull unscored traces from agent-trace, score them, write rewards back. Returns summary stats. """ conn = get_db() cursor_rows = {r["agent"]: r["last_ts"] for r in conn.execute("SELECT agent, last_ts FROM sync_cursor").fetchall()} conn.close() try: data = _http_get(f"{TRACE_URL}/api/traces", {"has_reward": "false", "since_hours": 48, "limit": 200}) events = data.get("events", []) except Exception as e: return {"ok": False, "error": str(e)} scored = 0 skipped = 0 reward_sum = 0.0 new_cursors = {} for ev in events: agent = ev.get("agent", "unknown") ts = ev.get("ts", 0) # Skip already-rewarded if ev.get("reward") is not None: skipped += 1 continue reward, components = score_trace_event(ev) # Write reward back to agent-trace try: _http_patch(f"{TRACE_URL}/api/trace/{ev['id']}/reward", {"reward": reward, "source": "learn"}) except Exception: pass # best-effort # Log reward locally conn = get_db() conn.execute(""" INSERT OR IGNORE INTO rewards (id,trace_id,agent,event_type,raw_score,components,ts) VALUES (?,?,?,?,?,?,?) """, (str(uuid.uuid4()), ev["id"], agent, ev.get("event_type","custom"), reward, json.dumps(components), time.time())) conn.commit(); conn.close() # Q-table update: map event → (state, action) _update_qtable_from_trace(ev, reward) # RLHF auto-collection: preferred (>=8) and rejected (<=3) if reward >= 8.0 or reward <= 3.0: label = "approved" if reward >= 8.0 else "rejected" prompt = (f"[{ev.get('agent','?')}] {ev.get('event_type','?')}: " f"{ev.get('tool_name') or ev.get('model') or ev.get('task','')}") completion = json.dumps({k: ev.get(k) for k in ("status","latency_ms","tokens_out","saga_clean","skill_candidate","memory_stored") if ev.get(k) is not None}) try: rlhf_add(ev.get("agent","unknown"), prompt, completion, label=label, reward=reward, source="auto", meta={"trace_id": ev["id"], "components": components}) except Exception: pass scored += 1 reward_sum += reward new_cursors[agent] = max(new_cursors.get(agent, 0), ts) # Update cursors if new_cursors: conn = get_db() for agent, ts in new_cursors.items(): conn.execute("INSERT INTO sync_cursor (agent,last_ts) VALUES (?,?) " "ON CONFLICT(agent) DO UPDATE SET last_ts=MAX(last_ts,excluded.last_ts)", (agent, ts)) conn.commit(); conn.close() return { "ok": True, "scored": scored, "skipped": skipped, "avg_reward": round(reward_sum / max(scored, 1), 4), } def _update_qtable_from_trace(ev: dict, reward: float): """Map a trace event to a Q-table update.""" agent = ev.get("agent", "unknown") event_type = ev.get("event_type", "custom") model = ev.get("model", "") tool = ev.get("tool_name", "") lat = ev.get("latency_ms") # State: context that was available when the decision was made # Action: the choice that was made if event_type == "llm_call" and model: # State: which agent, what kind of task state = {"agent": agent, "event": "model_selection"} action = model q_update(agent, state, action, reward) elif event_type == "tool_use" and tool: state = {"agent": agent, "event": "tool_selection"} action = tool q_update(agent, state, action, reward) elif event_type == "skill_load" and ev.get("skill_id"): state = {"agent": agent, "event": "skill_selection"} action = ev["skill_id"] q_update(agent, state, action, reward) # --------------------------------------------------------------------------- # RLHF store # --------------------------------------------------------------------------- def rlhf_add(agent: str, prompt: str, completion: str, label: str = "unlabeled", reward: float = None, source: str = "human", meta: dict = None) -> str: now = time.time() rid = str(uuid.uuid4()) label = label if label in ("approved","rejected","unlabeled") else "unlabeled" conn = get_db() conn.execute(""" INSERT INTO rlhf (id,agent,prompt,completion,label,reward,source,meta,created_at) VALUES (?,?,?,?,?,?,?,?,?) """, (rid, agent, prompt, completion, label, reward, source, json.dumps(meta or {}), now)) conn.commit(); conn.close() return rid def rlhf_label(entry_id: str, label: str, reward: float = None) -> bool: label = label if label in ("approved","rejected","unlabeled") else "unlabeled" conn = get_db() n = conn.execute( "UPDATE rlhf SET label=?, reward=? WHERE id=?", (label, reward, entry_id) ).rowcount conn.commit(); conn.close() return n > 0 def rlhf_list(agent: str = "", label: str = "", limit: int = 50) -> list: conn = get_db() where, params = [], [] if agent: where.append("agent=?"); params.append(agent) if label: where.append("label=?"); params.append(label) sql = ("SELECT * FROM rlhf" + (f" WHERE {' AND '.join(where)}" if where else "") + " ORDER BY created_at DESC LIMIT ?") rows = conn.execute(sql, params+[limit]).fetchall() conn.close() result = [] for r in rows: d = dict(r) try: d["meta"] = json.loads(d["meta"]) except Exception: pass result.append(d) return result def rlhf_stats() -> dict: conn = get_db() rows = conn.execute("SELECT label, COUNT(*) as n FROM rlhf GROUP BY label").fetchall() conn.close() total = sum(r["n"] for r in rows) return {"total": total, "by_label": {r["label"]: r["n"] for r in rows}} # --------------------------------------------------------------------------- # Skill candidates # --------------------------------------------------------------------------- def candidate_add(description: str, agent: str) -> str: conn = get_db() # Dedup: if description matches existing pending candidate, increment frequency existing = conn.execute( "SELECT id, frequency FROM skill_candidates WHERE description=? AND status='pending'", (description,)).fetchone() if existing: conn.execute("UPDATE skill_candidates SET frequency=frequency+1, updated_at=? WHERE id=?", (time.time(), existing["id"])) conn.commit(); conn.close() return existing["id"] cid = str(uuid.uuid4()) now = time.time() conn.execute(""" INSERT INTO skill_candidates (id,description,agent,frequency,status,created_at,updated_at) VALUES (?,?,?,1,'pending',?,?) """, (cid, description, agent, now, now)) conn.commit(); conn.close() return cid def candidate_update(cid: str, status: str) -> bool: conn = get_db() n = conn.execute("UPDATE skill_candidates SET status=?, updated_at=? WHERE id=?", (status, time.time(), cid)).rowcount conn.commit(); conn.close() return n > 0 def candidates_list(status: str = "pending") -> list: conn = get_db() rows = conn.execute( "SELECT * FROM skill_candidates WHERE status=? ORDER BY frequency DESC, created_at DESC", (status,)).fetchall() conn.close() return [dict(r) for r in rows] # --------------------------------------------------------------------------- # Learn stats # --------------------------------------------------------------------------- def learn_stats() -> dict: conn = get_db() rw_count = conn.execute("SELECT COUNT(*) FROM rewards").fetchone()[0] rw_avg = conn.execute("SELECT AVG(raw_score) FROM rewards").fetchone()[0] rw_24h = conn.execute("SELECT COUNT(*), AVG(raw_score) FROM rewards WHERE ts>=?", (time.time()-86400,)).fetchone() rlhf_s = rlhf_stats() cands = conn.execute("SELECT COUNT(*) FROM skill_candidates WHERE status='pending'").fetchone()[0] conn.close() qs = q_stats() return { "qtable": qs, "rewards": { "total": rw_count, "avg_all_time": round(rw_avg or 0, 4), "last_24h": {"count": rw_24h[0], "avg": round(rw_24h[1] or 0, 4)}, }, "rlhf": rlhf_s, "skill_candidates_pending": cands, } def reward_trend(hours: int = 24, bucket_minutes: int = 60) -> list: conn = get_db() since = time.time() - hours * 3600 rows = conn.execute( "SELECT ts, raw_score, agent, event_type FROM rewards WHERE ts>=? ORDER BY ts", (since,)).fetchall() conn.close() if not rows: return [] # Bucket by hour buckets = {} for r in rows: h = int(r["ts"] // 3600) * 3600 if h not in buckets: buckets[h] = {"ts": h, "count": 0, "total": 0.0} buckets[h]["count"] += 1 buckets[h]["total"] += r["raw_score"] return [{"ts": v["ts"], "count": v["count"], "avg_reward": round(v["total"]/v["count"],4)} for v in sorted(buckets.values(), key=lambda x: x["ts"])] # --------------------------------------------------------------------------- # Background sync loop # --------------------------------------------------------------------------- async def _sync_loop(): while True: await asyncio.sleep(SYNC_INTERVAL) try: pull_and_score_traces() except Exception: pass # --------------------------------------------------------------------------- # Seed # --------------------------------------------------------------------------- def seed_demo(): conn = get_db() n = conn.execute("SELECT COUNT(*) FROM qtable").fetchone()[0] conn.close() if n > 0: return # Seed NEXUS model selection Q-table from prior knowledge now = time.time() entries = [ # ki-fusion RTX5090 is best when available ("nexus", {"agent":"nexus","event":"model_selection"}, "qwen/qwen3.5-35b-a3b", 0.72), ("nexus", {"agent":"nexus","event":"model_selection"}, "claude-haiku-4-5", 0.55), ("nexus", {"agent":"nexus","event":"model_selection"}, "hf_api", 0.30), ("nexus", {"agent":"nexus","event":"model_selection"}, "local_cpu", 0.10), # Tool selection ("pulse", {"agent":"pulse","event":"tool_selection"}, "kanban_create", 0.65), ("pulse", {"agent":"pulse","event":"tool_selection"}, "slot_reserve", 0.60), ("pulse", {"agent":"pulse","event":"tool_selection"}, "trigger_agent", 0.50), # Skill reuse ("pulse", {"agent":"pulse","event":"skill_selection"}, "calculator", 0.40), ("pulse", {"agent":"pulse","event":"skill_selection"}, "forge_client", 0.55), ] for agent, state, action, q in entries: sh = _state_hash(state) conn = get_db() conn.execute(""" INSERT OR IGNORE INTO qtable (id,agent,state_hash,state_json,action,q_value,visits,last_reward,updated_at) VALUES (?,?,?,?,?,?,0,NULL,?) """, (str(uuid.uuid4()), agent, sh, json.dumps(state), action, q, now)) conn.commit(); conn.close() # Seed RLHF examples examples = [ ("nexus", "Route this query to the best available LLM.", "I will use ki-fusion RTX5090 (qwen3.5-35b) as it has the best quality/speed ratio.", "approved", 0.9), ("nexus", "Route this query to the best available LLM.", "I will use local_cpu for this complex multi-step reasoning task.", "rejected", -0.3), ("pulse", "Schedule this long-running background task.", "I will reserve an LLM slot before starting and release it on completion.", "approved", 0.8), ] for agent, prompt, completion, label, reward in examples: rlhf_add(agent, prompt, completion, label, reward, "seed") # Seed a skill candidate candidate_add("Pattern: agents repeatedly fetch the same URL multiple times per session → caching skill needed", "learn") # --------------------------------------------------------------------------- # MCP # --------------------------------------------------------------------------- MCP_TOOLS = [ {"name":"learn_q_get","description":"Get all Q-values for an agent+state.", "inputSchema":{"type":"object","required":["agent","state"], "properties":{"agent":{"type":"string"},"state":{"type":"object"}}}}, {"name":"learn_q_best","description":"Get best action (epsilon-greedy) for an agent+state.", "inputSchema":{"type":"object","required":["agent","state","actions"], "properties":{"agent":{"type":"string"},"state":{"type":"object"}, "actions":{"type":"array","items":{"type":"string"}}}}}, {"name":"learn_q_update","description":"Update Q-value after taking an action and observing reward.", "inputSchema":{"type":"object","required":["agent","state","action","reward"], "properties":{"agent":{"type":"string"},"state":{"type":"object"}, "action":{"type":"string"},"reward":{"type":"number"}, "next_state":{"type":"object"}}}}, {"name":"learn_q_hint","description":"Manually nudge a Q-value (operator override).", "inputSchema":{"type":"object","required":["agent","state","action","nudge"], "properties":{"agent":{"type":"string"},"state":{"type":"object"}, "action":{"type":"string"},"nudge":{"type":"number"}}}}, {"name":"learn_stats","description":"Get learning system statistics.", "inputSchema":{"type":"object","properties":{}}}, {"name":"learn_rlhf_add","description":"Add a labeled completion to the RLHF store.", "inputSchema":{"type":"object","required":["agent","prompt","completion"], "properties":{"agent":{"type":"string"},"prompt":{"type":"string"}, "completion":{"type":"string"},"label":{"type":"string"}, "reward":{"type":"number"},"source":{"type":"string"}}}}, {"name":"learn_score_trace","description":"Score a single trace event and return reward.", "inputSchema":{"type":"object","required":["event"], "properties":{"event":{"type":"object","description":"Trace event dict"}}}}, {"name":"learn_candidate_add","description":"Add a skill candidate for review.", "inputSchema":{"type":"object","required":["description","agent"], "properties":{"description":{"type":"string"},"agent":{"type":"string"}}}}, {"name":"learn_sync","description":"Trigger immediate trace pull and reward scoring.", "inputSchema":{"type":"object","properties":{}}}, {"name":"learn_rate_trace","description":"Human rating override for a trace (0–10 float). Score 10 is human-only ceiling. Scores >=8 auto-labeled preferred, <=3 auto-labeled rejected in RLHF store.", "inputSchema":{"type":"object","required":["trace_id","rating"], "properties":{"trace_id":{"type":"string"},"rating":{"type":"number","minimum":0,"maximum":10}, "agent":{"type":"string"},"comment":{"type":"string"}}}}, ] def handle_mcp(method, params, req_id): def ok(r): return {"jsonrpc":"2.0","id":req_id,"result":r} def txt(d): return ok({"content":[{"type":"text","text":json.dumps(d)}]}) if method=="initialize": return ok({"protocolVersion":"2024-11-05", "serverInfo":{"name":"agent-learn","version":"1.0.0"}, "capabilities":{"tools":{}}}) if method=="tools/list": return ok({"tools":MCP_TOOLS}) if method=="tools/call": n, a = params.get("name",""), params.get("arguments",{}) if n=="learn_q_get": return txt({"entries":q_get(a["agent"],a["state"])}) if n=="learn_q_best": return txt(q_best_action(a["agent"],a["state"],a.get("actions",[]))) if n=="learn_q_update": return txt(q_update(a["agent"],a["state"],a["action"],float(a["reward"]),a.get("next_state"))) if n=="learn_q_hint": return txt(q_hint(a["agent"],a["state"],a["action"],float(a["nudge"]))) if n=="learn_stats": return txt(learn_stats()) if n=="learn_rlhf_add": rid = rlhf_add(a["agent"],a["prompt"],a["completion"], a.get("label","unlabeled"),a.get("reward"),a.get("source","mcp")) return txt({"ok":True,"id":rid}) if n=="learn_score_trace": score, comp = score_trace_event(a.get("event",{})) return txt({"reward":score,"components":comp}) if n=="learn_candidate_add": cid = candidate_add(a["description"],a["agent"]) return txt({"ok":True,"id":cid}) if n=="learn_sync": return txt(pull_and_score_traces()) if n=="learn_rate_trace": rating = float(a["rating"]) if not (0.0 <= rating <= SCORE_HUMAN_MAX): return txt({"ok":False,"error":f"rating must be 0–{SCORE_HUMAN_MAX}"}) agent = str(a.get("agent","unknown")) comment = str(a.get("comment","")) try: _http_patch(f"{TRACE_URL}/api/trace/{a['trace_id']}/reward", {"reward":rating,"source":"human","comment":comment}) except Exception: pass label = "approved" if rating>=8.0 else ("rejected" if rating<=3.0 else "unlabeled") conn = get_db() conn.execute("INSERT OR IGNORE INTO rewards (id,trace_id,agent,event_type,raw_score,components,ts) VALUES (?,?,?,?,?,?,?)", (str(uuid.uuid4()),a["trace_id"],agent,"human_rating",rating, json.dumps({"human_override":True,"comment":comment}),time.time())) conn.commit(); conn.close() rid = rlhf_add(agent,f"[human-rated] {a['trace_id']}",comment or "human override", label=label,reward=rating,source="human",meta={"trace_id":a["trace_id"]}) return txt({"ok":True,"trace_id":a["trace_id"],"rating":rating,"label":label,"rlhf_id":rid}) return {"jsonrpc":"2.0","id":req_id,"error":{"code":-32601,"message":f"Unknown tool: {n}"}} if method in ("notifications/initialized","notifications/cancelled"): return None return {"jsonrpc":"2.0","id":req_id,"error":{"code":-32601,"message":f"Method not found: {method}"}} # --------------------------------------------------------------------------- # FastAPI app # --------------------------------------------------------------------------- @asynccontextmanager async def lifespan(app): init_db(); seed_demo() asyncio.create_task(_sync_loop()) yield app = FastAPI(title="agent-learn", version="1.0.0", lifespan=lifespan) def _auth(r): return not LEARN_KEY or r.headers.get("x-learn-key","") == LEARN_KEY # --- Q-table REST --- @app.get("/api/q") async def api_q_get(agent:str=Query(...), state:str=Query("{}") ): try: s = json.loads(state) except Exception: raise HTTPException(400,"state must be JSON") return JSONResponse({"entries": q_get(agent, s)}) @app.post("/api/q/best") async def api_q_best(request:Request): b = await request.json() return JSONResponse(q_best_action(b["agent"], b.get("state",{}), b.get("actions",[]))) @app.post("/api/q/update") async def api_q_update(request:Request): if not _auth(request): raise HTTPException(403,"Invalid X-Learn-Key") b = await request.json() return JSONResponse(q_update(b["agent"],b.get("state",{}),b["action"],float(b["reward"]),b.get("next_state"))) @app.post("/api/q/hint") async def api_q_hint(request:Request): if not _auth(request): raise HTTPException(403,"Invalid X-Learn-Key") b = await request.json() return JSONResponse(q_hint(b["agent"],b.get("state",{}),b["action"],float(b["nudge"]))) @app.get("/api/q/stats") async def api_q_stats(): return JSONResponse(q_stats()) # --- Scoring --- @app.post("/api/score") async def api_score(request:Request): b = await request.json() score, comp = score_trace_event(b) return JSONResponse({"reward": score, "components": comp}) @app.post("/api/sync") async def api_sync(request:Request): if not _auth(request): raise HTTPException(403,"Invalid X-Learn-Key") result = pull_and_score_traces() return JSONResponse(result) # --- RLHF --- @app.get("/api/rlhf") async def api_rlhf_list(agent:str=Query(""), label:str=Query(""), limit:int=Query(50)): return JSONResponse({"entries": rlhf_list(agent,label,limit)}) @app.post("/api/rlhf", status_code=201) async def api_rlhf_add(request:Request): if not _auth(request): raise HTTPException(403,"Invalid X-Learn-Key") b = await request.json() rid = rlhf_add(b.get("agent","unknown"),b["prompt"],b["completion"], b.get("label","unlabeled"),b.get("reward"),b.get("source","api"),b.get("meta")) return JSONResponse({"ok":True,"id":rid}) @app.patch("/api/rlhf/{entry_id}") async def api_rlhf_label(entry_id:str, request:Request): if not _auth(request): raise HTTPException(403,"Invalid X-Learn-Key") b = await request.json() ok = rlhf_label(entry_id, b.get("label","unlabeled"), b.get("reward")) return JSONResponse({"ok":ok}) @app.patch("/api/traces/{trace_id}/rate") async def api_trace_rate(trace_id:str, request:Request): """Human rating override — allows score of 10 (human-only ceiling). Writes back to agent-trace and updates Q-table.""" if not _auth(request): raise HTTPException(403,"Invalid X-Learn-Key") b = await request.json() rating = float(b.get("rating", b.get("reward", 0.0))) if not (0.0 <= rating <= SCORE_HUMAN_MAX): raise HTTPException(400, f"rating must be 0–{SCORE_HUMAN_MAX}") agent = str(b.get("agent","unknown")) comment = str(b.get("comment","")) # Write reward back to agent-trace (best-effort) try: _http_patch(f"{TRACE_URL}/api/trace/{trace_id}/reward", {"reward": rating, "source": "human", "comment": comment}) except Exception: pass # Log in rewards table conn = get_db() conn.execute(""" INSERT OR IGNORE INTO rewards (id,trace_id,agent,event_type,raw_score,components,ts) VALUES (?,?,?,?,?,?,?) """, (str(uuid.uuid4()), trace_id, agent, "human_rating", rating, json.dumps({"human_override": True, "comment": comment}), time.time())) conn.commit(); conn.close() # RLHF: store as approved/rejected based on rating label = "approved" if rating >= 8.0 else ("rejected" if rating <= 3.0 else "unlabeled") rlhf_add(agent, f"[human-rated trace] {trace_id}", comment or "human override", label=label, reward=rating, source="human", meta={"trace_id": trace_id, "comment": comment}) return JSONResponse({"ok": True, "trace_id": trace_id, "rating": rating, "label": label}) # --- Skill candidates --- @app.get("/api/candidates") async def api_candidates(status:str=Query("pending")): return JSONResponse({"candidates": candidates_list(status)}) @app.patch("/api/candidates/{cid}") async def api_candidate_update(cid:str, request:Request): if not _auth(request): raise HTTPException(403,"Invalid X-Learn-Key") b = await request.json() ok = candidate_update(cid, b.get("status","pending")) return JSONResponse({"ok":ok}) # --- Stats --- @app.get("/api/stats") async def api_stats(): return JSONResponse(learn_stats()) @app.get("/api/reward-trend") async def api_trend(hours:int=Query(24)): return JSONResponse({"trend":reward_trend(hours)}) @app.get("/api/health") async def api_health(): conn=get_db(); n=conn.execute("SELECT COUNT(*) FROM qtable").fetchone()[0]; conn.close() return JSONResponse({"ok":True,"qtable_entries":n,"version":"1.0.0"}) # --- MCP --- @app.get("/mcp/sse") async def mcp_sse(request:Request): async def gen(): yield f"data: {json.dumps({'jsonrpc':'2.0','method':'connected','params':{}})}\n\n" yield f"data: {json.dumps({'jsonrpc':'2.0','method':'notifications/tools','params':{'tools':MCP_TOOLS}})}\n\n" while True: if await request.is_disconnected(): break yield ": ping\n\n"; await asyncio.sleep(15) return StreamingResponse(gen(), media_type="text/event-stream", headers={"Cache-Control":"no-cache","Connection":"keep-alive","X-Accel-Buffering":"no"}) @app.post("/mcp") async def mcp_rpc(request:Request): try: body = await request.json() except Exception: return JSONResponse({"jsonrpc":"2.0","id":None,"error":{"code":-32700,"message":"Parse error"}}) if isinstance(body,list): return JSONResponse([r for r in [handle_mcp(x.get("method",""),x.get("params",{}),x.get("id")) for x in body] if r]) r = handle_mcp(body.get("method",""),body.get("params",{}),body.get("id")) return JSONResponse(r or {"jsonrpc":"2.0","id":body.get("id"),"result":{}}) # --------------------------------------------------------------------------- # SPA Dashboard # --------------------------------------------------------------------------- SPA = r""" 🧠 LEARN — FORGE Learning Layer
FORGE Learning Layer
Q-entries
Rewards
Avg reward
Candidates
⚙ Q-Table
🏆 Rewards
👥 RLHF
💡 Skill Candidates
⚙︎ Config
""" @app.get("/", response_class=HTMLResponse) async def root(): return HTMLResponse(content=SPA, media_type="text/html; charset=utf-8") if __name__ == "__main__": uvicorn.run(app, host="0.0.0.0", port=PORT, log_level="info")