GitHub Copilot committed on
Commit
2f7a706
·
1 Parent(s): b8b5c5a

Protocol 22: Unified Telemetry & Entropy Gating Synthesis

Browse files
app.py CHANGED
@@ -406,12 +406,12 @@ with gr.Blocks(theme=gr.themes.Monochrome(), title="LOGOS SPCW Protocol") as dem
406
 
407
  {kb_context}
408
  """
409
- response = agent.chat(message, system_prompt=logos_context)
410
 
411
  # If Local failed, try Cloud Dolphin
412
  if "[Local LLM Error]" in response:
413
  agent = get_connector('dolphin')
414
- response = agent.chat(message, system_prompt=logos_context)
415
 
416
  history[-1] = (message, response)
417
  except Exception as e:
 
406
 
407
  {kb_context}
408
  """
409
+ response, logprobs = agent.chat(message, system_prompt=logos_context)
410
 
411
  # If Local failed, try Cloud Dolphin
412
  if "[Local LLM Error]" in response:
413
  agent = get_connector('dolphin')
414
+ response, logprobs = agent.chat(message, system_prompt=logos_context)
415
 
416
  history[-1] = (message, response)
417
  except Exception as e:
logos/agent_dispatcher.py CHANGED
@@ -384,7 +384,13 @@ class LogosSwarm:
384
 
385
  gemma_status, mass = gemma_task.result()
386
  res = rnj1_task.result()
387
- reasoning = reasoning_task.result()
 
 
 
 
 
 
388
 
389
  # 3. VECTORIZATION (The Twist)
390
  # Transform node identity into Rotational Field Coordinates
@@ -496,7 +502,7 @@ class NeuralRouter:
496
  prompt = f"{ROUTER_PROMPT}\n\nUser Input: \"{user_input}\""
497
 
498
  # We use a lower temperature for routing to be deterministic
499
- response = self.connector.chat(prompt, system_prompt="You are a classifier system. Output JSON only.")
500
 
501
  # Attempt to parse JSON
502
  try:
@@ -545,7 +551,7 @@ class NeuralRouter:
545
  "prime_navigator": "[RESONANCE_SCAN: Modulo 9973]"
546
  }.get(intent, "")
547
 
548
- response = self.connector.chat(f"{tools} {user_input}",
549
  system_prompt=persona['system_prompt'],
550
  model=persona['model'])
551
 
 
384
 
385
  gemma_status, mass = gemma_task.result()
386
  res = rnj1_task.result()
387
+ reasoning, logprobs = reasoning_task.result()
388
+
389
+ # Update Entropy Kill Switch with reasoning wave telemetry
390
+ if logprobs:
391
+ # logprobs from OpenAI usually contain 'content' if it's chat/completion
392
+ log_content = logprobs.get('content', [])
393
+ self.oversight.kill_switch.monitor_bulk(log_content)
394
 
395
  # 3. VECTORIZATION (The Twist)
396
  # Transform node identity into Rotational Field Coordinates
 
502
  prompt = f"{ROUTER_PROMPT}\n\nUser Input: \"{user_input}\""
503
 
504
  # We use a lower temperature for routing to be deterministic
505
+ response, _ = self.connector.chat(prompt, system_prompt="You are a classifier system. Output JSON only.")
506
 
507
  # Attempt to parse JSON
508
  try:
 
551
  "prime_navigator": "[RESONANCE_SCAN: Modulo 9973]"
552
  }.get(intent, "")
553
 
554
+ response, _ = self.connector.chat(f"{tools} {user_input}",
555
  system_prompt=persona['system_prompt'],
556
  model=persona['model'])
557
 
logos/agents/dolphin.py CHANGED
@@ -51,8 +51,8 @@ class EntropyKillSwitch:
51
  if len(self.entropy_trace) > self.window_size:
52
  self.entropy_trace.pop(0)
53
 
54
- # Calculate Rolling Average
55
- avg_entropy = np.mean(self.entropy_trace)
56
 
57
  # THE KILL SWITCH
58
  if avg_entropy > self.threshold:
@@ -62,6 +62,41 @@ class EntropyKillSwitch:
62
  self.status = "STABLE"
63
  return False # CONTINUE
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  class DolphinOversight:
66
  def __init__(self, swarm_state=None):
67
  self.name = "Dolphin-x1-8b"
 
51
  if len(self.entropy_trace) > self.window_size:
52
  self.entropy_trace.pop(0)
53
 
54
+ # Calculate Rolling Average (if we have data)
55
+ avg_entropy = np.mean(self.entropy_trace) if self.entropy_trace else 0
56
 
57
  # THE KILL SWITCH
58
  if avg_entropy > self.threshold:
 
62
  self.status = "STABLE"
63
  return False # CONTINUE
64
 
65
+ def monitor_bulk(self, logprobs_content):
66
+ """
67
+ Analyzes a bulk list of logprobs from a non-streaming response.
68
+ Expected format: choices[0].logprobs.content (List of Dicts)
69
+ """
70
+ if not logprobs_content:
71
+ return
72
+
73
+ temp_trace = []
74
+ for entry in logprobs_content:
75
+ # Entry usually has 'top_logprobs' which we can use for entropy
76
+ top_lp = entry.get('top_logprobs', [])
77
+ if not top_lp:
78
+ # Fallback to single logprob (minimal entropy but better than nothing)
79
+ lp = entry.get('logprob', -100)
80
+ entropy = - (math.exp(lp) * lp) if lp > -20 else 1.0 # Rough approximation
81
+ else:
82
+ # Calculate from full distribution
83
+ dist = {e.get('token'): e.get('logprob') for e in top_lp}
84
+ entropy = self.calculate_entropy(dist)
85
+
86
+ temp_trace.append(entropy)
87
+
88
+ if temp_trace:
89
+ # We take the mean of the bulk message as a single data point or fill the trace
90
+ batch_avg = np.mean(temp_trace)
91
+ self.entropy_trace.append(batch_avg)
92
+ if batch_avg > self.threshold:
93
+ self.status = "HALLUCINATION_DETECTED"
94
+ else:
95
+ self.status = "STABLE"
96
+
97
+ if len(self.entropy_trace) > self.window_size:
98
+ self.entropy_trace.pop(0)
99
+
100
  class DolphinOversight:
101
  def __init__(self, swarm_state=None):
102
  self.name = "Dolphin-x1-8b"
logos/connectors.py CHANGED
@@ -246,20 +246,20 @@ class DolphinAgentConnector:
246
 
247
  # Using basic text generation if chat template fails, but try chat first
248
  # Many HF models support chat_completion API via InferenceClient
249
- try:
250
  response = client.chat_completion(
251
  messages=messages,
252
  model=self.model,
253
  max_tokens=500
254
  )
255
- return response.choices[0].message.content
256
  except Exception:
257
  # Fallback to text generation
258
  prompt = f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
259
- return client.text_generation(prompt, model=self.model, max_new_tokens=500)
 
260
 
261
  except Exception as e:
262
- return f"[Dolphin Error] {e}"
263
 
264
  def analyze_diagram(self, image_path: str, prompt: str = "Describe this architectural diagram.") -> str:
265
  """
@@ -308,6 +308,8 @@ class LocalLLMConnector:
308
  if system_prompt:
309
  payload["messages"].append({"role": "system", "content": system_prompt})
310
  payload["messages"].append({"role": "user", "content": message})
 
 
311
 
312
  endpoint = f"{self.base_url}/chat/completions"
313
  try:
@@ -315,11 +317,13 @@ class LocalLLMConnector:
315
  async with session.post(endpoint, json=payload, timeout=10) as response:
316
  if response.status == 200:
317
  data = await response.json()
318
- return data['choices'][0]['message']['content']
 
 
319
  else:
320
  return f"[Error] Local LLM returned status {response.status}"
321
  except Exception as e:
322
- return f"[Async Local LLM Error] {e}"
323
 
324
  def chat(self, message: str, system_prompt: str = None, model: str = None, image_path: str = None) -> str:
325
  """
@@ -348,7 +352,9 @@ class LocalLLMConnector:
348
  "model": target_model,
349
  "messages": [],
350
  "temperature": 0.7,
351
- "stream": False
 
 
352
  }
353
 
354
  if system_prompt:
@@ -379,13 +385,16 @@ class LocalLLMConnector:
379
  # Short timeout for local to fail fast
380
  response = requests.post(endpoint, json=payload, timeout=5)
381
  response.raise_for_status()
382
- data = response.json()
383
- return data['choices'][0]['message']['content']
 
 
 
384
  except Exception as e:
385
  last_error = str(e)
386
  continue
387
 
388
- return f"[Local LLM Error] Could not connect to Local Swarm on {endpoints}. Is LM Studio running? ({last_error})"
389
 
390
 
391
  # ==========================================
 
246
 
247
  # Using basic text generation if chat template fails, but try chat first
248
  # Many HF models support chat_completion API via InferenceClient
 
249
  response = client.chat_completion(
250
  messages=messages,
251
  model=self.model,
252
  max_tokens=500
253
  )
254
+ return response.choices[0].message.content, response.choices[0].get('logprobs')
255
  except Exception:
256
  # Fallback to text generation
257
  prompt = f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
258
+ res = client.text_generation(prompt, model=self.model, max_new_tokens=500)
259
+ return res, None
260
 
261
  except Exception as e:
262
+ return f"[Dolphin Error] {e}", None
263
 
264
  def analyze_diagram(self, image_path: str, prompt: str = "Describe this architectural diagram.") -> str:
265
  """
 
308
  if system_prompt:
309
  payload["messages"].append({"role": "system", "content": system_prompt})
310
  payload["messages"].append({"role": "user", "content": message})
311
+ payload["logprobs"] = True
312
+ payload["top_logprobs"] = 1
313
 
314
  endpoint = f"{self.base_url}/chat/completions"
315
  try:
 
317
  async with session.post(endpoint, json=payload, timeout=10) as response:
318
  if response.status == 200:
319
  data = await response.json()
320
+ content = data['choices'][0]['message'].get('content', "")
321
+ logprobs = data['choices'][0].get('logprobs')
322
+ return content, logprobs
323
  else:
324
  return f"[Error] Local LLM returned status {response.status}"
325
  except Exception as e:
326
+ return f"[Async Local LLM Error] {e}", None
327
 
328
  def chat(self, message: str, system_prompt: str = None, model: str = None, image_path: str = None) -> str:
329
  """
 
352
  "model": target_model,
353
  "messages": [],
354
  "temperature": 0.7,
355
+ "stream": False,
356
+ "logprobs": True,
357
+ "top_logprobs": 1
358
  }
359
 
360
  if system_prompt:
 
385
  # Short timeout for local to fail fast
386
  response = requests.post(endpoint, json=payload, timeout=5)
387
  response.raise_for_status()
388
+ if response.status_code == 200:
389
+ data = response.json()
390
+ content = data['choices'][0]['message'].get('content', "")
391
+ logprobs = data['choices'][0].get('logprobs')
392
+ return content, logprobs
393
  except Exception as e:
394
  last_error = str(e)
395
  continue
396
 
397
+ return f"[Local LLM Error] Could not connect to Local Swarm on {endpoints}. Is LM Studio running? ({last_error})", None
398
 
399
 
400
  # ==========================================
logos/ingest_knowledge.py CHANGED
@@ -71,7 +71,7 @@ def ingest_diagrams():
71
  try:
72
  # Transmute Visual -> Text
73
  start_ts = time.time()
74
- analysis = agent.chat(ANALYSIS_PROMPT, image_path=img_path)
75
  duration = time.time() - start_ts
76
 
77
  # Anneal into Knowledge Base
 
71
  try:
72
  # Transmute Visual -> Text
73
  start_ts = time.time()
74
+ analysis, _ = agent.chat(ANALYSIS_PROMPT, image_path=img_path)
75
  duration = time.time() - start_ts
76
 
77
  # Anneal into Knowledge Base
logos/server.py CHANGED
@@ -414,7 +414,12 @@ def chat_completions():
414
 
415
  if resp.status_code == 200:
416
  resp_json = resp.json()
417
- response_text = resp_json['choices'][0]['message']['content']
 
 
 
 
 
418
  else:
419
  response_text = f"[Backend Error {resp.status_code}] {resp.text}"
420
 
 
414
 
415
  if resp.status_code == 200:
416
  resp_json = resp.json()
417
+ response_text = resp_json['choices'][0]['message'].get('content', "")
418
+
419
+ # Protocol 22: Update entropy monitor from server-side proxy
420
+ logprobs = resp_json['choices'][0].get('logprobs')
421
+ if logprobs and 'content' in logprobs:
422
+ swarm_os.oversight.kill_switch.monitor_bulk(logprobs['content'])
423
  else:
424
  response_text = f"[Backend Error {resp.status_code}] {resp.text}"
425