GitHub Copilot committed on
Commit
9fa373c
·
1 Parent(s): ad814f5

Add MTL/Kernel API endpoints, enhanced logging for tool calls

Browse files
Files changed (1) hide show
  1. logos/server.py +219 -47
logos/server.py CHANGED
@@ -28,15 +28,18 @@ if sys.platform == 'win32':
28
  sys.stderr = codecs.getwriter("utf-8")(sys.stderr.detach())
29
 
30
  # --- CONFIGURATION ---
31
- HOST = "0.0.0.0"
32
- PORT = 5000
 
 
 
33
 
34
  # Initialize the Flask "Manifold"
35
  app = Flask(__name__)
36
  sock = Sock(app)
37
  CORS(app, resources={r"/*": {"origins": "*"}}) # Full Permissive CORS for Local Swarm
38
  # We use the existing NeuralRouter logic but adapted for this server
39
- swarm_os = LogosSwarm(base_url="http://localhost:1234/v1")
40
  v_node = VideoAtomizer()
41
 
42
  # Global Client Manager for Broadcast Pulse
@@ -61,6 +64,46 @@ class ConnectionManager:
61
 
62
  manager = ConnectionManager()
63
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  # --- MANIFOLD STATE TRACKING ---
65
  from logos.manifold_state import ManifoldState
66
  manifold = ManifoldState()
@@ -81,10 +124,71 @@ from logos.mhc_router import execute_recursive_manifold, calculate_manifold_cons
81
  from logos.memory.prime_db import PrimeTokenDB
82
  prime_db = PrimeTokenDB()
83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  # Simple in-memory index for the session (Simulating the Topology Graph)
85
  # Map[composite_id] -> filepath
86
  TOPOLOGY_INDEX = {}
87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
  # --- API ENDPOINTS ---
89
 
90
  @app.route('/', methods=['GET'])
@@ -215,6 +319,60 @@ def ingest_signal():
215
  "norm_minimized": True
216
  })
217
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
218
  @app.route('/favicon.ico', methods=['GET'])
219
  def favicon():
220
  return "", 204
@@ -248,6 +406,11 @@ def chat_completions():
248
  messages = data.get('messages', [])
249
  target_model = data.get('model', UNIFIED_MODEL_ID)
250
 
 
 
 
 
 
251
  if not messages: return jsonify({"error": "No messages provided"}), 400
252
 
253
  last_msg = next((m for m in reversed(messages) if m['role'] == 'user'), None)
@@ -263,55 +426,64 @@ def chat_completions():
263
  else:
264
  last_prompt = last_msg['content']
265
 
266
- # --- EXECUTE PROTOCOL 25 (RLM) ---
267
- final_state, trajectory, atomic_state_obj = execute_recursive_manifold(last_prompt, target_model)
268
 
269
- # Calculate usage (Mock for now)
270
- prompt_tokens = len(last_prompt) // 4
271
- completion_tokens = len(final_state) // 4
272
- total_tokens = prompt_tokens + completion_tokens
273
-
274
- # Update Global Manifold State (Persistence)
275
- shell, _ = calculate_manifold_constraint(last_prompt)
276
-
277
- manifold.update_shell_stats(shell, total_tokens, last_prompt)
278
-
279
- # [FIX] Merge transient Atomic Graph -> Global Persistence
280
- # This populates the "3D Manifold Structure" in the UI
281
- if hasattr(atomic_state_obj, "graph"):
282
- # We manually merge for now. Better would be a method in ManifoldState.
283
- # manifold.graph["nodes"].update(atomic_state_obj.graph["nodes"])
284
- # But wait, atomic_state_obj is of type ManifoldState too!
285
- # So we can just merge the dictionaries.
286
 
287
- # Merge Nodes
288
- for nid, n_data in atomic_state_obj.graph["nodes"].items():
289
- manifold.graph["nodes"][nid] = n_data
 
 
290
 
291
- # [GEOMETRY] Ensure every node has 3D coordinates for the Plotly visualizer
292
- if "geometry" not in n_data:
293
- # Use Prime Resonance as coordinates
294
- # X = Heat, Y = Prime (Mod 100), Z = Domain Depth
295
- prime_val = n_data.get("prime", 2)
296
- heat_val = n_data.get("heat", 0)
297
-
298
- domain_map = {"INNER_SHELL": 0, "PRIME_CHANNEL": 5, "OUTER_SHELL": 10}
299
- z_depth = domain_map.get(shell, 5) + (prime_val % 5)
300
-
301
- n_data["geometry"] = {
302
- "position": {
303
- "x": heat_val * 10,
304
- "y": prime_val % 100,
305
- "z": z_depth
306
- },
307
- "domain": shell
308
- }
309
 
310
- # Merge Edges
311
- manifold.graph["edges"].extend(atomic_state_obj.graph["edges"])
 
 
312
 
313
- # Update Resonance Product
314
- manifold.resonance_product = atomic_state_obj.resonance_product
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315
 
316
  return jsonify({
317
  "id": f"chatcmpl-logos-{int(time.time())}",
 
28
  sys.stderr = codecs.getwriter("utf-8")(sys.stderr.detach())
29
 
30
  # --- CONFIGURATION ---
31
+ from logos.config import SERVER_HOST, SERVER_PORT, LLM_ENDPOINT, UNIFIED_MODEL_ID
32
+
33
+ # --- CONFIGURATION ---
34
+ HOST = SERVER_HOST
35
+ PORT = SERVER_PORT
36
 
37
  # Initialize the Flask "Manifold"
38
  app = Flask(__name__)
39
  sock = Sock(app)
40
  CORS(app, resources={r"/*": {"origins": "*"}}) # Full Permissive CORS for Local Swarm
41
  # We use the existing NeuralRouter logic but adapted for this server
42
+ swarm_os = LogosSwarm(base_url=LLM_ENDPOINT)
43
  v_node = VideoAtomizer()
44
 
45
  # Global Client Manager for Broadcast Pulse
 
64
 
65
  manager = ConnectionManager()
66
 
67
+ @sock.route('/neural-link')
68
+ def neural_link(ws):
69
+ """
70
+ Protocol 19: WebSocket Neural Bridge for Realtime Telemetry.
71
+ """
72
+ manager.connect(ws)
73
+ try:
74
+ while True:
75
+ data = ws.receive()
76
+ if data:
77
+ # Handle Command from GUI
78
+ import json
79
+ try:
80
+ payload = json.loads(data)
81
+ content = payload.get('content')
82
+
83
+ if content:
84
+ logger.info(f"[GUI] Received Command: {content}")
85
+ # Execute via Swarm (Async run in thread)
86
+ # We use a simple non-blocking trigger here
87
+ loop = asyncio.new_event_loop()
88
+ asyncio.set_event_loop(loop)
89
+ res = loop.run_until_complete(swarm_os.process(content))
90
+ loop.close()
91
+
92
+ # Broadcast Result Back
93
+ manager.broadcast({
94
+ "type": "TENSOR_UPDATE",
95
+ "node": res.get('node'),
96
+ "origin": swarm_os.state.get('last_node', 0),
97
+ "tensor": res.get('tensor'),
98
+ "status": res.get('status')
99
+ })
100
+ except Exception as e:
101
+ logger.error(f"[GUI] WS Error: {e}")
102
+ except:
103
+ pass
104
+ finally:
105
+ manager.disconnect(ws)
106
+
107
  # --- MANIFOLD STATE TRACKING ---
108
  from logos.manifold_state import ManifoldState
109
  manifold = ManifoldState()
 
124
  from logos.memory.prime_db import PrimeTokenDB
125
  prime_db = PrimeTokenDB()
126
 
127
+ # ==========================================
128
+ # PROTOCOL 40: MTL INTERPRETER (Genesis Kernel)
129
+ # ==========================================
130
+ try:
131
+ from logos.mtl.interpreter import MTLInterpreter
132
+ from logos.kernel import GenesisKernel
133
+ mtl_interpreter = MTLInterpreter()
134
+ genesis_kernel = GenesisKernel()
135
+ MTL_AVAILABLE = True
136
+ logger.info("[SERVER] MTL Interpreter and Genesis Kernel loaded")
137
+ except ImportError as e:
138
+ mtl_interpreter = None
139
+ genesis_kernel = None
140
+ MTL_AVAILABLE = False
141
+ logger.warning(f"[SERVER] MTL not available: {e}")
142
+
143
  # Simple in-memory index for the session (Simulating the Topology Graph)
144
  # Map[composite_id] -> filepath
145
  TOPOLOGY_INDEX = {}
146
 
147
+ # --- PROTOCOL 29: CONTEXT SERVICE ENDPOINTS ---
148
+
149
+ @app.route('/v1/context/neurons', methods=['POST'])
150
+ def upsert_neurons():
151
+ """Batch Upsert Neurons."""
152
+ data = request.json
153
+ neurons = data.get('neurons', [])
154
+ updated = []
155
+ for n in neurons:
156
+ updated.append(manifold.upsert_neuron(n))
157
+ return jsonify({"status": "success", "upserted": len(updated), "neurons": updated})
158
+
159
+ @app.route('/v1/context/buffer', methods=['POST'])
160
+ def update_context_buffer():
161
+ """
162
+ Protocol 30: Context Injection.
163
+ Updates the active swarm memory from external agents (like the CLI Video Atomizer).
164
+ """
165
+ data = request.json
166
+ atoms = data.get('atoms', [])
167
+ if atoms:
168
+ swarm_os.state['context_buffer'] = atoms
169
+ logger.info(f"[CONTEXT] Buffer Updated via API: {atoms}")
170
+ return jsonify({"status": "UPDATED", "count": len(atoms)})
171
+ return jsonify({"status": "NO_CHANGE"}), 400
172
+
173
+ @app.route('/v1/context/query', methods=['POST'])
174
+ def query_context():
175
+ """Semantic/Topological Query."""
176
+ data = request.json
177
+ results = manifold.query_neurons(
178
+ query_text=data.get('query_text'),
179
+ filters=data.get('filters'),
180
+ limit=data.get('limit', 10)
181
+ )
182
+ return jsonify({"results": results, "count": len(results)})
183
+
184
+ @app.route('/v1/context/neuron/<int:prime_index>', methods=['GET'])
185
+ def get_neuron_prime(prime_index):
186
+ """Direct Access by Prime Index."""
187
+ neuron = manifold.get_neuron_by_prime(prime_index)
188
+ if neuron:
189
+ return jsonify(neuron)
190
+ return jsonify({"error": "Not Found"}), 404
191
+
192
  # --- API ENDPOINTS ---
193
 
194
  @app.route('/', methods=['GET'])
 
319
  "norm_minimized": True
320
  })
321
 
322
+ # ==========================================
323
+ # PROTOCOL 40: MTL EXECUTION ENDPOINT
324
+ # ==========================================
325
+ @app.route('/v1/mtl/execute', methods=['POST'])
326
+ def execute_mtl():
327
+ """Execute MTL code via API."""
328
+ if not MTL_AVAILABLE:
329
+ return jsonify({"error": "MTL not available"}), 503
330
+
331
+ data = request.json
332
+ code = data.get('code', '')
333
+
334
+ if not code:
335
+ return jsonify({"error": "No code provided"}), 400
336
+
337
+ logger.info(f"[MTL] Executing: {code[:100]}...")
338
+
339
+ try:
340
+ result = mtl_interpreter.execute(code)
341
+ logger.info(f"[MTL] Result: {result}")
342
+ return jsonify({
343
+ "status": "success",
344
+ "result": result,
345
+ "code": code
346
+ })
347
+ except Exception as e:
348
+ logger.error(f"[MTL] Error: {e}")
349
+ return jsonify({"error": str(e)}), 400
350
+
351
+ @app.route('/v1/kernel/process', methods=['POST'])
352
+ def kernel_process():
353
+ """Process a packet through the Genesis Kernel."""
354
+ if not MTL_AVAILABLE or not genesis_kernel:
355
+ return jsonify({"error": "Kernel not available"}), 503
356
+
357
+ data = request.json
358
+ packet = data.get('packet')
359
+ source = data.get('source', 'API')
360
+
361
+ if not packet:
362
+ return jsonify({"error": "No packet provided"}), 400
363
+
364
+ logger.info(f"[KERNEL] Processing packet {packet} from {source}")
365
+
366
+ try:
367
+ result = genesis_kernel.process_packet(int(packet), source=source)
368
+ return jsonify({
369
+ "status": "success",
370
+ "result": result
371
+ })
372
+ except Exception as e:
373
+ logger.error(f"[KERNEL] Error: {e}")
374
+ return jsonify({"error": str(e)}), 400
375
+
376
  @app.route('/favicon.ico', methods=['GET'])
377
  def favicon():
378
  return "", 204
 
406
  messages = data.get('messages', [])
407
  target_model = data.get('model', UNIFIED_MODEL_ID)
408
 
409
+ # [FIX] VIRTUAL ID MAPPING
410
+ # If the user/CLI requests the virtual router, map it to the underlying inference engine
411
+ if target_model == "logos-matroska-router":
412
+ target_model = UNIFIED_MODEL_ID
413
+
414
  if not messages: return jsonify({"error": "No messages provided"}), 400
415
 
416
  last_msg = next((m for m in reversed(messages) if m['role'] == 'user'), None)
 
426
  else:
427
  last_prompt = last_msg['content']
428
 
429
+ # --- EXECUTE PROTOCOL 25 (RLM) or SWARM DELEGATION ---
 
430
 
431
+ # 1. Swarm Delegation (Protocols 17 & 27)
432
+ if last_prompt.startswith("SWARM:") or last_prompt.startswith("RUN_FLOW:"):
433
+ # Direct Handoff to the Neural Router / Swarm
434
+ # Since swarm methods are async, we run them in a new event loop
435
+ loop = asyncio.new_event_loop()
436
+ asyncio.set_event_loop(loop)
 
 
 
 
 
 
 
 
 
 
 
437
 
438
+ if last_prompt.startswith("RUN_FLOW:"):
439
+ flow_name = last_prompt.replace("RUN_FLOW:", "").strip()
440
+ # Resolve path
441
+ flow_path = os.path.join(os.getcwd(), ".agent", "flows", flow_name)
442
+ if not flow_path.endswith(".json"): flow_path += ".json"
443
 
444
+ logger.info(f"[SERVER] Delegating Flow to Swarm: {flow_name}")
445
+ result = loop.run_until_complete(swarm_os.execute_flow(flow_path))
446
+ final_state = f"FLOW_EXECUTION_COMPLETE\nResult: {result}"
447
+ else:
448
+ # SWARM: ...
449
+ payload = last_prompt.replace("SWARM:", "").strip()
450
+ logger.info(f"[SERVER] Delegating Task to Swarm: {payload}")
451
+ result = loop.run_until_complete(swarm_os.process(payload))
452
+ final_state = f"SWARM_OP_COMPLETE\nNode: {result.get('node')}\nAlignment: {result.get('alignment')}\nTensor: {result.get('tensor')}"
 
 
 
 
 
 
 
 
 
453
 
454
+ loop.close()
455
+
456
+ # Create a mock trajectory for the response format
457
+ trajectory = [{"iter": 0, "shell": "SWARM_DELEGATE"}]
458
 
459
+ else:
460
+ # 2. Default Recursive Manifold (Protocol 25)
461
+ final_state, trajectory, atomic_state_obj = execute_recursive_manifold(last_prompt, target_model)
462
+
463
+ # [FIX] Merge transient Atomic Graph -> Global Persistence (Only for RLM)
464
+ if hasattr(atomic_state_obj, "graph"):
465
+ # Merge Nodes
466
+ for nid, n_data in atomic_state_obj.graph["nodes"].items():
467
+ manifold.graph["nodes"][nid] = n_data
468
+ if "geometry" not in n_data:
469
+ prime_val = n_data.get("prime", 2)
470
+ heat_val = n_data.get("heat", 0)
471
+ shell = trajectory[-1]['shell'] if trajectory else "INNER_SHELL"
472
+
473
+ domain_map = {"INNER_SHELL": 0, "PRIME_CHANNEL": 5, "OUTER_SHELL": 10}
474
+ z_depth = domain_map.get(shell, 5) + (prime_val % 5)
475
+
476
+ n_data["geometry"] = {
477
+ "position": {"x": heat_val * 10, "y": prime_val % 100, "z": z_depth},
478
+ "domain": shell
479
+ }
480
+ manifold.graph["edges"].extend(atomic_state_obj.graph["edges"])
481
+ manifold.resonance_product = atomic_state_obj.resonance_product
482
+
483
+ # Construct Token Usage
484
+ prompt_tokens = len(last_prompt) // 4
485
+ completion_tokens = len(final_state) // 4
486
+ total_tokens = prompt_tokens + completion_tokens
487
 
488
  return jsonify({
489
  "id": f"chatcmpl-logos-{int(time.time())}",