GitHub Copilot committed on
Commit
75b641f
·
1 Parent(s): e717c2f

Protocol 22: Universal Model Gating & Network Layer Fusion

Browse files
logos/__pycache__/connectors.cpython-313.pyc CHANGED
Binary files a/logos/__pycache__/connectors.cpython-313.pyc and b/logos/__pycache__/connectors.cpython-313.pyc differ
 
logos/agent_dispatcher.py CHANGED
@@ -348,8 +348,8 @@ class LogosSwarm:
348
  Protocol 21: The Async Interference Bus.
349
  Agents pulse in parallel; the solution is the geometric intersection.
350
  """
351
- def __init__(self, base_url="http://localhost:1234/v1"):
352
- self.connector = get_connector('local', base_url=base_url)
353
  self.state = {
354
  "last_node": 1,
355
  "tensor_history": [],
@@ -490,7 +490,7 @@ class NeuralRouter:
490
  Implements the Routing Logic to dispatch tasks to the Nano Swarm.
491
  """
492
 
493
- def __init__(self, base_url: str = "http://192.168.0.105:1234/v1", router_model: str = "google/gemma-3-4b"):
494
  # The router itself uses a fast model (Gemma 4B)
495
  self.connector = get_connector('local', base_url=base_url, model=router_model)
496
  self.history = []
 
348
  Protocol 21: The Async Interference Bus.
349
  Agents pulse in parallel; the solution is the geometric intersection.
350
  """
351
+ def __init__(self, base_url="http://localhost:1234/v1", model="google/gemma-3-4b"):
352
+ self.connector = get_connector('local', base_url=base_url, model=model)
353
  self.state = {
354
  "last_node": 1,
355
  "tensor_history": [],
 
490
  Implements the Routing Logic to dispatch tasks to the Nano Swarm.
491
  """
492
 
493
+ def __init__(self, base_url: str = "http://localhost:1234/v1", router_model: str = "google/gemma-3-4b"):
494
  # The router itself uses a fast model (Gemma 4B)
495
  self.connector = get_connector('local', base_url=base_url, model=router_model)
496
  self.history = []
logos/connectors.py CHANGED
@@ -99,7 +99,6 @@ class OCRConnector:
99
 
100
  def __init__(self, languages: List[str] = None, gpu: bool = False):
101
  # We rely on the local LLM connector, 'gpu' arg is ignored as it's handled by LM Studio
102
- from .connectors import get_connector
103
  # Hardcoded to Gemma as requested by user ("gemma is your vision model")
104
  self.client = get_connector('local', model="google/gemma-3-4b")
105
 
@@ -286,7 +285,7 @@ class LocalLLMConnector:
286
  Optimization: Direct localhost access (no Docker bridge lag).
287
  """
288
 
289
- def __init__(self, base_url: str = None, model: str = "local-model"):
290
  # Prioritize Environment -> Argument -> Default
291
  env_url = os.environ.get("LOGOS_LLM_ENDPOINT")
292
  self.base_url = base_url or env_url or "http://localhost:1234/v1"
@@ -322,7 +321,7 @@ class LocalLLMConnector:
322
  logprobs = data['choices'][0].get('logprobs')
323
  return content, logprobs
324
  else:
325
- return f"[Error] Local LLM returned status {response.status}"
326
  except Exception as e:
327
  return f"[Async Local LLM Error] {e}", None
328
 
 
99
 
100
  def __init__(self, languages: List[str] = None, gpu: bool = False):
101
  # We rely on the local LLM connector, 'gpu' arg is ignored as it's handled by LM Studio
 
102
  # Hardcoded to Gemma as requested by user ("gemma is your vision model")
103
  self.client = get_connector('local', model="google/gemma-3-4b")
104
 
 
285
  Optimization: Direct localhost access (no Docker bridge lag).
286
  """
287
 
288
+ def __init__(self, base_url: str = None, model: str = "google/gemma-3-4b"):
289
  # Prioritize Environment -> Argument -> Default
290
  env_url = os.environ.get("LOGOS_LLM_ENDPOINT")
291
  self.base_url = base_url or env_url or "http://localhost:1234/v1"
 
321
  logprobs = data['choices'][0].get('logprobs')
322
  return content, logprobs
323
  else:
324
+ return f"[Error] Local LLM returned status {response.status}", None
325
  except Exception as e:
326
  return f"[Async Local LLM Error] {e}", None
327
 
logos/network/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/logos/network/__pycache__/__init__.cpython-313.pyc and b/logos/network/__pycache__/__init__.cpython-313.pyc differ
 
logos/server.py CHANGED
@@ -62,7 +62,7 @@ logger = logging.getLogger("LOGOS_Router")
62
  # Protocol 5: Tiered Token Consumption
63
  # Protocol 10: Swarm Optimization (Unified Model Mode)
64
  FORCE_SINGLE_MODEL = True # Set to True to prevent VRAM trashing by model swapping
65
- UNIFIED_MODEL_ID = "local-model" # Points to whatever is currently loaded in LM Studio
66
 
67
  SHELL_CONFIG = {
68
  "INNER_SHELL": {
 
62
  # Protocol 5: Tiered Token Consumption
63
  # Protocol 10: Swarm Optimization (Unified Model Mode)
64
  FORCE_SINGLE_MODEL = True # Set to True to prevent VRAM trashing by model swapping
65
+ UNIFIED_MODEL_ID = "google/gemma-3-4b" # Points to the active model in LM Studio
66
 
67
  SHELL_CONFIG = {
68
  "INNER_SHELL": {