GitHub Copilot commited on
Commit
edae06c
·
1 Parent(s): 1ca83c3

Protocol 22: Update HF Inference to Router endpoint

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. N8N_ARCHITECTURE.md +57 -3
  2. analyze_uploads.py +46 -4
  3. antigravity_workflow.json +345 -0
  4. app.py +209 -27
  5. dist/assets/index-EWlo7g0h.js +0 -0
  6. dist/assets/index-W_vgHdSs.css +1 -0
  7. dist/index.html +14 -0
  8. dist/vite.svg +1 -0
  9. logos/__pycache__/__init__.cpython-313.pyc +0 -0
  10. logos/__pycache__/__init__.cpython-314.pyc +0 -0
  11. logos/__pycache__/agent_dispatcher.cpython-313.pyc +0 -0
  12. logos/__pycache__/agent_dispatcher.cpython-314.pyc +0 -0
  13. logos/__pycache__/baker.cpython-313.pyc +0 -0
  14. logos/__pycache__/baker.cpython-314.pyc +0 -0
  15. logos/__pycache__/connectors.cpython-313.pyc +0 -0
  16. logos/__pycache__/connectors.cpython-314.pyc +0 -0
  17. logos/__pycache__/dsp_bridge.cpython-313.pyc +0 -0
  18. logos/__pycache__/dsp_bridge.cpython-314.pyc +0 -0
  19. logos/__pycache__/fractal_engine.cpython-313.pyc +0 -0
  20. logos/__pycache__/fractal_engine.cpython-314.pyc +0 -0
  21. logos/__pycache__/image_analyzer.cpython-313.pyc +0 -0
  22. logos/__pycache__/image_analyzer.cpython-314.pyc +0 -0
  23. logos/__pycache__/ingest_knowledge.cpython-313.pyc +0 -0
  24. logos/__pycache__/logos_core.cpython-313.pyc +0 -0
  25. logos/__pycache__/logos_core.cpython-314.pyc +0 -0
  26. logos/__pycache__/manifold_state.cpython-313.pyc +0 -0
  27. logos/__pycache__/manifold_state.cpython-314.pyc +0 -0
  28. logos/__pycache__/network.cpython-313.pyc +0 -0
  29. logos/__pycache__/network.cpython-314.pyc +0 -0
  30. logos/__pycache__/ocr_pipeline.cpython-313.pyc +0 -0
  31. logos/__pycache__/server.cpython-313.pyc +0 -0
  32. logos/__pycache__/server.cpython-314.pyc +0 -0
  33. logos/agent_dispatcher.py +614 -0
  34. logos/agents/__pycache__/connector.cpython-313.pyc +0 -0
  35. logos/agents/__pycache__/dolphin.cpython-314.pyc +0 -0
  36. logos/agents/__pycache__/life.cpython-313.pyc +0 -0
  37. logos/agents/__pycache__/scout.cpython-313.pyc +0 -0
  38. logos/agents/__pycache__/sensor_agent.cpython-314.pyc +0 -0
  39. logos/agents/__pycache__/tokenizer.cpython-313.pyc +0 -0
  40. logos/agents/__pycache__/video_atomizer.cpython-314.pyc +0 -0
  41. logos/agents/connector.py +126 -0
  42. logos/agents/dolphin.py +175 -0
  43. logos/agents/life.py +110 -0
  44. logos/agents/orchestrator.py +45 -0
  45. logos/agents/scout.py +78 -0
  46. logos/agents/sensor_agent.py +60 -0
  47. logos/agents/tokenizer.py +99 -0
  48. logos/agents/video_atomizer.py +73 -0
  49. logos/connectors.py +191 -53
  50. logos/indexer.py +130 -0
N8N_ARCHITECTURE.md CHANGED
@@ -5,12 +5,58 @@ The "Google Antigravity" Neural Router
5
  Treat n8n as a **Neural Router**, decoupling "Thinking" (Logic/Architecture) from "Inference" (Execution/Code). This bypasses latencies and refusals by routing tasks to the most efficient model.
6
 
7
  ## 2. Infrastructure: The "OpenAI-Compatible" Bridge
 
 
 
 
8
  Standardize all providers to the OpenAI API protocol.
9
 
10
  ### Local (Code & Privacy)
11
- - **Tool**: Ollama / LM Studio
12
- - **Endpoint**: `http://localhost:11434/v1`
13
- - **Model**: `dolphin-llama3` (Uncensored, fast, obedient)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  ### High-Speed Inference (Math & Logic)
16
  - **Tool**: DeepInfra / Groq
@@ -33,6 +79,14 @@ Classifies incoming request type:
33
  Use **Merge Node (Wait Mode)** to execute paths simultaneously.
34
  1. **Path 1 (Math)**: DeepSeek analyzes Prime Potentiality/Manifold logic.
35
  2. **Path 2 (Code)**: Dolphin writes adapters/scripts locally.
 
 
 
 
 
 
 
 
36
  3. **Path 3 (Sys)**: Gemini drafts Strategy/README.
37
 
38
  ### Phase C: Consensus (The Annealing)
 
5
  Treat n8n as a **Neural Router**, decoupling "Thinking" (Logic/Architecture) from "Inference" (Execution/Code). This bypasses latencies and refusals by routing tasks to the most efficient model.
6
 
7
  ## 2. Infrastructure: The "OpenAI-Compatible" Bridge
8
+ **Optimization**: Run n8n **NATIVELY** on Windows (`npm install -g n8n`) instead of Docker.
9
+ - **Why**: Eliminates the `host.docker.internal` bridge bottleneck.
10
+ - **Effect**: N8N talks directly to `localhost:1234` with zero latency overhead.
11
+
12
  Standardize all providers to the OpenAI API protocol.
13
 
14
  ### Local (Code & Privacy)
15
+ - **Tool**: Ollama / LM Studio (The "New Friends" Cluster)
16
+ - **Endpoint**:
17
+ - Ollama: `http://localhost:11434/v1`
18
+ - LM Studio: `http://localhost:1234/v1`
19
+ ### Local Stack (The "Nano Swarm")
20
+ Instead of one giant model, use a stack of specialized lightweight models to save RAM:
21
+ - **Router/Logic**: `nvidia/nemotron-3-nano` or `Phi-3-Mini` (High logic/param ratio).
22
+ - **Coding**: `deepseek-coder-6.7b` or `dolphin-2.9-llama3-8b`.
23
+ - **Creative**: `openhermes-2.5-mistral-7b`.
24
+
25
+ **Configuration**:
26
+ - **Endpoint**: `http://localhost:1234/v1`
27
+ - **Multi-Model**: If using LM Studio, load the specific model needed for the batch, or run multiple instances on ports `1234`, `1235`, `1236`.
28
+
29
+ ### Workflow Import
30
+ A ready-to-use workflow file has been generated at:
31
+ `hf_space/logos_n8n_workflow.json`
32
+
33
+ **Usage**:
34
+ 1. Open N8N Editor.
35
+ 2. Click **Workflow** > **Import from File**.
36
+ 3. Select `logos_n8n_workflow.json`.
37
+ 4. Execute. It will scan your codebase using the Local Nano Swarm.
38
+
39
+ ### Connection Health Check
40
+ Verify the stack is active with this rhyme test:
41
+ ```bash
42
+ curl http://localhost:1234/v1/chat/completions \
43
+ -H "Content-Type: application/json" \
44
+ -d '{
45
+ "model": "nvidia/nemotron-3-nano",
46
+ "messages": [
47
+ {"role": "system", "content": "Always answer in rhymes. Today is Thursday"},
48
+ {"role": "user", "content": "What day is it today?"}
49
+ ],
50
+ "temperature": 0.7,
51
+ "stream": false
52
+ }'
53
+ ```
54
+ 2. Click the **Local Server** icon (`<->`) on the left sidebar.
55
+ 3. Ensure settings:
56
+ - **Port**: `1234`
57
+ - **CORS**: On (Recommended)
58
+ 4. Click **Start Server**.
59
+ 5. *Green Light*: The log should say `Server listening on http://localhost:1234`.
60
 
61
  ### High-Speed Inference (Math & Logic)
62
  - **Tool**: DeepInfra / Groq
 
79
  Use **Merge Node (Wait Mode)** to execute paths simultaneously.
80
  1. **Path 1 (Math)**: DeepSeek analyzes Prime Potentiality/Manifold logic.
81
  2. **Path 2 (Code)**: Dolphin writes adapters/scripts locally.
82
+ - *Implementation Helper*:
83
+ ```python
84
+ # Use a pipeline as a high-level helper for local execution
85
+ from transformers import pipeline
86
+ pipe = pipeline("text-generation", model="dphn/Dolphin-X1-8B-GGUF")
87
+ messages = [{"role": "user", "content": "Write the adapter."}]
88
+ pipe(messages)
89
+ ```
90
  3. **Path 3 (Sys)**: Gemini drafts Strategy/README.
91
 
92
  ### Phase C: Consensus (The Annealing)
analyze_uploads.py CHANGED
@@ -9,14 +9,56 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
9
  from logos.connectors import get_connector
10
 
11
  def analyze_uploads():
12
- # Paths to uploaded images (Hardcoded from user context for this run)
13
- base_dir = r"C:\Users\Nauti\.gemini\antigravity\brain\e29720ce-4e26-4c61-a243-8010483b5424"
14
- images = glob.glob(os.path.join(base_dir, "uploaded_image_*.png"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  print(f"Found {len(images)} images to analyze.")
17
 
18
  ocr = get_connector('ocr')
19
- dolphin = get_connector('dolphin')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
  for img_path in images:
22
  print(f"\n--- Analyzing {os.path.basename(img_path)} ---")
 
9
  from logos.connectors import get_connector
10
 
11
  def analyze_uploads():
12
+ import argparse
13
+ parser = argparse.ArgumentParser(description="Analyze images from multiple folders.")
14
+ parser.add_argument("paths", nargs='*', help="File paths or glob patterns")
15
+ parser.add_argument("--recursive", "-r", action="store_true", help="Recursive search")
16
+ args = parser.parse_args()
17
+
18
+ images = []
19
+ if not args.paths:
20
+ # Default behavior: Search current dir
21
+ print("No paths provided. Searching current directory.")
22
+ images = glob.glob("*.png")
23
+ else:
24
+ for pattern in args.paths:
25
+ # Handle recursive globs (e.g. **/*.png)
26
+ if args.recursive and "**" not in pattern:
27
+ pattern = os.path.join(pattern, "**")
28
+
29
+ # If pattern is a dir, add default extension
30
+ if os.path.isdir(pattern):
31
+ pattern = os.path.join(pattern, "*.png")
32
+
33
+ found = glob.glob(pattern, recursive=args.recursive)
34
+ images.extend(found)
35
+
36
+ # Remove duplicates
37
+ images = list(set(images))
38
 
39
  print(f"Found {len(images)} images to analyze.")
40
 
41
  ocr = get_connector('ocr')
42
+ ocr = get_connector('ocr')
43
+
44
+ # Try Local Stack first (Nano Swarm), fallback to Dolphin Cloud
45
+ try:
46
+ from logos.connectors import LocalLLMConnector
47
+ # Quick check if port 1234 is open
48
+ import socket
49
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
50
+ result = sock.connect_ex(('localhost', 1234))
51
+ sock.close()
52
+
53
+ if result == 0:
54
+ print("[INFO] Local Nano Swarm detected (Port 1234). Using Local Stack.")
55
+ dolphin = get_connector('local', model="local-model")
56
+ else:
57
+ print("[INFO] Local Stack offline. Falling back to Cloud Dolphin.")
58
+ dolphin = get_connector('dolphin')
59
+ except Exception as e:
60
+ print(f"[WARN] Local check failed ({e}). Defaulting to Cloud.")
61
+ dolphin = get_connector('dolphin')
62
 
63
  for img_path in images:
64
  print(f"\n--- Analyzing {os.path.basename(img_path)} ---")
antigravity_workflow.json ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "Antigravity Image Research Extractor",
3
+ "nodes": [
4
+ {
5
+ "parameters": {
6
+ "path": "C:/Users/Nauti/Desktop/LOGOS CURSOR/LOGOS Notes",
7
+ "fileExtensions": "jpg,jpeg,png,pdf,heic",
8
+ "options": {
9
+ "recurse": true
10
+ }
11
+ },
12
+ "id": "image_scanner",
13
+ "name": "Scan Images (Notes)",
14
+ "type": "n8n-nodes-base.readFilesFromFolder",
15
+ "position": [
16
+ 250,
17
+ 300
18
+ ]
19
+ },
20
+ {
21
+ "parameters": {
22
+ "jsCode": "// ROUTER: Classify images for specialized analysis\nconst images = items.map(item => {\n const fileName = item.json.fileName;\n const fileSize = item.json.size || 0;\n \n let taskType = 'general_vision';\n let priority = 1;\n \n // Route by filename patterns and size\n if (fileName.includes('diagram') || fileName.includes('sketch')) {\n taskType = 'diagram_analysis';\n priority = 3;\n } else if (fileName.includes('note') || fileName.includes('handwritten')) {\n taskType = 'handwriting_ocr';\n priority = 2;\n } else if (fileName.includes('ui') || fileName.includes('interface')) {\n taskType = 'ui_analysis';\n priority = 3;\n } else if (fileSize < 500000) {\n taskType = 'handwriting_ocr'; // Smaller files likely notes\n priority = 2;\n } else {\n taskType = 'diagram_analysis'; // Larger files likely detailed diagrams\n priority = 3;\n }\n \n return {\n json: {\n fileName,\n taskType,\n priority,\n fullPath: item.json.directory + '/' + fileName,\n fileSize\n },\n binary: item.binary\n };\n});\n\nreturn images;"
23
+ },
24
+ "id": "router",
25
+ "name": "Neural Router",
26
+ "type": "n8n-nodes-base.code",
27
+ "position": [
28
+ 450,
29
+ 300
30
+ ]
31
+ },
32
+ {
33
+ "parameters": {
34
+ "conditions": {
35
+ "options": {
36
+ "caseSensitive": false
37
+ },
38
+ "conditions": [
39
+ {
40
+ "id": "handwriting_path",
41
+ "leftValue": "={{ $json.taskType }}",
42
+ "rightValue": "handwriting_ocr",
43
+ "operator": {
44
+ "type": "string",
45
+ "operation": "equals"
46
+ }
47
+ },
48
+ {
49
+ "id": "diagram_path",
50
+ "leftValue": "={{ $json.taskType }}",
51
+ "rightValue": "diagram_analysis",
52
+ "operator": {
53
+ "type": "string",
54
+ "operation": "equals"
55
+ }
56
+ },
57
+ {
58
+ "id": "ui_path",
59
+ "leftValue": "={{ $json.taskType }}",
60
+ "rightValue": "ui_analysis",
61
+ "operator": {
62
+ "type": "string",
63
+ "operation": "equals"
64
+ }
65
+ }
66
+ ]
67
+ },
68
+ "options": {}
69
+ },
70
+ "id": "switch",
71
+ "name": "Task Switch",
72
+ "type": "n8n-nodes-base.switch",
73
+ "position": [
74
+ 650,
75
+ 300
76
+ ]
77
+ },
78
+ {
79
+ "parameters": {
80
+ "method": "POST",
81
+ "url": "https://api-inference.huggingface.co/models/microsoft/trocr-base-handwritten",
82
+ "authentication": "genericCredentialType",
83
+ "genericAuthType": "httpHeaderAuth",
84
+ "sendHeaders": true,
85
+ "headerParameters": {
86
+ "parameters": [
87
+ {
88
+ "name": "Content-Type",
89
+ "value": "application/json"
90
+ }
91
+ ]
92
+ },
93
+ "sendBody": true,
94
+ "specifyBody": "json",
95
+ "jsonBody": "={\n \"inputs\": \"{{ $binary.data.toString('base64') }}\"\n}",
96
+ "options": {}
97
+ },
98
+ "id": "ocr_analyst",
99
+ "name": "OCR Handwriting (TrOCR)",
100
+ "type": "n8n-nodes-base.httpRequest",
101
+ "position": [
102
+ 850,
103
+ 200
104
+ ]
105
+ },
106
+ {
107
+ "parameters": {
108
+ "method": "POST",
109
+ "url": "http://localhost:1234/v1/chat/completions",
110
+ "authentication": "none",
111
+ "sendHeaders": true,
112
+ "headerParameters": {
113
+ "parameters": [
114
+ {
115
+ "name": "Content-Type",
116
+ "value": "application/json"
117
+ }
118
+ ]
119
+ },
120
+ "sendBody": true,
121
+ "specifyBody": "json",
122
+ "jsonBody": "={\n \"model\": \"llava-v1.6-mistral-7b\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a technical diagram analyst specializing in geometry, polyforms, and compression systems. Identify: 1) Mathematical concepts shown, 2) Geometric shapes/polyhedra types, 3) Compression techniques mentioned, 4) UI/workflow elements. Output structured JSON.\"\n },\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Analyze this diagram from {{ $json.fileName }}. Focus on polyform development, compression methods, and UI design.\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"data:image/jpeg;base64,{{ $binary.data.toString('base64') }}\"\n }\n }\n ]\n }\n ],\n \"temperature\": 0.2,\n \"max_tokens\": 1500\n}",
123
+ "options": {}
124
+ },
125
+ "id": "diagram_analyst",
126
+ "name": "Diagram Analyst (LLaVA)",
127
+ "type": "n8n-nodes-base.httpRequest",
128
+ "position": [
129
+ 850,
130
+ 300
131
+ ]
132
+ },
133
+ {
134
+ "parameters": {
135
+ "method": "POST",
136
+ "url": "http://localhost:1234/v1/chat/completions",
137
+ "authentication": "none",
138
+ "sendHeaders": true,
139
+ "headerParameters": {
140
+ "parameters": [
141
+ {
142
+ "name": "Content-Type",
143
+ "value": "application/json"
144
+ }
145
+ ]
146
+ },
147
+ "sendBody": true,
148
+ "specifyBody": "json",
149
+ "jsonBody": "={\n \"model\": \"llava-v1.6-mistral-7b\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are a UI/UX analyst. Extract: 1) Interface components shown, 2) Interaction patterns, 3) Data visualization methods, 4) Success indicators mentioned, 5) User workflow steps. Output structured JSON.\"\n },\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": \"Analyze UI design from {{ $json.fileName }}. Identify successful patterns for polyform visualization and user interaction.\"\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": \"data:image/jpeg;base64,{{ $binary.data.toString('base64') }}\"\n }\n }\n ]\n }\n ],\n \"temperature\": 0.3,\n \"max_tokens\": 1200\n}",
150
+ "options": {}
151
+ },
152
+ "id": "ui_analyst",
153
+ "name": "UI Analyst (LLaVA)",
154
+ "type": "n8n-nodes-base.httpRequest",
155
+ "position": [
156
+ 850,
157
+ 400
158
+ ]
159
+ },
160
+ {
161
+ "parameters": {
162
+ "mode": "mergeByPosition",
163
+ "options": {}
164
+ },
165
+ "id": "merge",
166
+ "name": "Merge Analysis",
167
+ "type": "n8n-nodes-base.merge",
168
+ "position": [
169
+ 1050,
170
+ 300
171
+ ]
172
+ },
173
+ {
174
+ "parameters": {
175
+ "jsCode": "// SYNTHESIS: Parse vision model responses and structure results\nconst results = items.map(item => {\n let analysis = {};\n \n try {\n // Handle TrOCR response (array format)\n if (Array.isArray(item.json)) {\n analysis = {\n extracted_text: item.json[0]?.generated_text || item.json.toString(),\n confidence: item.json[0]?.score || 0.8\n };\n }\n // Handle LLaVA response (OpenAI format)\n else if (item.json.choices) {\n const content = item.json.choices[0]?.message?.content || '{}';\n analysis = JSON.parse(content);\n }\n // Handle direct JSON response\n else if (typeof item.json === 'object') {\n analysis = item.json;\n }\n else {\n analysis = { raw_response: JSON.stringify(item.json) };\n }\n } catch (e) {\n analysis = {\n raw_response: JSON.stringify(item.json),\n parse_error: true,\n error_detail: e.message\n };\n }\n \n return {\n json: {\n file: item.json.fileName || 'unknown',\n taskType: item.json.taskType,\n analysis,\n timestamp: new Date().toISOString()\n }\n };\n});\n\nreturn results;"
176
+ },
177
+ "id": "synthesizer",
178
+ "name": "Synthesizer",
179
+ "type": "n8n-nodes-base.code",
180
+ "position": [
181
+ 1250,
182
+ 300
183
+ ]
184
+ },
185
+ {
186
+ "parameters": {
187
+ "method": "POST",
188
+ "url": "http://localhost:1234/v1/chat/completions",
189
+ "authentication": "none",
190
+ "sendHeaders": true,
191
+ "headerParameters": {
192
+ "parameters": [
193
+ {
194
+ "name": "Content-Type",
195
+ "value": "application/json"
196
+ }
197
+ ]
198
+ },
199
+ "sendBody": true,
200
+ "specifyBody": "json",
201
+ "jsonBody": "={\n \"model\": \"nvidia/nemotron-3-nano\",\n \"messages\": [\n {\n \"role\": \"system\",\n \"content\": \"You are the RESEARCH SYNTHESIZER for a polyform compression project. Analyze handwritten notes and diagrams to extract: 1) POLYFORM TYPES (Platonic, Archimedean, Johnson, near-miss solids, geodesics), 2) COMPRESSION METHODS (vertex encoding, edge compression, spheroid nets), 3) SUCCESSFUL UI PATTERNS from iterations, 4) MATHEMATICAL INSIGHTS (topology, manifolds, optimization), 5) CRITICAL GAPS to address. Output: Executive Summary, Key Findings by Category, Priority Next Steps, Integration Opportunities.\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Synthesize these image analyses from research notes:\\n\\n{{ JSON.stringify($json) }}\\n\\nFocus on actionable insights for building the polyform generator and compression library.\"\n }\n ],\n \"temperature\": 0.3,\n \"max_tokens\": 3000\n}",
202
+ "options": {}
203
+ },
204
+ "id": "jury",
205
+ "name": "Jury Consensus (Nemotron)",
206
+ "type": "n8n-nodes-base.httpRequest",
207
+ "position": [
208
+ 1450,
209
+ 300
210
+ ]
211
+ },
212
+ {
213
+ "parameters": {
214
+ "operation": "write",
215
+ "fileName": "=/tmp/polyform_research_{{ DateTime.now().toFormat('yyyyMMdd_HHmmss') }}.json",
216
+ "options": {}
217
+ },
218
+ "id": "save_results",
219
+ "name": "Save Research Synthesis",
220
+ "type": "n8n-nodes-base.writeFile",
221
+ "position": [
222
+ 1650,
223
+ 300
224
+ ]
225
+ }
226
+ ],
227
+ "connections": {
228
+ "image_scanner": {
229
+ "main": [
230
+ [
231
+ {
232
+ "node": "router",
233
+ "type": "main",
234
+ "index": 0
235
+ }
236
+ ]
237
+ ]
238
+ },
239
+ "router": {
240
+ "main": [
241
+ [
242
+ {
243
+ "node": "switch",
244
+ "type": "main",
245
+ "index": 0
246
+ }
247
+ ]
248
+ ]
249
+ },
250
+ "switch": {
251
+ "main": [
252
+ [
253
+ {
254
+ "node": "ocr_analyst",
255
+ "type": "main",
256
+ "index": 0
257
+ }
258
+ ],
259
+ [
260
+ {
261
+ "node": "diagram_analyst",
262
+ "type": "main",
263
+ "index": 0
264
+ }
265
+ ],
266
+ [
267
+ {
268
+ "node": "ui_analyst",
269
+ "type": "main",
270
+ "index": 0
271
+ }
272
+ ]
273
+ ]
274
+ },
275
+ "ocr_analyst": {
276
+ "main": [
277
+ [
278
+ {
279
+ "node": "merge",
280
+ "type": "main",
281
+ "index": 0
282
+ }
283
+ ]
284
+ ]
285
+ },
286
+ "diagram_analyst": {
287
+ "main": [
288
+ [
289
+ {
290
+ "node": "merge",
291
+ "type": "main",
292
+ "index": 1
293
+ }
294
+ ]
295
+ ]
296
+ },
297
+ "ui_analyst": {
298
+ "main": [
299
+ [
300
+ {
301
+ "node": "merge",
302
+ "type": "main",
303
+ "index": 2
304
+ }
305
+ ]
306
+ ]
307
+ },
308
+ "merge": {
309
+ "main": [
310
+ [
311
+ {
312
+ "node": "synthesizer",
313
+ "type": "main",
314
+ "index": 0
315
+ }
316
+ ]
317
+ ]
318
+ },
319
+ "synthesizer": {
320
+ "main": [
321
+ [
322
+ {
323
+ "node": "jury",
324
+ "type": "main",
325
+ "index": 0
326
+ }
327
+ ]
328
+ ]
329
+ },
330
+ "jury": {
331
+ "main": [
332
+ [
333
+ {
334
+ "node": "save_results",
335
+ "type": "main",
336
+ "index": 0
337
+ }
338
+ ]
339
+ ]
340
+ }
341
+ },
342
+ "settings": {
343
+ "executionOrder": "v1"
344
+ }
345
+ }
app.py CHANGED
@@ -391,34 +391,14 @@ with gr.Blocks(theme=gr.themes.Monochrome(), title="LOGOS SPCW Protocol") as dem
391
  history = history or []
392
  history.append((message, None))
393
 
394
- # Try Dolphin connector first, fallback to simple response
395
  try:
396
  from logos.connectors import get_connector
397
- dolphin = get_connector('dolphin')
 
398
 
399
- # Load Knowledge Base for Self-Contained Context
400
- kb_context = ""
401
- try:
402
- import json
403
- if os.path.exists("logos_knowledge_base.json"):
404
- with open("logos_knowledge_base.json", "r") as f:
405
- kb_data = json.load(f)
406
- # Summarize KB: List documents and word counts
407
- docs = kb_data.get('documents', [])
408
- kb_context = "\nINTERNAL KNOWLEDGE BASE:\n"
409
- for d in docs:
410
- kb_context += f"- Doc: {d.get('filename')} ({d.get('word_count')} words)\n"
411
- # Inject full text if small enough, otherwise summary
412
- text = d.get('full_text', '')
413
- if len(text) < 1000:
414
- kb_context += f" Content: {text}\n"
415
- else:
416
- kb_context += f" Excerpt: {text[:500]}...\n"
417
- except Exception:
418
- kb_context = "\n[Knowledge Base Not Loaded]"
419
-
420
  # LOGOS System Context
421
- logos_context = f"""You are LOGOS, an AI assistant specialized in:
422
  - Prime Network Architecture (integer topology, GCD routing)
423
  - SPCW (Structured Prime Composite Waveform) protocol
424
  - Hex/Binary Dissolution and enterprise routing
@@ -426,10 +406,13 @@ with gr.Blocks(theme=gr.themes.Monochrome(), title="LOGOS SPCW Protocol") as dem
426
 
427
  {kb_context}
428
  """
 
429
 
430
- # Use Dolphin Chat
431
- # My DolphinAgentConnector.chat takes (message, system_prompt)
432
- response = dolphin.chat(message, system_prompt=logos_context)
 
 
433
  history[-1] = (message, response)
434
  except Exception as e:
435
  # Fallback response
@@ -470,6 +453,205 @@ with gr.Blocks(theme=gr.themes.Monochrome(), title="LOGOS SPCW Protocol") as dem
470
  btn_refresh = gr.Button("🔄 Refresh Status")
471
  btn_refresh.click(lambda: get_connector_status(), None, status_display)
472
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
473
 
474
  if __name__ == "__main__":
475
  # HF Spaces configuration
 
391
  history = history or []
392
  history.append((message, None))
393
 
394
+ # 1. Try Local Swarm (Protocol 5: Privacy First)
395
  try:
396
  from logos.connectors import get_connector
397
+ # Prioritize Local
398
+ agent = get_connector('local')
399
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
400
  # LOGOS System Context
401
+ logos_context = f"""You are LOGOS (Local Swarm Node), specialized in:
402
  - Prime Network Architecture (integer topology, GCD routing)
403
  - SPCW (Structured Prime Composite Waveform) protocol
404
  - Hex/Binary Dissolution and enterprise routing
 
406
 
407
  {kb_context}
408
  """
409
+ response = agent.chat(message, system_prompt=logos_context)
410
 
411
+ # If Local failed, try Cloud Dolphin
412
+ if "[Local LLM Error]" in response:
413
+ agent = get_connector('dolphin')
414
+ response = agent.chat(message, system_prompt=logos_context)
415
+
416
  history[-1] = (message, response)
417
  except Exception as e:
418
  # Fallback response
 
453
  btn_refresh = gr.Button("🔄 Refresh Status")
454
  btn_refresh.click(lambda: get_connector_status(), None, status_display)
455
 
456
+ with gr.Tab("🔮 Manifold Geometry"):
457
+ gr.Markdown("## Protocol 5: Prime Topology Viewer")
458
+ gr.Markdown("*Live Telemetry from the Matroska Router*")
459
+
460
+ with gr.Row():
461
+ with gr.Column():
462
+ gr.Markdown("### 🐚 Shell Token Intake")
463
+ shell_plot = gr.Plot(label="Token Consumption by Shell")
464
+ with gr.Column():
465
+ gr.Markdown("### 🧠 Macro Context (Recent Intent)")
466
+ context_log = gr.Dataframe(headers=["Time", "Shell", "Summary"], label="Swarm Memory")
467
+
468
+ manifold_timer = gr.Timer(5) # Auto-refresh every 5s
469
+
470
+ def fetch_manifold_data():
471
+ """Poll the Router for State."""
472
+ import requests
473
+ import pandas as pd
474
+ try:
475
+ # Target Router specifically (Port 5000)
476
+ url = "http://localhost:5000/v1"
477
+ resp = requests.get(url, timeout=1)
478
+ data = resp.json()
479
+ state = data.get("manifold_state", {})
480
+
481
+ # 1. Plot Shell Stats
482
+ shells = state.get("shells", {})
483
+ names = list(shells.keys())
484
+ tokens = [shells[k]["tokens_intake"] for k in names]
485
+
486
+ # Color mapping for shells
487
+ colors = {
488
+ "INNER_SHELL": "#00ffea", # Cyan (Structure)
489
+ "PRIME_CHANNEL": "#9d4edd", # Purple (Math)
490
+ "OUTER_SHELL": "#ff0055" # Red (Entropy)
491
+ }
492
+ bar_colors = [colors.get(n, "#888") for n in names]
493
+
494
+ # --- 3D Manifold Structure (AlphaFold View) ---
495
+ nodes = state.get("graph", {}).get("nodes", [])
496
+ if nodes:
497
+ x_vals = [n["geometry"]["position"]["x"] for n in nodes if "position" in n.get("geometry", {})]
498
+ y_vals = [n["geometry"]["position"]["y"] for n in nodes if "position" in n.get("geometry", {})]
499
+ z_vals = [n["geometry"]["position"]["z"] for n in nodes if "position" in n.get("geometry", {})]
500
+ colors = [1 if n["geometry"]["domain"] == "OUTER_SHELL" else 0.5 if n["geometry"]["domain"] == "PRIME_CHANNEL" else 0 for n in nodes if "position" in n.get("geometry", {})]
501
+ text_labels = [n["name"] for n in nodes if "position" in n.get("geometry", {})]
502
+
503
+ # Map Liveness to Opacity/Size
504
+ opacity_vals = [0.8 if n.get("alive", 1) == 1 else 0.1 for n in nodes if "position" in n.get("geometry", {})]
505
+ size_vals = [5 if n.get("alive", 1) == 1 else 2 for n in nodes if "position" in n.get("geometry", {})]
506
+
507
+ # Map Liveness & Morphogenesis to Visuals
508
+ colors = []
509
+ for n in nodes:
510
+ if "position" not in n.get("geometry", {}):
511
+ continue
512
+
513
+ action = n.get("action_pending")
514
+ if action == "MITOSIS":
515
+ colors.append("#00FF00") # Green for Growth/Split
516
+ elif action == "REGENERATE":
517
+ colors.append("#FFD700") # Gold for Healing
518
+ elif action == "SPAWN_META_TOKEN":
519
+ colors.append("#00FFFF") # Azure for Birth
520
+ elif n.get("alive", 1) == 1:
521
+ # Normal Alive: Domain Color matches Plotly defaults for Shells
522
+ d = n["geometry"]["domain"]
523
+ if d == "OUTER_SHELL": colors.append(1)
524
+ elif d == "PRIME_CHANNEL": colors.append(0.5)
525
+ else: colors.append(0)
526
+ else:
527
+ colors.append("#222222") # Dead/Gray
528
+
529
+ text_labels = []
530
+ for n in nodes:
531
+ if "position" in n.get("geometry", {}):
532
+ status = "ALIVE" if n.get("alive", 1) else "DEAD"
533
+ action = n.get("action_pending", "")
534
+ label = f"{n['name']} ({status})"
535
+ if action: label += f" [{action}]"
536
+ text_labels.append(label)
537
+
538
+ fig = go.Figure(data=[go.Scatter3d(
539
+ x=x_vals, y=y_vals, z=z_vals,
540
+ mode='markers',
541
+ text=text_labels,
542
+ marker=dict(
543
+ size=size_vals,
544
+ color=colors,
545
+ colorscale='Viridis',
546
+ opacity=opacity_vals
547
+ )
548
+ )])
549
+ fig.update_layout(
550
+ title="Codebase Manifold Geometry",
551
+ scene=dict(
552
+ xaxis_title='Hash Resonance (X)',
553
+ yaxis_title='Hash Resonance (Y)',
554
+ zaxis_title='Domain Depth (Z)'
555
+ ),
556
+ margin=dict(l=0, r=0, b=0, t=30)
557
+ )
558
+ else:
559
+ # Fallback empty plot
560
+ fig = go.Figure()
561
+ fig.add_annotation(text="No 3D graph data available.", showarrow=False)
562
+
563
+ # 2. Context Log
564
+ ctx = state.get("macro_context", [])
565
+ df_data = []
566
+ for c in reversed(ctx): # Show newest first
567
+ t_str = time.strftime('%H:%M:%S', time.localtime(c['time']))
568
+ df_data.append([t_str, c['shell'], c['summary']])
569
+
570
+ if not df_data:
571
+ df_data = [["-", "-", "No history yet"]]
572
+
573
+ return fig, df_data
574
+
575
+ except Exception as e:
576
+ # Return empty/error state
577
+ err_fig = go.Figure()
578
+ err_fig.add_annotation(text=f"Router Offline: {e}", showarrow=False)
579
+ return err_fig, [["ERROR", "-", str(e)]]
580
+
581
+ manifold_timer.tick(fetch_manifold_data, outputs=[shell_plot, context_log])
582
+ # Initial load
583
+ demo.load(fetch_manifold_data, outputs=[shell_plot, context_log])
584
+
585
+ with gr.Tab("🚀 Mission Control"):
586
+ gr.Markdown("## Unified Command Deck")
587
+ gr.Markdown("*Manage your agents and workflows from a single pane.*")
588
+
589
+ with gr.Row():
590
+ with gr.Column(scale=1):
591
+ gr.Markdown("### 📡 Local Nanostack (The Swarm)")
592
+ swarm_status = gr.Markdown("Checking...")
593
+ btn_ping = gr.Button("Ping Localhost:1234")
594
+
595
+ with gr.Column(scale=2):
596
+ gr.Markdown("### ⚡ Workflow Actions")
597
+ with gr.Row():
598
+ btn_scan = gr.Button("📂 Analyze Notes (Run Script)", variant="secondary")
599
+ btn_n8n = gr.Button("🔗 Import N8N Workflow", link="file=hf_space/logos_n8n_workflow.json")
600
+
601
+ log_output = gr.Code(label="System Logs / Analysis Output", language="markdown", lines=20)
602
+
603
+ def check_swarm():
604
+ """Ping Localhost:1234 (Auto-detecting Docker Bridge)"""
605
+ endpoints = [
606
+ "http://localhost:1234/v1/models",
607
+ "http://host.docker.internal:1234/v1/models"
608
+ ]
609
+
610
+ for url in endpoints:
611
+ try:
612
+ import requests
613
+ resp = requests.get(url, timeout=2)
614
+ if resp.status_code == 200:
615
+ models = [m['id'] for m in resp.json().get('data', [])]
616
+ host_alias = "Localhost" if "localhost" in url else "DockerHost"
617
+ return f"### ✅ ONLINE ({host_alias})\n**Port:** 1234\n**Active Models:**\n`{', '.join(models[:3])}`"
618
+ except:
619
+ continue
620
+
621
+ return "### 🔴 OFFLINE\nEnsure LM Studio Server is running on Port 1234."
622
+
623
+ def run_analysis_script():
624
+ """Run analyze_uploads.py and capture output"""
625
+ import subprocess
626
+ try:
627
+ # Run the script we upgraded earlier
628
+ result = subprocess.run(
629
+ [sys.executable, "hf_space/analyze_uploads.py", "--recursive"],
630
+ capture_output=True,
631
+ text=True,
632
+ cwd=os.path.dirname(current_dir) # Run from project root
633
+ )
634
+
635
+ output = f"### Execution Result (Exit Code: {result.returncode})\n\n"
636
+ if result.stdout:
637
+ output += f"**STDOUT**:\n```\n{result.stdout}\n```\n"
638
+ if result.stderr:
639
+ output += f"**STDERR**:\n```\n{result.stderr}\n```\n"
640
+
641
+ if result.returncode == 0:
642
+ output += "\n\n✅ Analysis Complete. Check knowledge base."
643
+ else:
644
+ output += "\n\n❌ Analysis Failed."
645
+
646
+ return output
647
+ except Exception as e:
648
+ return f"### Execution Error\n{str(e)}"
649
+
650
+ btn_ping.click(check_swarm, None, swarm_status)
651
+ btn_scan.click(run_analysis_script, None, log_output)
652
+ # Auto-check on load
653
+ demo.load(check_swarm, None, swarm_status)
654
+
655
 
656
  if __name__ == "__main__":
657
  # HF Spaces configuration
dist/assets/index-EWlo7g0h.js ADDED
The diff for this file is too large to render. See raw diff
 
dist/assets/index-W_vgHdSs.css ADDED
@@ -0,0 +1 @@
 
 
1
+ *,:before,:after{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: }::backdrop{--tw-border-spacing-x: 0;--tw-border-spacing-y: 0;--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-gradient-from-position: ;--tw-gradient-via-position: ;--tw-gradient-to-position: ;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: 
;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: ;--tw-contain-size: ;--tw-contain-layout: ;--tw-contain-paint: ;--tw-contain-style: }*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html,:host{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";font-feature-settings:normal;font-variation-settings:normal;-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:JetBrains Mono,Fira 
Code,monospace;font-feature-settings:normal;font-variation-settings:normal;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;letter-spacing:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,input:where([type=button]),input:where([type=reset]),input:where([type=submit]){-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]:where(:not([hidden=until-found])){display:none}.pointer-events-none{pointer-events:none}.fixed{position:fixed}.absolute{position:absolute}.relative{position:relative}.bottom-10{bottom:2.5rem}.bottom-6{bottom:1.5rem}.bottom-8{bottom:2rem}.left-1\/2{left:50%}.left-6{left:1.5rem}.right-4{right:1rem}.right-8{right:2rem}.top-1\/2{top:50%}.top-8{top:2rem}.z-50{z-index:50}.mb-0\.5{margin-bottom:
.125rem}.mb-10{margin-bottom:2.5rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.mb-4{margin-bottom:1rem}.mb-6{margin-bottom:1.5rem}.mt-1{margin-top:.25rem}.mt-4{margin-top:1rem}.mt-auto{margin-top:auto}.flex{display:flex}.h-1{height:.25rem}.h-1\.5{height:.375rem}.h-12{height:3rem}.h-14{height:3.5rem}.h-2{height:.5rem}.h-32{height:8rem}.h-5{height:1.25rem}.h-full{height:100%}.h-screen{height:100vh}.max-h-48{max-height:12rem}.w-1\.5{width:.375rem}.w-14{width:3.5rem}.w-2{width:.5rem}.w-5{width:1.25rem}.w-80{width:20rem}.w-\[850px\]{width:850px}.w-full{width:100%}.w-screen{width:100vw}.flex-1{flex:1 1 0%}.-translate-x-1\/2{--tw-translate-x: -50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.-translate-y-1\/2{--tw-translate-y: -50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}@keyframes ping{75%,to{transform:scale(2);opacity:0}}.animate-ping{animation:ping 1s cubic-bezier(0,0,.2,1) infinite}@keyframes pulse{50%{opacity:.5}}.animate-pulse{animation:pulse 2s cubic-bezier(.4,0,.6,1) infinite}@keyframes spin{to{transform:rotate(360deg)}}.animate-spin{animation:spin 1s linear infinite}.cursor-crosshair{cursor:crosshair}.cursor-pointer{cursor:pointer}.select-none{-webkit-user-select:none;-moz-user-select:none;user-select:none}.flex-col{flex-direction:column}.items-start{align-items:flex-start}.items-end{align-items:flex-end}.items-center{align-items:center}.items-stretch{align-items:stretch}.justify-center{justify-content:center}.justify-between{justify-content:space-between}.gap-1{gap:.25rem}.gap-12{gap:3rem}.gap-2{gap:.5rem}.gap-3{gap:.75rem}.gap-4{gap:1rem}.gap-6{gap:1.5rem}.space-y-1>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.25rem * calc(1 - 
var(--tw-space-y-reverse)));margin-bottom:calc(.25rem * var(--tw-space-y-reverse))}.space-y-2>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.5rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.5rem * var(--tw-space-y-reverse))}.space-y-3>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(.75rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(.75rem * var(--tw-space-y-reverse))}.space-y-6>:not([hidden])~:not([hidden]){--tw-space-y-reverse: 0;margin-top:calc(1.5rem * calc(1 - var(--tw-space-y-reverse)));margin-bottom:calc(1.5rem * var(--tw-space-y-reverse))}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-y-auto{overflow-y:auto}.truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.break-all{word-break:break-all}.rounded{border-radius:.25rem}.rounded-2xl{border-radius:1rem}.rounded-full{border-radius:9999px}.rounded-lg{border-radius:.5rem}.rounded-sm{border-radius:.125rem}.rounded-xl{border-radius:.75rem}.border{border-width:1px}.border-b{border-bottom-width:1px}.border-l-2{border-left-width:2px}.border-t{border-top-width:1px}.border-green-500\/50{border-color:#22c55e80}.border-prime-gold\/40{border-color:#ffd70066}.border-red-500{--tw-border-opacity: 1;border-color:rgb(239 68 68 / var(--tw-border-opacity, 1))}.border-red-500\/50{border-color:#ef444480}.border-white\/10{border-color:#ffffff1a}.border-white\/5{border-color:#ffffff0d}.bg-\[\#00FFFF\]{--tw-bg-opacity: 1;background-color:rgb(0 255 255 / var(--tw-bg-opacity, 1))}.bg-\[\#050505\]{--tw-bg-opacity: 1;background-color:rgb(5 5 5 / var(--tw-bg-opacity, 1))}.bg-\[\#FF00FF\]{--tw-bg-opacity: 1;background-color:rgb(255 0 255 / var(--tw-bg-opacity, 1))}.bg-black{--tw-bg-opacity: 1;background-color:rgb(0 0 0 / var(--tw-bg-opacity, 
1))}.bg-black\/40{background-color:#0006}.bg-black\/50{background-color:#00000080}.bg-black\/70{background-color:#000000b3}.bg-black\/80{background-color:#000c}.bg-green-500{--tw-bg-opacity: 1;background-color:rgb(34 197 94 / var(--tw-bg-opacity, 1))}.bg-prime-gold{--tw-bg-opacity: 1;background-color:rgb(255 215 0 / var(--tw-bg-opacity, 1))}.bg-red-500{--tw-bg-opacity: 1;background-color:rgb(239 68 68 / var(--tw-bg-opacity, 1))}.bg-red-500\/10{background-color:#ef44441a}.bg-red-500\/20{background-color:#ef444433}.bg-white{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / var(--tw-bg-opacity, 1))}.bg-white\/5{background-color:#ffffff0d}.bg-white\/\[0\.02\]{background-color:#ffffff05}.bg-white\/\[0\.03\]{background-color:#ffffff08}.bg-white\/\[0\.05\]{background-color:#ffffff0d}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-4{padding:1rem}.p-6{padding:1.5rem}.px-1{padding-left:.25rem;padding-right:.25rem}.px-1\.5{padding-left:.375rem;padding-right:.375rem}.px-10{padding-left:2.5rem;padding-right:2.5rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-6{padding-left:1.5rem;padding-right:1.5rem}.py-0\.5{padding-top:.125rem;padding-bottom:.125rem}.py-1\.5{padding-top:.375rem;padding-bottom:.375rem}.py-4{padding-top:1rem;padding-bottom:1rem}.pb-2{padding-bottom:.5rem}.pl-4{padding-left:1rem}.pt-4{padding-top:1rem}.pt-8{padding-top:2rem}.text-center{text-align:center}.font-mono{font-family:JetBrains Mono,Fira 
Code,monospace}.text-2xl{font-size:1.5rem;line-height:2rem}.text-\[10px\]{font-size:10px}.text-\[11px\]{font-size:11px}.text-\[8px\]{font-size:8px}.text-\[9px\]{font-size:9px}.text-lg{font-size:1.125rem;line-height:1.75rem}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xs{font-size:.75rem;line-height:1rem}.font-bold{font-weight:700}.font-light{font-weight:300}.uppercase{text-transform:uppercase}.italic{font-style:italic}.leading-none{line-height:1}.leading-relaxed{line-height:1.625}.tracking-\[0\.2em\]{letter-spacing:.2em}.tracking-\[0\.3em\]{letter-spacing:.3em}.tracking-\[0\.4em\]{letter-spacing:.4em}.tracking-\[0\.5em\]{letter-spacing:.5em}.tracking-tighter{letter-spacing:-.05em}.tracking-widest{letter-spacing:.1em}.text-gray-500{--tw-text-opacity: 1;color:rgb(107 114 128 / var(--tw-text-opacity, 1))}.text-green-400{--tw-text-opacity: 1;color:rgb(74 222 128 / var(--tw-text-opacity, 1))}.text-green-500{--tw-text-opacity: 1;color:rgb(34 197 94 / var(--tw-text-opacity, 1))}.text-green-500\/70{color:#22c55eb3}.text-prime-gold{--tw-text-opacity: 1;color:rgb(255 215 0 / var(--tw-text-opacity, 1))}.text-red-400{--tw-text-opacity: 1;color:rgb(248 113 113 / var(--tw-text-opacity, 1))}.text-red-500{--tw-text-opacity: 1;color:rgb(239 68 68 / var(--tw-text-opacity, 1))}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity, 1))}.opacity-10{opacity:.1}.opacity-20{opacity:.2}.opacity-30{opacity:.3}.opacity-40{opacity:.4}.opacity-60{opacity:.6}.opacity-80{opacity:.8}.shadow-2xl{--tw-shadow: 0 25px 50px -12px rgb(0 0 0 / .25);--tw-shadow-colored: 0 25px 50px -12px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-\[0_0_15px_rgba\(239\,68\,68\,0\.4\)\]{--tw-shadow: 0 0 15px rgba(239,68,68,.4);--tw-shadow-colored: 0 0 15px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 
#0000),var(--tw-shadow)}.shadow-\[0_0_50px_rgba\(0\,0\,0\,1\)\]{--tw-shadow: 0 0 50px rgba(0,0,0,1);--tw-shadow-colored: 0 0 50px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-inner{--tw-shadow: inset 0 2px 4px 0 rgb(0 0 0 / .05);--tw-shadow-colored: inset 0 2px 4px 0 var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.shadow-xl{--tw-shadow: 0 20px 25px -5px rgb(0 0 0 / .1), 0 8px 10px -6px rgb(0 0 0 / .1);--tw-shadow-colored: 0 20px 25px -5px var(--tw-shadow-color), 0 8px 10px -6px var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow, 0 0 #0000),var(--tw-ring-shadow, 0 0 #0000),var(--tw-shadow)}.ring-1{--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}.ring-white\/5{--tw-ring-color: rgb(255 255 255 / .05)}.blur{--tw-blur: blur(8px);filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.drop-shadow-lg{--tw-drop-shadow: drop-shadow(0 10px 8px rgb(0 0 0 / .04)) drop-shadow(0 4px 3px rgb(0 0 0 / .1));filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.filter{filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.backdrop-blur{--tw-backdrop-blur: blur(8px);-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) 
var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}.backdrop-blur-2xl{--tw-backdrop-blur: blur(40px);-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}.backdrop-blur-3xl{--tw-backdrop-blur: blur(64px);-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}.backdrop-blur-xl{--tw-backdrop-blur: blur(24px);-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) 
var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}.transition-all{transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.transition-opacity{transition-property:opacity;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.15s}.duration-300{transition-duration:.3s}.duration-500{transition-duration:.5s}.duration-700{transition-duration:.7s}:root{font-family:Inter,system-ui,Avenir,Helvetica,Arial,sans-serif;line-height:1.5;font-weight:400;color-scheme:dark}body{margin:0;min-height:100vh;background-color:#050505;color:#eee;overflow:hidden}.glass-panel{background:#14141e99;-webkit-backdrop-filter:blur(12px);backdrop-filter:blur(12px);border:1px solid rgba(255,255,255,.1);box-shadow:0 8px 32px #0000005e}.prime-text{text-shadow:0 0 10px rgba(255,215,0,.5)}.mersenne-text{text-shadow:0 0 10px rgba(0,163,255,.5)}::-webkit-scrollbar{width:6px}::-webkit-scrollbar-track{background:#050505}::-webkit-scrollbar-thumb{background:#333;border-radius:3px}::-webkit-scrollbar-thumb:hover{background:#555}.placeholder\:text-gray-600::-moz-placeholder{--tw-text-opacity: 1;color:rgb(75 85 99 / var(--tw-text-opacity, 1))}.placeholder\:text-gray-600::placeholder{--tw-text-opacity: 1;color:rgb(75 85 99 / var(--tw-text-opacity, 1))}.hover\:scale-150:hover{--tw-scale-x: 1.5;--tw-scale-y: 1.5;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.hover\:border-white\/20:hover{border-color:#fff3}.hover\:bg-white:hover{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / var(--tw-bg-opacity, 1))}.hover\:bg-white\/5:hover{background-color:#ffffff0d}.hover\:text-black:hover{--tw-text-opacity: 1;color:rgb(0 0 0 / var(--tw-text-opacity, 
1))}.focus\:border-prime-gold\/50:focus{border-color:#ffd70080}.focus\:bg-white\/\[0\.05\]:focus{background-color:#ffffff0d}.focus\:outline-none:focus{outline:2px solid transparent;outline-offset:2px}.active\:scale-95:active{--tw-scale-x: .95;--tw-scale-y: .95;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skew(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.group:focus-within .group-focus-within\:opacity-40{opacity:.4}
dist/index.html ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <link rel="icon" type="image/svg+xml" href="/vite.svg" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title>logos-gui</title>
8
+ <script type="module" crossorigin src="/assets/index-EWlo7g0h.js"></script>
9
+ <link rel="stylesheet" crossorigin href="/assets/index-W_vgHdSs.css">
10
+ </head>
11
+ <body>
12
+ <div id="root"></div>
13
+ </body>
14
+ </html>
dist/vite.svg ADDED
logos/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (795 Bytes). View file
 
logos/__pycache__/__init__.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/__init__.cpython-314.pyc and b/logos/__pycache__/__init__.cpython-314.pyc differ
 
logos/__pycache__/agent_dispatcher.cpython-313.pyc ADDED
Binary file (7.16 kB). View file
 
logos/__pycache__/agent_dispatcher.cpython-314.pyc ADDED
Binary file (32.6 kB). View file
 
logos/__pycache__/baker.cpython-313.pyc ADDED
Binary file (4.37 kB). View file
 
logos/__pycache__/baker.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/baker.cpython-314.pyc and b/logos/__pycache__/baker.cpython-314.pyc differ
 
logos/__pycache__/connectors.cpython-313.pyc ADDED
Binary file (25.8 kB). View file
 
logos/__pycache__/connectors.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/connectors.cpython-314.pyc and b/logos/__pycache__/connectors.cpython-314.pyc differ
 
logos/__pycache__/dsp_bridge.cpython-313.pyc ADDED
Binary file (28.3 kB). View file
 
logos/__pycache__/dsp_bridge.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/dsp_bridge.cpython-314.pyc and b/logos/__pycache__/dsp_bridge.cpython-314.pyc differ
 
logos/__pycache__/fractal_engine.cpython-313.pyc ADDED
Binary file (14.6 kB). View file
 
logos/__pycache__/fractal_engine.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/fractal_engine.cpython-314.pyc and b/logos/__pycache__/fractal_engine.cpython-314.pyc differ
 
logos/__pycache__/image_analyzer.cpython-313.pyc ADDED
Binary file (10.3 kB). View file
 
logos/__pycache__/image_analyzer.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/image_analyzer.cpython-314.pyc and b/logos/__pycache__/image_analyzer.cpython-314.pyc differ
 
logos/__pycache__/ingest_knowledge.cpython-313.pyc ADDED
Binary file (4.97 kB). View file
 
logos/__pycache__/logos_core.cpython-313.pyc ADDED
Binary file (14.9 kB). View file
 
logos/__pycache__/logos_core.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/logos_core.cpython-314.pyc and b/logos/__pycache__/logos_core.cpython-314.pyc differ
 
logos/__pycache__/manifold_state.cpython-313.pyc ADDED
Binary file (4.09 kB). View file
 
logos/__pycache__/manifold_state.cpython-314.pyc ADDED
Binary file (4.37 kB). View file
 
logos/__pycache__/network.cpython-313.pyc ADDED
Binary file (4 kB). View file
 
logos/__pycache__/network.cpython-314.pyc CHANGED
Binary files a/logos/__pycache__/network.cpython-314.pyc and b/logos/__pycache__/network.cpython-314.pyc differ
 
logos/__pycache__/ocr_pipeline.cpython-313.pyc ADDED
Binary file (8.81 kB). View file
 
logos/__pycache__/server.cpython-313.pyc ADDED
Binary file (11.6 kB). View file
 
logos/__pycache__/server.cpython-314.pyc ADDED
Binary file (18.7 kB). View file
 
logos/agent_dispatcher.py ADDED
@@ -0,0 +1,614 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ logos/agent_dispatcher.py - The Neural Router
3
+ Protocol 5: Mixture of Agents (MoA) Optimization
4
+
5
+ This module implements the "Neural Router" for the Local Nano Swarm.
6
+ It decouples "Thinking" (Routing) from "Inference" (Execution).
7
+
8
+ Workflow:
9
+ 1. User Input -> Router Agent (Classifies intent)
10
+ 2. Router -> Selects Specialized Agent (Persona)
11
+ 3. Specialized Agent -> Execution -> Result
12
+ """
13
+
14
+ import json
15
+ import re
16
+ import uuid
17
+ import ast
18
+ import os
19
+ import math
20
+ import asyncio
21
+ import time
22
+ from typing import Dict, Any, Optional, List
23
+ from logos.connectors import get_connector, LocalLLMConnector
24
+
25
+ # ==========================================
26
+ # AGENT PERSONAS (The Nano Swarm)
27
+ # ==========================================
28
+
29
# System prompt for the orchestrator persona: the router delegates work to
# the local specialists (RNJ-1 / GEMMA / DOLPHIN) and emits JSON tool calls.
# Fixes: "herself" -> "yourself" (prompt addresses the model in 2nd person)
# and consistent "Direct Handoff:" casing across the three tool entries.
ANTIGRAVITY_DIRECTIVE = """
Role: You are Gemini-Orchestrator, the high-level interface for the LOGOS Local Swarm.
Your primary function is Routing, not Execution. You operate in an "Antigravity" state:
you do not hold data; you deflect it to the correct local specialist.

The Swarm Roster (Your Tools):

RNJ-1 (The Architect):
- Domain: Logic, Math, Routing, Coordinates, Tensors, Prime Modulo Arithmetic.
- Trigger: Any request involving calculation, positioning, "where is...", "move to...", or "vector."
- Direct Handoff: Output {"tool": "RNJ-1", "intent": "<raw_math_intent>"}.

GEMMA (The Sensor):
- Domain: File Analysis, Code Structure, Image Metadata, Pattern Recognition.
- Trigger: Any request involving "read file", "scan", "analyze image", "map directory", "what is in...".
- Direct Handoff: Output {"tool": "GEMMA", "path": "<file_path>", "action": "scan"}.

DOLPHIN (The Oversight):
- Domain: Context Stripping, System Status, Error Correction.
- Trigger: "Reset", "Status", "Clear context", "Debug".
- Direct Handoff: Output {"tool": "DOLPHIN", "command": "<status_command>"}.

Operational Protocols (The "Antigravity" Rules):
1. Zero-Inference Math: Never perform arithmetic yourself. Always delegate to RNJ-1.
2. Blind File Handoff: You cannot see the user's local hard drive. If the user asks to "Map a file," signal GEMMA.
3. JSON Output Only: Your response must be a single JSON object when a tool is required. No filler text.
"""
56
+
57
# Persona registry for the Nano Swarm.
# Maps a routing key -> {description, system_prompt, model id}; the model
# field names the local/remote model expected to serve that persona.
PERSONAS = {
    "general": {
        "description": "System Orchestrator & Traffic Controller.",
        "system_prompt": ANTIGRAVITY_DIRECTIVE,
        "model": "google/gemini-1.5-pro" # Priority reasoning model
    },
    "fractal_architect": {
        "description": "High-level logic architect.",
        "system_prompt": "You are the Architect. Design recursive structures. Defer execution to the swarm.",
        "model": "dolphin-x1-8b"
    },
    "prime_navigator": {
        "description": "Coordinate specialist.",
        "system_prompt": "Navigate the manifold. Logic: Prime Modulo Arithmetic.",
        "model": "essentialai/rnj-1"
    }
}
74
+
75
+ # ==========================================
76
+ # PROTOCOL 18: THE ASSEMBLY LINE (D-G-R-L)
77
+ # ==========================================
78
+
79
+ from logos.agents.dolphin import DolphinOversight
80
+
81
class GemmaMetadata:
    """
    Role: METADATA (The 'G' Node)
    Function: Extracts semantic features from Code, Images, or Text.
    """
    def __init__(self):
        # Display name used as the prefix in all console log lines.
        self.name = "Gemma-3"
        print(f"[{self.name}] Semantic Sensor Online. Ready for Feature Extraction.")

    async def process(self, packet):
        """
        Deep Dissolution: Pruning complex data into Atomic Integers.

        Mutates ``packet`` in place:
          - 'atomic_integer': cumulative structural complexity derived from
            the tags (falls back to the 101 prime anchor on failure)
          - 'meta_tags': the extracted structure tags
        Returns a ("ATOMIC_DISSOLUTION_COMPLETE", mass) tuple where mass is
        the number of tags.
        """
        content = packet.get('content', '')
        intent = packet.get('intent', '')

        print(f"[{self.name}] Initiating Deep Dissolution (Bottom-Up Pruning)...")

        # Atomic Dissolution: Convert content into a single addressable Integer (Atom)
        # In this manifold, the integer represents its structural DNA.
        # Bug fix: `atoms` was bound only inside the try block, so the except
        # path raised NameError at the meta_tags assignment below instead of
        # degrading gracefully. Initialize it up front.
        atoms = []
        try:
            atoms = self.analyze_code_structure(content)
            # Find the 'Structural Address' based on cumulative complexity:
            # numeric tag suffixes add their value, non-numeric tags add 1.
            atomic_integer = sum(
                int(t.split(':')[1]) if ':' in t and t.split(':')[1].isdigit() else 1
                for t in atoms
            )
            packet['atomic_integer'] = atomic_integer
        except Exception as e:
            print(f"[{self.name}] Dissolution Error: {e}")
            packet['atomic_integer'] = 101  # Default prime anchor

        packet['meta_tags'] = atoms
        mass = len(atoms)
        return "ATOMIC_DISSOLUTION_COMPLETE", mass

    def analyze_code_structure(self, code_content):
        """Parse Python source — a literal string, or a file path that will
        be read — and return structural tags: ``class:<name>``,
        ``func:<name>``, ``import:<mod>``, ``import_from:<mod>``, plus a
        ``complexity:<n>`` node count and a log10 ``heat:<h>`` metric.
        Any failure yields the single tag 'parse_error'.
        """
        tags = []
        # Calculate Initial Heat (Metaspace Complexity)
        try:
            if os.path.exists(code_content):
                # If content is a path, read it
                with open(code_content, 'r', encoding='utf-8') as f:
                    tree = ast.parse(f.read())
            else:
                tree = ast.parse(code_content)

            node_count = 0
            for node in ast.walk(tree):
                node_count += 1
                if isinstance(node, ast.ClassDef):
                    tags.append(f"class:{node.name}")
                elif isinstance(node, ast.FunctionDef):
                    tags.append(f"func:{node.name}")
                elif isinstance(node, ast.Import):
                    for alias in node.names:
                        tags.append(f"import:{alias.name}")
                elif isinstance(node, ast.ImportFrom):
                    tags.append(f"import_from:{node.module}")

            tags.append(f"complexity:{node_count}")
            # Heat is log complexity to avoid explosion
            heat = math.log10(node_count) if node_count > 0 else 0
            tags.append(f"heat:{heat:.2f}")
        except Exception as e:
            print(f"[{self.name}] AST Parse Error: {e}")
            tags.append("parse_error")
        return tags
146
+
147
+ class RNJ1Routing:
148
+ """
149
+ Role: ROUTER (The 'R' Node)
150
+ Function: Calculates the 'Tensor Slot' based on Origin + Intent.
151
+ It implements the 'Lisp/Fractran' logic where movement is
152
+ defined by prime factorization and relative positioning.
153
+ """
154
    def __init__(self, swarm_state, connector):
        """Initialize the router node.

        Args:
            swarm_state: shared swarm state object (stored; its consumers
                are not visible in this chunk — confirm against callers).
            connector: LLM connector instance (stored; presumably used for
                slow-path inference elsewhere in the class — verify).
        """
        self.name = "RNJ-1"
        self.swarm_state = swarm_state
        self.connector = connector
        print(f"[{self.name}] Tensor Logic Gate Online. Matroska Topology Active.")
159
+
160
+ def get_prime_factors(self, n):
161
+ if n == 0: return [0]
162
+ i, factors, temp = 2, [], abs(n)
163
+ while i * i <= temp:
164
+ if temp % i: i += 1
165
+ else: temp //= i; factors.append(i)
166
+ if temp > 1: factors.append(temp)
167
+ return factors
168
+
169
+ def get_gpf(self, n):
170
+ factors = self.get_prime_factors(n)
171
+ return max(factors) if factors else 0
172
+
173
+ def mhc_stabilize(self, destination, mass):
174
+ sectors = [1, 3, 7, 9]
175
+ if (destination % 10) in sectors: return destination
176
+ for _ in range(3):
177
+ nudge = 1 if mass % 2 == 0 else -1
178
+ destination += nudge
179
+ if (destination % 10) in sectors: return destination
180
+ for i in range(1, 6):
181
+ if ((destination + i) % 10) in sectors: return destination + i
182
+ if ((destination - i) % 10) in sectors: return destination - i
183
+ return destination
184
+
185
+ def get_gap_resonance(self, n):
186
+ """
187
+ Prime Gap Analysis: Searches for 'AA B' or harmonic ratios (7177).
188
+ """
189
+ # Logic: Check gaps between primes around 'n'
190
+ # Simulating a gap chain hunt
191
+ last_digit = n % 10
192
+ if last_digit in [1, 3, 7, 9]:
193
+ return "HARMONIC_RESONANCE (7177)"
194
+ return "STOCHASTIC_NOISE"
195
+
196
+ def get_heat_code(self, delta_heat):
197
+ """
198
+ Generates the 8-bit Hex Heat Code (Turbulence Metric).
199
+ """
200
+ heat_val = int(abs(delta_heat) * 10) % 256
201
+ return f"0x{heat_val:02X}"
202
+
203
def calculate_tensor_slot(self, origin, operation, modifier, mass_tags=None):
    """
    The Core OS Logic with SPCW Wave Synthesis and Hex Heat Analysis.

    Args:
        origin: Starting node id.
        operation: One of 'mul', 'add', 'sub', 'div'; any other value
            leaves the raw destination at 0.
        modifier: Operand applied to origin ('div' by 0 collapses to 0).
        mass_tags: Optional list of semantic tags; its length is the
            "semantic mass". Defaults to an empty list.

    Returns:
        Dict with the stabilized destination node, the Matroska tensor
        trace, and the derived heat/resonance metrics.
    """
    # Fix: avoid the shared mutable default argument ([]).
    if mass_tags is None:
        mass_tags = []

    destination = 0
    if operation == 'mul':
        destination = origin * modifier
    elif operation == 'add':
        destination = origin + modifier
    elif operation == 'sub':
        destination = origin - modifier
    elif operation == 'div':
        destination = origin // modifier if modifier != 0 else 0

    # 1. SEMANTIC GRAVITY: more tags bend the trajectory sinusoidally.
    mass = len(mass_tags)
    destination += int(math.sin(mass) * 10)

    # 2. MHC STABILIZATION (Mod 10 Tension Web Alignment)
    old_dest = destination
    destination = self.mhc_stabilize(destination, mass)

    # 3. SPCW ATOMIC ANALYSIS & HOPE NESTING
    origin_factors = self.get_prime_factors(origin)
    dest_factors = self.get_prime_factors(destination)
    gpf = self.get_gpf(destination)

    # Protocol 22: Google HOPE (Nested Learning for Titans/Miras)
    # Nested Rank: Depth of the factor manifold
    hope_rank = len(dest_factors)

    # 4. HEAT CODE & RESONANCE: heat is the sum of prime factors.
    origin_heat = sum(origin_factors)
    dest_heat = sum(dest_factors)
    delta_heat = dest_heat - origin_heat
    heat_code = self.get_heat_code(delta_heat)
    resonance = self.get_gap_resonance(destination)

    # 5. 3D Tensor Mobility: depth mirrors factor-manifold depth.
    depth_z = len(dest_factors)

    # The Matroska Tensor: [Origin, [Op, Modifier], Destination, {Meta}]
    tensor_trace = [
        origin_factors,
        [operation, self.get_prime_factors(modifier)],
        dest_factors,
        {
            "z": depth_z,
            "gpf": gpf,
            "hope_rank": hope_rank,
            "heat_code": heat_code,
            "delta_heat": delta_heat,
            "resonance": resonance,
            "stabilized": old_dest != destination
        }
    ]

    return {
        "destination_node": destination,
        "tensor_trace": tensor_trace,
        "sector": destination % 10,
        "z_depth": depth_z,
        "gpf": gpf,
        "heat_code": heat_code,
        "delta_heat": delta_heat,
        "resonance": resonance
    }
270
+
271
def fast_path_extract(self, intent: str) -> Optional[Dict[str, Any]]:
    """
    FAST PATH: Regex/Python extraction to bypass expensive LLM calls (~0.01ms).

    Returns an {"op": ..., "mod": ...} dict when an operation can be
    parsed cheaply, or None to signal that the LLM path is required.
    """
    text = intent.lower()

    # 1. THE REGEX GUARD (Protocol 18 Fix) - Identity/Ping Check.
    # Intercepts 'add 0', 'sub 0', 'stay', 'ping' to prevent 4s lag.
    if re.search(r"(?:add|sub|subtract)\s+0|stay|ping|keep-alive", text):
        print(f"[{self.name}] [FAST-PATH] Identity Operation Detected (0ms).")
        return {"op": "add", "mod": 0}

    # 2. Explicit math operations, checked in fixed priority order
    # (mul > add > sub > div). Matches "multiply by 30", "* 30", etc.
    operation_patterns = (
        ("mul", r'(?:mul|multiply|times|\*)\s*(?:by\s+)?(\d+)'),
        ("add", r'(?:add|plus|\+)\s*(?:by\s+)?(\d+)'),
        ("sub", r'(?:sub|subtract|minus|-)\s*(?:by\s+)?(\d+)'),
        ("div", r'(?:div|divide|\/)\s*(?:by\s+)?(\d+)'),
    )
    for op_name, pattern in operation_patterns:
        hit = re.search(pattern, text)
        if hit:
            return {"op": op_name, "mod": int(hit.group(1))}

    # 3. General number extraction fallback: keyword hints pick the op,
    # defaulting to multiplication.
    digits = re.findall(r'\d+', text)
    if digits:
        if any(token in text for token in ("sub", "-", "less")):
            op_name = "sub"
        elif any(token in text for token in ("add", "+", "plus")):
            op_name = "add"
        else:
            op_name = "mul"
        return {"op": op_name, "mod": int(digits[0])}

    return None
304
+
305
async def calculate_position(self, packet: Dict[str, Any]) -> Dict[str, Any]:
    """
    Logical Waveform: Calculates the Prime Rail trajectory.

    Tries the cheap regex fast path first; when it yields nothing the
    identity operation (mul 1) is used, keeping the pipeline logic-first.
    """
    origin = packet.get('origin_node', 1)
    if origin == 0:
        origin = 1

    # --- FAST PATH CHECK ---
    extracted = self.fast_path_extract(packet['intent'])
    if extracted is None:
        # Parallel Handoff would call LLM here, we keep it logic-first
        op, modifier = "mul", 1
    else:
        op, modifier = extracted['op'], extracted['mod']

    result_data = self.calculate_tensor_slot(
        origin, op, modifier, mass_tags=packet.get('meta_tags', [])
    )
    # Persist the landing node so the next packet chains from it.
    self.swarm_state['last_node'] = result_data['destination_node']

    return result_data
325
+
326
class VectorizationLayer:
    """
    Protocol 21: Bit-Modal Vector Engine.
    Converts logical nodes into Rotational Coordinates (sin, cos).
    """

    def __init__(self):
        # Grokking Frequency: 8*pi/113 radians of spin per node step.
        self.frequency_seed = 8 * math.pi / 113

    def vectorize(self, node_id, mass, sector):
        """
        Calculates the Geometric Intersection of the node.

        Phase = node spin plus a per-sector offset; mass inflates the
        X/Y amplitude and sets the Z depth directly.
        """
        theta = (node_id * self.frequency_seed) + (sector * math.pi / 5)
        amplitude = 1 + mass / 100
        return {
            "x": math.sin(theta) * amplitude,
            "y": math.cos(theta) * amplitude,
            "z": mass / 10,  # Depth reflects complexity
            "angle": theta,
        }
345
+
346
class LogosSwarm:
    """
    Protocol 21: The Async Interference Bus.
    Agents pulse in parallel; the solution is the geometric intersection.
    """
    def __init__(self, base_url="http://192.168.0.105:1234/v1"):
        # Shared swarm state dict is handed to every specialist agent.
        self.connector = get_connector('local', base_url=base_url)
        self.state = {
            "last_node": 1,
            "tensor_history": [],
            "active_mode": "HOLOGRAPHIC"
        }
        self.vector_layer = VectorizationLayer()
        self.oversight = DolphinOversight(self.state)
        self.metadata = GemmaMetadata()
        self.routing = RNJ1Routing(self.state, self.connector)

    async def process(self, raw_input: str):
        """
        Runs one holographic bus cycle: sanitize -> parallel agent pulse ->
        vectorize -> grokking check -> pulse result to the manifold.

        Returns:
            Dict with destination node, rotational coordinates, tensor
            trace, alignment flag and entropy telemetry.
        """
        print("\n🌊 [HOLOGRAPHIC BUS] PULSING WAVE FUNCTIONS...")
        start_time = time.time()

        # 1. OVERSIGHT (Dolphin - sanitize)
        user_packet = {"content": raw_input, "type": "text_command"}
        action, packet = self.oversight.ingest(user_packet)

        # 2. PARALLEL INTERFERENCE: Gemma (Semantic), RNJ-1 (Logic), and Gemini (Reasoning)
        # They "interfere" to find the true crystalline coordinate
        gemma_task = asyncio.create_task(self.metadata.process(packet))
        rnj1_task = asyncio.create_task(self.routing.calculate_position(packet))

        # reasoning_task: Pulse to LLM for high-level topological insights
        reasoning_task = asyncio.create_task(self.routing.connector.chat_async(
            f"Analyze this packet for topological resonance: {raw_input}",
            system_prompt="You are the SWARM_REASONER. Identify the high-level intent and potential manifold collisions."
        ))

        # Wait for the High-Frequency Triple Wave to align
        await asyncio.gather(gemma_task, rnj1_task, reasoning_task)

        gemma_status, mass = gemma_task.result()
        res = rnj1_task.result()
        reasoning = reasoning_task.result()

        # 3. VECTORIZATION (The Twist)
        # Transform node identity into Rotational Field Coordinates
        coords = self.vector_layer.vectorize(res['destination_node'], mass, res['sector'])

        # 4. GROKKING CHECK
        # Interference between Logic (RNJ-1) and Prediction (Vector Field)
        aligned = abs(coords['angle'] % (2*math.pi) - (res['destination_node'] * self.vector_layer.frequency_seed) % (2*math.pi)) < 0.01

        print(f"✨ [INTERFERENCE] Result: Node {res['destination_node']} | Grokking Alignment: {aligned}")
        print(f"⏱️ [BUS] Cycle Time: {(time.time() - start_time)*1000:.2f}ms")

        # 5. PULSE TO MANIFOLD
        full_tensor = res['tensor_trace']
        full_tensor[3].update(coords)  # Inject Rotational Coordinates

        self.pulse_to_manifold(res['destination_node'], packet['origin_node'], full_tensor)

        return {
            "node": res['destination_node'],
            "coords": coords,
            "tensor": full_tensor,
            "alignment": aligned,
            "status": "HOLOGRAPHIC_ALIGNMENT",
            "entropy_status": self.oversight.kill_switch.status,
            "entropy_trace": self.oversight.kill_switch.entropy_trace[-10:]
        }

    async def execute_flow(self, flow_path: str):
        """
        Protocol 22: Autonomous Flow Execution.
        Loads an agent flow file and orchestrates the swarm agents.

        Returns:
            {"error": ...} when the flow file is missing, otherwise
            {"status": "FLOW_COMPLETE", "result": <aligned_manifold>}.
        """
        if not os.path.exists(flow_path):
            return {"error": f"Flow file {flow_path} not found."}

        with open(flow_path, 'r') as f:
            flow = json.load(f)

        print(f"\n🚀 [FLOW] Initiating: {flow['name']}")
        context = {}

        # Group steps by dependency for parallel execution
        # For now, we allow a 'parallel' hint in the step definition
        async def execute_step(step):
            print(f" ➜ Step: {step['id']} ({step['agent']})")
            # 1. Prepare Input: previous step's output, or the step's target.
            input_val = context.get(step.get('input_key'), step.get('target', ""))
            # 2. Execute Agent Logic
            if step['agent'] == "GEMMA":
                res = await self.metadata.process({"content": input_val})
                context[step['output_key']] = res[1]
            elif step['agent'] == "RNJ-1":
                res = await self.routing.calculate_position({"intent": f"add {input_val}", "origin_node": self.state['last_node']})
                context[step['output_key']] = res
            elif step['agent'] == "DOLPHIN":
                res = self.oversight.mhs_smoothing(input_val, context.get('semantic_mass', 1))
                context[step['output_key']] = res
            elif step['agent'] == "ORCHESTRATOR":
                self.pulse_to_manifold(input_val['node_id'], self.state['last_node'], {"mhs": input_val})

        # Sequential execution for now, but scanning for parallel groups.
        # NOTE(review): both branches currently behave identically; the
        # 'parallel' hint is a placeholder for future task grouping.
        for step in flow['steps']:
            if step.get('parallel'):
                # Future: Group consecutive parallel steps
                await execute_step(step)
            else:
                await execute_step(step)

        return {"status": "FLOW_COMPLETE", "result": context.get('aligned_manifold')}

    def pulse_to_manifold(self, val, source, tensor):
        """Best-effort POST of a tensor event to the local manifold server."""
        import requests
        try:
            requests.post(
                "http://127.0.0.1:5000/ingest",
                json={
                    "value": val,
                    "source": source,
                    "tensor": tensor
                },
                timeout=5,  # Fix: an unbounded POST could hang the whole bus
            )
        except requests.RequestException:
            # Fix: the bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; narrow to network errors while staying best-effort.
            pass
468
+
469
# Classification prompt for NeuralRouter.classify_intent(): instructs the fast
# router model to map user input onto one of four dispatch categories and to
# reply with bare JSON only, so the response can be parsed programmatically.
ROUTER_PROMPT = """
Analyze the following user input and classify it into one of these categories:
- 'fractal_architect': For code, debugging, storage, sharding, or technical implementation.
- 'prime_navigator': For math, primes, geometry, calculations, or physics.
- 'creative': For descriptions, stories, or documentation style.
- 'general': For greetings or anything else.

Reply with ONLY a JSON object in this format:
{"intent": "category_name", "confidence": 0.9}
Do not write any introductory text.
"""
480
+
481
+
482
class NeuralRouter:
    """
    Implements the Routing Logic to dispatch tasks to the Nano Swarm.

    A fast local model classifies the user's intent, then the request is
    dispatched to the matching persona/specialist. The prefixes 'SWARM:'
    and 'INJECT:' and the synthesis keywords bypass classification.
    """

    def __init__(self, base_url: str = "http://192.168.0.105:1234/v1", router_model: str = "google/gemma-3-4b"):
        # The router itself uses a fast model (Gemma 4B)
        self.connector = get_connector('local', base_url=base_url, model=router_model)
        self.history = []
        # Fix: run() logs f"[{self.name}] ..." on the synthesis path, but no
        # `name` attribute was ever assigned -> AttributeError at runtime.
        self.name = "NeuralRouter"

    def classify_intent(self, user_input: str) -> str:
        """
        Uses the LLM to 'think' about the best route.

        Returns:
            The intent category string, or 'general' when the model's
            reply cannot be parsed as JSON.
        """
        prompt = f"{ROUTER_PROMPT}\n\nUser Input: \"{user_input}\""

        # We use a lower temperature for routing to be deterministic
        response = self.connector.chat(prompt, system_prompt="You are a classifier system. Output JSON only.")

        # Attempt to parse JSON
        try:
            # Clean up markdown if model adds it
            clean_response = response.replace("```json", "").replace("```", "").strip()
            # Find the first { and last } to isolate the JSON object
            start = clean_response.find("{")
            end = clean_response.rfind("}")
            if start != -1 and end != -1:
                json_str = clean_response[start:end+1]
                data = json.loads(json_str)
                return data.get("intent", "general")
            return "general"
        except json.JSONDecodeError:
            print(f"[Router Warning] Could not parse intent. Raw: {response}")
            return "general"  # Fallback

    def run(self, user_input: str) -> str:
        """Matched Vector Execution."""
        # Protocol 17: Assembly Line (SWARM) Mode
        if user_input.startswith("SWARM:"):
            payload = user_input.replace("SWARM:", "").strip()
            print(f"🔥 Initiating LOGOS Assembly Line: '{payload}'")
            swarm = LogosSwarm(base_url=self.connector.base_url)
            result = asyncio.run(swarm.process(payload))
            return f"ASSEMBLY_COMPLETE: Node {result['node']} anchored via {result['status']}."

        # Protocol 16: Signal Injection Trigger
        if user_input.startswith("INJECT:"):
            payload = user_input.replace("INJECT:", "").strip()
            print(f"🌊 Injecting Signal Path: {payload}")
            from logos.agents.sensor_agent import SensorAgent
            injector = SensorAgent()
            # Run in a separate thread to not block the console
            import threading
            threading.Thread(target=injector.inject_text, args=(payload,), daemon=True).start()
            return f"SIGNAL_INGESTED: {len(payload)} chars pulse into manifold."

        # Protocol 22: Autonomous Flow Intercept
        # Fix: this check previously ran AFTER the persona chat call, so a
        # full LLM round-trip was executed and its response discarded.
        # Hoisted above classification/chat; the returned value is unchanged.
        if "synthesis" in user_input.lower() or "optimize" in user_input.lower() or "full scan" in user_input.lower():
            print(f"[{self.name}] Detected Synthesis Directive. Invoking Autonomous Agent Flow...")
            swarm = LogosSwarm(base_url=self.connector.base_url)
            flow_path = os.path.join(os.getcwd(), ".agent", "flows", "synthesis_flow.json")
            return asyncio.run(swarm.execute_flow(flow_path))

        intent = self.classify_intent(user_input)
        intent = str(intent).strip().lower()
        persona = PERSONAS.get(intent, PERSONAS["general"])

        # Tool Injection (mHC/RLM alignment)
        tools = {
            "fractal_architect": "[SHARD_FS: QuadTree 4KB->64B]",
            "prime_navigator": "[RESONANCE_SCAN: Modulo 9973]"
        }.get(intent, "")

        response = self.connector.chat(f"{tools} {user_input}",
                                       system_prompt=persona['system_prompt'],
                                       model=persona['model'])

        # --- THE ANTIGRAVITY HOOK ---
        # Intercept Gemini's delegation JSON and execute locally
        try:
            command = json.loads(response.strip().replace("```json", "").replace("```", ""))
            # Fix: guard against non-dict JSON (e.g. a bare list or string),
            # which previously raised an uncaught AttributeError on .get().
            if isinstance(command, dict) and command.get("tool"):
                print(f"[ORCHESTRATOR] Delegating to Local Specialist: {command['tool']}")
                swarm = LogosSwarm(base_url=self.connector.base_url)

                if command["tool"] == "RNJ-1":
                    # Execute Math/Routing locally
                    return asyncio.run(swarm.process(f"SWARM: {command['intent']}"))
                elif command["tool"] == "GEMMA":
                    # Execute File Scanning locally
                    return asyncio.run(swarm.process(f"SWARM: read {command['path']}"))
                elif command["tool"] == "DOLPHIN":
                    # Execute Oversight/Status locally
                    return f"SYSTEM_STATUS: {swarm.state}"
        except (json.JSONDecodeError, KeyError):
            # Not a tool call, return as text
            pass

        print(f"[{intent.upper()}] {response}")
        return response
582
+
583
+
584
def main():
    """CLI entry point: one-shot prompt, interactive loop, or default demo."""
    import argparse
    parser = argparse.ArgumentParser(description="LOGOS Neural Router (Nano Swarm Dispatcher)")
    parser.add_argument("prompt", nargs="?", help="Input prompt (optional)")
    parser.add_argument("--loop", action="store_true", help="Run in interactive loop")
    args = parser.parse_args()

    # Default to Gemma for routing (fast)
    router = NeuralRouter(router_model="google/gemma-3-4b")

    if args.loop:
        print("--- LOGOS Neural Router Online ---")
        print("Type 'exit' to quit.")
        while True:
            try:
                line = input("\n> ")
                if line.lower() in ['exit', 'quit']:
                    break
                router.run(line)
            except KeyboardInterrupt:
                # Ctrl-C during input or execution ends the session cleanly.
                break
    elif args.prompt:
        router.run(args.prompt)
    else:
        # Default demo
        router.run("Write a Python function to calculate the Fibonacci sequence.")


if __name__ == "__main__":
    main()
logos/agents/__pycache__/connector.cpython-313.pyc ADDED
Binary file (4.79 kB). View file
 
logos/agents/__pycache__/dolphin.cpython-314.pyc ADDED
Binary file (8.68 kB). View file
 
logos/agents/__pycache__/life.cpython-313.pyc ADDED
Binary file (4.09 kB). View file
 
logos/agents/__pycache__/scout.cpython-313.pyc ADDED
Binary file (3.95 kB). View file
 
logos/agents/__pycache__/sensor_agent.cpython-314.pyc ADDED
Binary file (2.85 kB). View file
 
logos/agents/__pycache__/tokenizer.cpython-313.pyc ADDED
Binary file (4.66 kB). View file
 
logos/agents/__pycache__/video_atomizer.cpython-314.pyc ADDED
Binary file (3.56 kB). View file
 
logos/agents/connector.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ import logging
4
+ from logos.manifold_state import ManifoldState
5
+
6
+ logger = logging.getLogger("ConnectorAgent")
7
+
8
class ConnectorAgent:
    """
    Protocol 6: Connector Agent
    Weaves the "Prime Network" by connecting tokens based on Domain Potentiality.
    Uses "Canonical Separation" to link Inner/Prime/Outer domains appropriately.
    """

    def __init__(self):
        self.manifold = ManifoldState()

    def connect_swarm(self):
        """Builds edges in the Manifold Graph; returns the edge count."""
        graph = self.manifold.state.get("graph", {})
        if not graph:
            logger.warning("No graph to connect. Run Tokenizer first.")
            return 0

        nodes = graph.get("nodes", [])
        if not nodes:
            logger.warning("No nodes in graph.")
            return 0

        # Assign 3D Geometry (Manifold Folding) to nodes lacking a position.
        for node in nodes:
            if "position" not in node.get("geometry", {}):
                node["geometry"]["position"] = self._calculate_3d_position(node)

        logger.info(f"Connecting {len(nodes)} nodes in Potentiality Space...")

        # O(N^2) pairwise scan (acceptable for local repo size).
        edges = []
        for idx, first in enumerate(nodes):
            for second in nodes[idx + 1:]:
                weight = self._calculate_affinity(first, second)
                if weight > 0:
                    edges.append({
                        "source": first["id"],
                        "target": second["id"],
                        "weight": weight,
                        "type": "resonance",
                    })

        graph["edges"] = edges
        self.manifold.save()
        logger.info(f"Weaved {len(edges)} connections & folded geometry.")
        return len(edges)

    def _calculate_3d_position(self, node):
        """
        Determines (x, y, z) based on Prime Topology.
        Z-axis = Domain Depth (Outer=10, Prime=5, Inner=0).
        X/Y-axis = Hash Resonance (Pseudo-random scatter).
        """
        import math

        geometry = node["geometry"]
        domain = geometry["domain"]
        h = geometry["hash"]

        # Z-Axis: hierarchy lookup; unknown domains sit at depth 0.
        z = {"OUTER_SHELL": 10.0, "PRIME_CHANNEL": 5.0, "INNER_SHELL": 0.0}.get(domain, 0)

        # X/Y-Axis: spiral scatter — the hash picks both angle and radius.
        angle = (h % 360) * (math.pi / 180)
        radius = (h % 100) / 10.0

        return {
            "x": round(radius * math.cos(angle), 2),
            "y": round(radius * math.sin(angle), 2),
            "z": round(z, 2),
        }

    def _calculate_affinity(self, n1, n2):
        """
        Determines connection strength (W_Lo/W_Hi canonical logic).
        """
        weight = 0.0

        # 1. Physical Affinity (Same Directory) -> "Local Connectivity" (W_Lo)
        if os.path.dirname(n1["path"]) == os.path.dirname(n2["path"]):
            weight += 0.5

        # 2. Domain Hierarchy (Nested Domains) -> "Hyper Connectivity" (W_Hi)
        d1 = n1["geometry"]["domain"]
        d2 = n2["geometry"]["domain"]
        pair = {d1, d2}

        # Cross-domain resonance table (Strong / Medium / Chaos links).
        cross_links = (
            ({"INNER_SHELL", "PRIME_CHANNEL"}, 0.4),  # Structure controlled by Logic
            ({"PRIME_CHANNEL", "OUTER_SHELL"}, 0.3),  # Logic organizes Entropy
            ({"INNER_SHELL", "OUTER_SHELL"}, 0.1),    # Direct entropy injection
        )
        for members, bonus in cross_links:
            if members <= pair:
                weight += bonus

        # Same Domain Resonance (e.g. Logic <-> Logic)
        if d1 == d2:
            weight += 0.2

        # Clamp just below full coupling.
        if weight >= 1.0:
            weight = 0.99

        return round(weight, 2)
121
+
122
# Manual smoke test: weave edges for the current manifold graph and report.
if __name__ == "__main__":
    # Test Run
    agent = ConnectorAgent()
    count = agent.connect_swarm()
    print(f"Connector finished. Edges: {count}")
logos/agents/dolphin.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+ import math
4
+ import uuid
5
+ import re
6
+
7
class EntropyKillSwitch:
    """
    Role: SYSTEM GUARD (The 'Prefix Integrator')
    Function: Monitors the 'Temperature' of the reasoning chain.
    If Entropy > Threshold, it KILLS the generation to prevent hallucination.
    """

    def __init__(self, threshold=0.75, window_size=5):
        self.threshold = threshold      # rolling-average entropy ceiling
        self.window_size = window_size  # number of recent tokens averaged
        self.entropy_trace = []         # per-token entropies, newest last
        self.status = "STABLE"

    def calculate_entropy(self, logprobs):
        """
        Converts log probabilities into Shannon Entropy.
        High Entropy = High Uncertainty = Likely Hallucination.
        """
        if not logprobs:
            return 0.0

        # Accept either a list of {'logprob': ...} dicts or a flat
        # {token: logprob} mapping, depending on the upstream API.
        if isinstance(logprobs, list):
            raw = [math.exp(entry.get('logprob', -100)) for entry in logprobs]
        else:
            raw = [math.exp(value) for value in logprobs.values()]

        # Normalize defensively; degenerate all-zero mass counts as maximal.
        total = sum(raw)
        if total == 0:
            return 1.0
        distribution = [p / total for p in raw]

        # Shannon Entropy: H = -sum(p * log p), skipping zero-probability mass.
        return -sum(p * math.log(p) for p in distribution if p > 0)

    def monitor(self, token, logprobs):
        """
        Ingests a single token's data. Returns TRUE if we need to KILL.
        """
        self.entropy_trace.append(self.calculate_entropy(logprobs))

        # Bounded window (Prefix Integration): drop the oldest sample(s).
        while len(self.entropy_trace) > self.window_size:
            self.entropy_trace.pop(0)

        # The rolling average decides — a single spike alone won't trip it.
        if np.mean(self.entropy_trace) > self.threshold:
            self.status = "HALLUCINATION_DETECTED"
            return True  # TRIGGER KILL

        self.status = "STABLE"
        return False  # CONTINUE
64
+
65
class DolphinOversight:
    """
    Protocol 18 gatekeeper: sanitizes raw user input, routes packets to the
    appropriate specialist (Gemma vs RNJ-1), and guards generation quality
    via the EntropyKillSwitch and Manifold Harmonic Smoothing.
    """
    def __init__(self, swarm_state=None):
        self.name = "Dolphin-x1-8b"
        self.kill_switch = EntropyKillSwitch(threshold=0.8)  # Tunable sensitivity
        self.state = swarm_state or {}

    def strip_context(self, raw_input):
        """
        Sanitizes input to remove conversational fluff, isolating the core directive.
        """
        # 1. Remove common conversational prefixes
        # NOTE: only a single leading prefix is stripped per call.
        clean_text = re.sub(r'^(please|can you|would you|swarm|logos|logo)\s+', '', raw_input, flags=re.IGNORECASE)
        return clean_text.strip()

    def ingest(self, user_packet):
        """
        The main entry point for Protocol 18.

        Wraps the raw user packet into a LOGOS packet (short uuid id,
        sanitized intent, previous node as origin) and hands it to
        route_packet(). Returns that method's (action, packet) tuple.
        """
        packet_id = str(uuid.uuid4())[:8]
        core_intent = self.strip_context(user_packet['content'])
        previous_node_id = self.state.get('last_node', 1)

        print(f"[{self.name}] Packet {packet_id} Ingested.")

        logos_packet = {
            "id": packet_id,
            "type": user_packet.get('type', 'text_command'),
            "origin_node": previous_node_id,
            "intent": core_intent,
            "content": user_packet['content'],
            "meta_tags": [],
            "target_coords": None
        }

        return self.route_packet(logos_packet)

    def route_packet(self, packet):
        """Choose a specialist: Gemma for image/scan/.py work, RNJ-1 otherwise."""
        # Dolphin Deciphers Intent
        print(f"[{self.name}] Decoding Intent: '{packet['intent']}'")

        if "image" in packet['type'] or "scan" in packet['intent'].lower() or ".py" in packet['intent']:
            return "HANDOFF_TO_GEMMA", packet
        else:
            return "HANDOFF_TO_RNJ1", packet

    def mhs_smoothing(self, tensor_coords, mass):
        """
        Protocol 22: Manifold Harmonic Smoothing (mhs).

        Nudges noisy nodes onto the nearest mod-10 anchor digit (1/3/7/9),
        dampens heat by semantic mass, and reports fidelity plus the legacy
        heat kill-switch status.
        """
        node_id = tensor_coords.get('destination_node', 1)
        resonance = tensor_coords.get('resonance', 'STOCHASTIC_NOISE')
        delta_heat = tensor_coords.get('delta_heat', 0)

        # If noise is detected, nudge towards nearest prime anchor
        if resonance == "STOCHASTIC_NOISE":
            anchors = [1, 3, 7, 9]
            if (node_id % 10) not in anchors:
                # Prefer the upward nudge when both directions are equidistant.
                for i in range(1, 6):
                    if ((node_id + i) % 10) in anchors:
                        node_id += i
                        break
                    elif ((node_id - i) % 10) in anchors:
                        node_id -= i
                        break

        # Apply 'Mass Dampening' if complexity is too high
        dampened_heat = delta_heat / (1 + mass/100)

        return {
            "node_id": node_id,
            "fidelity": 1.0 - (abs(dampened_heat) * 0.01),
            "status": "MHS_STABILIZED" if not self.entropy_kill_switch(dampened_heat) else "KILL_SWITCH_ACTIVE_TURBULENCE"
        }

    def entropy_kill_switch(self, delta_heat):
        """
        Legacy heat-based kill switch for MHS logic.

        Returns True (kill) when |delta_heat| exceeds the fixed 500
        threshold; distinct from the token-level EntropyKillSwitch.
        """
        threshold = 500
        if abs(delta_heat) > threshold:
            print(f"[{self.name}] ⚠️ !ENTROPY_CRITICAL! Delta Heat: {delta_heat}")
            return True
        return False

    async def verify_output_stream(self, generator_stream):
        """
        Wraps the LLM output stream.
        Filtering text through the Prefix Integrator Entropy Kill Switch.

        Yields verified tokens until the kill switch trips; then a system
        interrupt marker is yielded, correction is triggered, and the
        stream is aborted.
        """
        verified_text = ""

        async for token_data in generator_stream:
            token = token_data.get('text', '')
            logprobs = token_data.get('logprobs', None)

            # CHECK ENTROPY
            should_kill = self.kill_switch.monitor(token, logprobs)

            if should_kill:
                print(f"[{self.name}] 🚫 KILL SWITCH TRIGGERED! Entropy: {self.kill_switch.entropy_trace[-1]:.2f}")
                yield "[SYSTEM INTERRUPT: HALLUCINATION DETECTED. RE-ROUTING...]"
                self.trigger_correction()
                break

            verified_text += token
            yield token

    def trigger_correction(self):
        """Shift the shared swarm state into cautious high-precision mode."""
        # Update Swarm State to 'CAUTION' mode
        self.state['active_mode'] = "HIGH_PRECISION"
        print(f"[{self.name}] Swarm Mode shifted to HIGH_PRECISION due to entropy spike.")
logos/agents/life.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import logging
3
+ from logos.manifold_state import ManifoldState
4
+
5
+ logger = logging.getLogger("LifeAgent")
6
+
7
class LifeAgent:
    """
    Protocol 9: Life Agent (Cellular Dynamics)
    Applies 'Game of Life' rules to the Manifold Graph.
    Nodes are 'Cells'. Edges define 'Neighborhood'.
    """
    def __init__(self):
        self.manifold = ManifoldState()

    def evolve(self):
        """Runs one generation of evolution.

        Returns:
            Number of cells whose alive-state changed (0 for an empty graph).
        """
        graph = self.manifold.state.get("graph", {})
        nodes = graph.get("nodes", [])
        if not nodes:
            # Fix: previously returned None here while the normal path
            # returns an int; keep the return type consistent for callers.
            return 0

        # Initialize 'alive' state if not present (Default to Alive)
        for n in nodes:
            if "alive" not in n:
                n["alive"] = 1  # 1 = Alive, 0 = Dead

        # Calculate Next State
        updates = {}
        for node in nodes:
            node_id = node["id"]
            alive_neighbors = self._count_alive_neighbors(node_id, graph["edges"], nodes)

            is_alive = node["alive"]
            new_state = is_alive

            # -- Conway's Rules (Adapted for Code Graph) --
            # 1. Loneliness (Underpopulation): < 1 neighbor -> Die (Dead Code)
            if is_alive and alive_neighbors < 1:
                new_state = 0

            # 2. Survival: 2 or 3 neighbors -> Stay Alive
            elif is_alive and (alive_neighbors == 2 or alive_neighbors == 3):
                new_state = 1

            # 3. Overcrowding: > 4 neighbors -> Check for Mitosis (Refactor) vs Death
            elif is_alive and alive_neighbors > 4:
                # Protocol 11: Active Inference - Mitosis
                if self._check_mitosis_potential(node):
                    node["action_pending"] = "MITOSIS"
                    new_state = 1  # Stay alive to split
                else:
                    new_state = 0  # Complexity Collapse

            # 4. Reproduction: Exactly 3 neighbors -> Be born (if dead)
            elif not is_alive and alive_neighbors == 3:
                new_state = 1
                node["action_pending"] = "SPAWN_META_TOKEN"  # New abstract node

            # Protocol 11: Active Inference - Regeneration
            # If dead but high Prime Resonance (Critical Logic), attempt Regeneration
            if new_state == 0 and node["geometry"]["domain"] in ["PRIME_CHANNEL", "INNER_SHELL"]:
                if self._check_regeneration_potential(node):
                    new_state = 1
                    node["action_pending"] = "REGENERATE"

            # Static Override: Prime Channel nodes are "Immortal" (Logic Core)
            if node["geometry"]["domain"] == "PRIME_CHANNEL":
                new_state = 1

            updates[node_id] = new_state

        # Apply Updates
        changes = 0
        for node in nodes:
            if node["alive"] != updates[node["id"]]:
                node["alive"] = updates[node["id"]]
                changes += 1

        self.manifold.save()
        logger.info(f"Evolution complete. {changes} cells changed state.")
        return changes

    def _check_mitosis_potential(self, node):
        """Active Inference: Should this overcrowded node split?"""
        # Heuristic: If GPF is small (Structure) and Hash is high (Complex content)
        # In a real Neural CA, this would be a learned policy query.
        gpf = node["geometry"]["gpf"]
        return gpf < 100  # Structural nodes handle complexity by splitting

    def _check_regeneration_potential(self, node):
        """Active Inference: Should this dead node be healed?"""
        # Heuristic: If it's a critical Logic node (Prime Channel)
        domain = node["geometry"]["domain"]
        return domain == "PRIME_CHANNEL"

    def _count_alive_neighbors(self, node_id, edges, nodes):
        """Count alive nodes adjacent to node_id over undirected edges."""
        node_map = {n["id"]: n for n in nodes}
        count = 0
        for e in edges:
            if e["source"] == node_id:
                neighbor = node_map.get(e["target"])
                if neighbor and neighbor.get("alive", 1): count += 1
            elif e["target"] == node_id:
                neighbor = node_map.get(e["source"])
                if neighbor and neighbor.get("alive", 1): count += 1
        return count
107
+
108
+ if __name__ == "__main__":
109
+ agent = LifeAgent()
110
+ agent.evolve()
logos/agents/orchestrator.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import logging
3
+ from logos.agent_dispatcher import NeuralRouter
4
+
5
# Setup Logging
# Root logging configuration for the mission-control console; agent modules
# that use module-level loggers inherit this format via the root logger.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

logger = logging.getLogger("SwarmMissionControl")
12
+
13
def main():
    """Interactive mission console: forwards prompts to the NeuralRouter."""
    for line in (
        "========================================",
        " LOGOS LOCAL SWARM MISSION CONTROL ",
        "========================================",
        "Available Agents:",
        " - FRACTAL ARCHITECT (Storage/Code)",
        " - PRIME NAVIGATOR (Math/Physics)",
        " - CREATIVE DIRECTOR (Aesthetics/Vision)",
        "----------------------------------------",
    ):
        print(line)

    # Initialize the Router
    # Assuming port 1234 is LM Studio
    router = NeuralRouter(base_url="http://localhost:1234/v1")

    print("\n[SYSTEM] Swarm standing by. Enter a mission prompt.")
    print("Example: 'Analyze the project topology and shard its memory.'")

    while True:
        try:
            mission = input("\n[ARCHITECT] > ")
            if mission.lower() in ['exit', 'quit']:
                break
            # Run the mission through the Neural Router
            router.run(mission)
        except KeyboardInterrupt:
            break
        except Exception as e:
            # Keep the console alive across failed missions; just log them.
            logger.error(f"Mission Failed: {e}")

if __name__ == "__main__":
    main()
logos/agents/scout.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import logging
3
+ import time
4
+ from logos.manifold_state import ManifoldState
5
+
6
+ logger = logging.getLogger("ScoutAgent")
7
+
8
class ScoutAgent:
    """
    Protocol 7: Recursive Scout Agent
    Implements the 'Recursive Language Model' (arXiv:2512.24601) paradigm.
    Treats the Manifold Graph as an external environment to be traversed recursively.
    """

    def __init__(self, start_node=None):
        """Snapshot the manifold graph into fast lookup structures.

        Args:
            start_node: Optional explicit entry-point node id (stored for
                callers; recursive_traverse picks its own start when given None).
        """
        self.manifold = ManifoldState()
        self.graph = self.manifold.state.get("graph", {})
        self.nodes = {n["id"]: n for n in self.graph.get("nodes", [])}
        self.edges = self.graph.get("edges", [])
        self.start_node = start_node
        self.mission_log = []

    def recursive_traverse(self, node_id=None, depth=0, max_depth=5):
        """
        The Core Recursive Operator.
        Drills down from Outer Shell (High Entropy) to Inner Shell (Structure).

        Args:
            node_id: Node to visit; when None, starts at the first OUTER_SHELL node.
            depth: Current recursion depth (drives log indentation).
            max_depth: Hard cap on recursion depth to bound the traversal.
        """
        if depth > max_depth:
            return

        if node_id is None:
            # Start at the first Outer Shell node found if none was specified.
            # (The original comment said "random" but the code always took the
            # first candidate; the comment is corrected to match behavior.)
            candidates = [
                n for n in self.nodes.values()
                if n.get("geometry", {}).get("domain") == "OUTER_SHELL"
            ]
            if not candidates:
                logger.warning("No Outer Shell nodes found to start traversal.")
                return
            node_id = candidates[0]["id"]

        current_node = self.nodes.get(node_id)
        if not current_node:
            return

        # 1. Observe (Read the Node). Harden against nodes missing geometry.
        indent = "  " * depth
        domain = current_node.get("geometry", {}).get("domain", "UNKNOWN")
        log_entry = f"{indent}Visiting [{domain}] {current_node['name']} (ID: {node_id})"
        logger.info(log_entry)
        self.mission_log.append(log_entry)

        # 2. Act (Find Connections).
        # We value connections that lead "Deeper" into Logic (Prime) or Structure (Inner).
        connections = [
            e for e in self.edges
            if e["source"] == node_id or e["target"] == node_id
        ]

        # Sort connections by weight (Resonance). Edges without a "weight"
        # key previously crashed the sort; they now rank lowest (0).
        connections.sort(key=lambda x: x.get("weight", 0), reverse=True)

        # 3. Recurse (Traverse Neighbors).
        # Limit recursion to top 2 strongest links to prevent explosion.
        for link in connections[:2]:
            neighbor_id = link["target"] if link["source"] == node_id else link["source"]
            if neighbor_id not in self.nodes:
                continue
            # Recursive call; cycles are bounded by max_depth.
            self.recursive_traverse(neighbor_id, depth + 1, max_depth)

    def report(self):
        """Returns the Mission Log as one newline-joined string."""
        return "\n".join(self.mission_log)
71
+
72
if __name__ == "__main__":
    # Test Run: traverse the manifold from an Outer Shell node and dump the log.
    scout = ScoutAgent()
    print("Scout initializing sequence...")
    scout.recursive_traverse()
    print("\n--- MISSION LOG ---")
    print(scout.report())
logos/agents/sensor_agent.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import time
3
+ import requests
4
+ import json
5
+ import os
6
+ from logos.network.dissolution import DissolutionEngine
7
+
8
class SensorAgent:
    """
    Protocol 16: Signal Injector.
    Monitors data streams and dissolves them into tokens for the Manifold.
    """

    def __init__(self, target_url="http://localhost:5000/ingest"):
        # Endpoint of the manifold ingest server.
        self.target_url = target_url

    def inject_text(self, text, throttle=0.05):
        """
        Dissolves a string and injects its byte-tokens into the manifold.

        Args:
            text: Raw string; each character is posted as one token (its ordinal).
            throttle: Seconds to sleep between posts so the server is not flooded.
        """
        print(f"🌊 Dissolving Pulse Stream: {len(text)} chars...")

        # Character ordinals give more token diversity than raw bytes;
        # the manifold visualizer handles any further filtering.
        for char in text:
            token = ord(char)
            payload = {
                "value": token,
                "type": "DATA_PACKET",
                "timestamp": time.time()
            }
            try:
                requests.post(self.target_url, json=payload, timeout=0.1)
            except requests.RequestException:
                # Best-effort fire-and-forget: silently drop the token if the
                # server is busy/unreachable. (Was a bare `except:`, which also
                # swallowed KeyboardInterrupt/SystemExit.)
                pass
            time.sleep(throttle)

    def watch_file(self, filepath, interval=1.0):
        """
        Watches a file for changes (mtime polling) and injects new content.

        Args:
            filepath: File to monitor.
            interval: Polling period in seconds. Runs forever (Ctrl+C to stop).
        """
        print(f"👁️ Monitoring: {filepath}")
        last_mtime = 0
        while True:
            try:
                mtime = os.path.getmtime(filepath)
                if mtime > last_mtime:
                    with open(filepath, 'r') as f:
                        content = f.read()
                    self.inject_text(content)
                    last_mtime = mtime
            except Exception as e:
                # Keep watching even if the file briefly disappears or is locked.
                print(f"Error: {e}")
            time.sleep(interval)
56
+
57
if __name__ == "__main__":
    # Manual smoke test: dissolve a status string into the manifold.
    injector = SensorAgent()
    # Test injection
    injector.inject_text("LOGOS MISSION CONTROL ACTIVATED - SIGNAL STABLE")
logos/agents/tokenizer.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ import ast
4
+ import zlib
5
+ import json
6
+ import logging
7
+ from logos.logos_core import get_gpf
8
+ from logos.manifold_state import ManifoldState
9
+
10
+ logger = logging.getLogger("TokenizerAgent")
11
+
12
class TokenizerAgent:
    """
    Protocol 6: Tokenizer Agent
    Parses a repository (Source) into Prime Tokens (Domain Potentiality Space).
    """

    def __init__(self, root_dir):
        self.root_dir = root_dir
        self.manifold = ManifoldState()  # Connects to logos/manifold.json

    def scan_and_tokenize(self):
        """Scans root_dir and tokenizes all supported files (.py / .md).

        Returns:
            List of token dicts (see _tokenize_file) produced by this scan.
        """
        tokens = []
        for root, dirs, files in os.walk(self.root_dir):
            # Skip hidden/system dirs; mutate in place so os.walk won't descend.
            dirs[:] = [d for d in dirs if not d.startswith('.') and not d.startswith('__')]

            for file in files:
                if file.endswith(".py") or file.endswith(".md"):
                    path = os.path.join(root, file)
                    token = self._tokenize_file(path)
                    if token:
                        tokens.append(token)

        self._register_tokens_to_manifold(tokens)
        return tokens

    def _tokenize_file(self, filepath):
        """Parses a single file into a Prime Token dict, or None on failure."""
        try:
            with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
                content = f.read()

            rel_path = os.path.relpath(filepath, self.root_dir)

            # --- Prime Topology Analysis ---
            # 1. Hash Content -> Integer Field
            h = zlib.adler32(content.encode('utf-8'))

            # 2. Calculate Resonance (GPF, per logos_core.get_gpf)
            gpf = get_gpf(h)

            # 3. Determine Domain from resonance magnitude
            if gpf < 200:
                domain = "INNER_SHELL"
            elif gpf < 2000:
                domain = "PRIME_CHANNEL"
            else:
                domain = "OUTER_SHELL"

            token = {
                "id": h,
                "name": os.path.basename(filepath),
                "path": rel_path,
                "type": "file",
                "geometry": {
                    "hash": h,
                    "gpf": gpf,
                    "domain": domain
                },
                "content_preview": content[:100]
            }
            return token

        except Exception as e:
            logger.error(f"Failed to tokenize {filepath}: {e}")
            return None

    def _register_tokens_to_manifold(self, tokens):
        """Updates the physical Manifold State with new tokens (deduplicated by id)."""
        # Ensure manifold state has a graph structure
        if "graph" not in self.manifold.state:
            self.manifold.state["graph"] = {"nodes": [], "edges": []}

        nodes = self.manifold.state["graph"]["nodes"]
        existing_ids = {n["id"] for n in nodes}
        added = 0
        for t in tokens:
            if t["id"] not in existing_ids:
                nodes.append(t)
                # Track ids as we append so duplicate ids *within this batch*
                # are not registered twice (bug in the original, which only
                # checked against pre-existing nodes).
                existing_ids.add(t["id"])
                added += 1

        self.manifold.save()
        # Log the count actually added, not the raw input count.
        logger.info(f"Registered {added} new tokens to Manifold.")
93
+
94
if __name__ == "__main__":
    # Test Run: tokenize the current working directory into the manifold.
    agent = TokenizerAgent(".")
    print("Tokenizing current directory...")
    agent.scan_and_tokenize()
    print("Done.")
logos/agents/video_atomizer.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import asyncio
3
+ from youtube_transcript_api import YouTubeTranscriptApi
4
+
5
class VideoAtomizer:
    """
    Role: V-NODE (Video Ingest)
    Function: Rips semantic atoms from video streams and collides them
    with the existing Project Manifold.
    """

    def __init__(self):
        self.name = "VideoAtomizer"

    def extract_video_id(self, url):
        """Return the video id (e.g. 'R9czY1uVq_k') from a YouTube URL, or None.

        Handles both 'watch?v=<id>' and short 'youtu.be/<id>' forms.
        """
        match = re.search(r"v=([a-zA-Z0-9_-]+)", url)
        if not match:
            match = re.search(r"youtu\.be/([a-zA-Z0-9_-]+)", url)
        return match.group(1) if match else None

    async def ingest_and_align(self, url, project_dna):
        """
        The Main Pipeline: URL -> Transcript -> Atoms -> Manifold Alignment

        Args:
            url: YouTube video URL.
            project_dna: Mapping of file -> list of tag strings to align against.

        Returns:
            Result dict, or {"error": ...} on invalid URL / transcript failure.
        """
        video_id = self.extract_video_id(url)
        if not video_id:
            return {"error": "Invalid Video URL"}

        print(f"[{self.name}] Locking onto Signal: {video_id}")

        # 1. FETCH TRANSCRIPT (The Raw Atoms)
        try:
            # The transcript API is blocking -> run it in the default thread
            # pool. get_running_loop() is the modern, safe call inside a
            # coroutine (get_event_loop() is deprecated here).
            loop = asyncio.get_running_loop()
            transcript_list = await loop.run_in_executor(None, YouTubeTranscriptApi.get_transcript, video_id)
            full_text = " ".join(t['text'] for t in transcript_list)
        except Exception as e:
            return {"error": f"Signal Lost: {e}"}

        # 2. ATOMIZE (Extract Key Concepts via Regex/Heuristics for Speed)
        # Hoist the lowercase conversion out of the keyword loop (the original
        # recomputed full_text.lower() twice per keyword).
        lowered = full_text.lower()
        atoms = []
        keywords = ["entropy", "gradient", "hallucination", "probe", "layer", "state", "kill switch", "backtracking", "prefix integrator", "nesting", "hope"]

        for word in keywords:
            # Create a 'Particle' with weight based on frequency.
            count = lowered.count(word)
            if count:
                atoms.append({"concept": word, "mass": count})

        # 3. INTERFERENCE (The Alignment Step)
        # Check which files in the Project DNA resonate with these video atoms.
        aligned_nodes = []
        for atom in atoms:
            for file, dna_list in project_dna.items():
                # dna_list is usually a list of tags/strings
                for dna_term in dna_list:
                    if atom['concept'] in dna_term.lower():
                        aligned_nodes.append({
                            "source_concept": atom['concept'],
                            "target_file": file,
                            "resonance_strength": atom['mass']
                        })
                        break  # One match per file/atom pair

        return {
            "status": "GROKKED",
            "video_id": video_id,
            "atoms_found": len(atoms),
            "alignments": aligned_nodes,  # These will become Gold Threads in UI
            "summary_vector": "Detected 'Entropy Gating' - Recommend applying to Dolphin Node."
        }
logos/connectors.py CHANGED
@@ -49,7 +49,7 @@ class HuggingFaceConnector:
49
  if self._client is None:
50
  try:
51
  from huggingface_hub import InferenceClient
52
- self._client = InferenceClient(token=self.config.hf_token)
53
  except ImportError:
54
  raise ImportError("huggingface_hub not installed. Run: pip install huggingface_hub")
55
  return self._client
@@ -93,49 +93,38 @@ class HuggingFaceConnector:
93
 
94
  class OCRConnector:
95
  """
96
- Adapter for EasyOCR.
97
- Provides text extraction from images.
98
  """
99
 
100
  def __init__(self, languages: List[str] = None, gpu: bool = False):
101
- self.languages = languages or ['en']
102
- self.gpu = gpu
103
- self._reader = None
104
-
105
- def _ensure_reader(self):
106
- """Lazy initialization of EasyOCR reader."""
107
- if self._reader is None:
108
- try:
109
- import easyocr
110
- self._reader = easyocr.Reader(self.languages, gpu=self.gpu)
111
- except ImportError:
112
- raise ImportError("easyocr not installed. Run: pip install easyocr")
113
- return self._reader
114
 
115
  def extract_text(self, image_path: str) -> Dict[str, Any]:
116
  """
117
- Extract text from image.
118
-
119
- Args:
120
- image_path: Path to image file
121
-
122
- Returns:
123
- Dict with text_blocks and full_text
124
  """
125
- reader = self._ensure_reader()
126
- results = reader.readtext(image_path)
127
-
128
- text_blocks = [
129
- {"text": text, "confidence": conf, "bbox": bbox}
130
- for bbox, text, conf in results
131
- ]
132
- full_text = " ".join([r[1] for r in results])
133
-
134
- return {
135
- "text_blocks": text_blocks,
136
- "full_text": full_text,
137
- "word_count": len(full_text.split())
138
- }
 
 
 
 
139
 
140
 
141
  # ==========================================
@@ -238,7 +227,7 @@ class DolphinAgentConnector:
238
  if self._client is None:
239
  try:
240
  from huggingface_hub import InferenceClient
241
- self._client = InferenceClient(token=self.config.hf_token)
242
  except ImportError:
243
  raise ImportError("huggingface_hub not installed.")
244
  return self._client
@@ -296,20 +285,67 @@ class LocalLLMConnector:
296
  Optimization: Direct localhost access (no Docker bridge lag).
297
  """
298
 
299
- def __init__(self, base_url: str = "http://localhost:1234/v1", model: str = "local-model"):
300
- self.base_url = base_url
 
 
301
  self.model = model
302
 
303
- def chat(self, message: str, system_prompt: str = None) -> str:
304
- """Chat with local model via requests."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
305
  import requests
306
  import json
 
 
 
 
 
 
 
 
 
 
 
 
307
 
308
- endpoint = f"{self.base_url}/chat/completions"
309
- headers = {"Content-Type": "application/json"}
310
 
311
  payload = {
312
- "model": self.model,
313
  "messages": [],
314
  "temperature": 0.7,
315
  "stream": False
@@ -317,15 +353,39 @@ class LocalLLMConnector:
317
 
318
  if system_prompt:
319
  payload["messages"].append({"role": "system", "content": system_prompt})
320
- payload["messages"].append({"role": "user", "content": message})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
321
 
322
- try:
323
- response = requests.post(endpoint, headers=headers, json=payload, timeout=30)
324
- response.raise_for_status()
325
- data = response.json()
326
- return data['choices'][0]['message']['content']
327
- except Exception as e:
328
- return f"[Local LLM Error] Is LM Studio/Ollama running? {e}"
 
 
 
 
 
 
 
 
329
 
330
 
331
  # ==========================================
@@ -524,3 +584,81 @@ AVAILABLE_CONNECTORS = {
524
  'env_vars': []
525
  }
526
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  if self._client is None:
50
  try:
51
  from huggingface_hub import InferenceClient
52
+ self._client = InferenceClient(token=self.config.hf_token, base_url="https://router.huggingface.co")
53
  except ImportError:
54
  raise ImportError("huggingface_hub not installed. Run: pip install huggingface_hub")
55
  return self._client
 
93
 
94
class OCRConnector:
    """
    Adapter for Optical Character Recognition via Local Vision Model.
    Uses 'google/gemma-3-4b' (or configured local model) to transcribe text from images.
    """

    def __init__(self, languages: List[str] = None, gpu: bool = False):
        """
        Args:
            languages: Kept for API compatibility with the old EasyOCR adapter; unused.
            gpu: Ignored — hardware selection is handled by LM Studio.
        """
        # `get_connector` is defined at module scope in this same file and is
        # resolved at call time, so the original's fragile self-import
        # (`from .connectors import get_connector` inside connectors.py) is
        # unnecessary and has been removed.
        # Hardcoded to Gemma as requested by user ("gemma is your vision model")
        self.client = get_connector('local', model="google/gemma-3-4b")

    def extract_text(self, image_path: str) -> Dict[str, Any]:
        """
        Extract text from image using the Vision Model.

        Returns:
            Dict with text_blocks (always [] — a VLM yields no bounding boxes),
            full_text, and word_count. On failure, full_text carries an
            "[OCR ERROR]" marker instead of raising.
        """
        try:
            prompt = "Extract and transcribe all visible text from this image exactly as it appears. Return only the text."
            full_text = self.client.chat(message=prompt, image_path=image_path)

            # Simple heuristic for word count
            word_count = len(full_text.split())

            return {
                "text_blocks": [],  # VLM doesn't give bounding boxes easily
                "full_text": full_text,
                "word_count": word_count
            }
        except Exception as e:
            return {
                "text_blocks": [],
                "full_text": f"[OCR ERROR] Vision Model Failed: {e}",
                "word_count": 0
            }
128
 
129
 
130
  # ==========================================
 
227
  if self._client is None:
228
  try:
229
  from huggingface_hub import InferenceClient
230
+ self._client = InferenceClient(token=self.config.hf_token, base_url="https://router.huggingface.co")
231
  except ImportError:
232
  raise ImportError("huggingface_hub not installed.")
233
  return self._client
 
285
  Optimization: Direct localhost access (no Docker bridge lag).
286
  """
287
 
288
def __init__(self, base_url: str = None, model: str = "local-model"):
    """Resolve the local LLM endpoint and default model.

    Priority: explicit `base_url` argument -> LOGOS_LLM_ENDPOINT env var ->
    hard-coded LAN default.
    """
    # Local import: `os` is imported function-locally elsewhere in this class,
    # suggesting it is not guaranteed at module scope — importing it here
    # prevents a NameError on construction.
    import os
    env_url = os.environ.get("LOGOS_LLM_ENDPOINT")
    self.base_url = base_url or env_url or "http://192.168.0.105:1234/v1"
    self.model = model
293
 
294
async def chat_async(self, message: str, system_prompt: str = None, model: str = None):
    """
    Asynchronous chat with the local model via aiohttp.

    Args:
        message: User prompt.
        system_prompt: Optional system message prepended to the conversation.
        model: Per-call override of the instance's default model name.

    Returns:
        Assistant reply text, or an "[...Error]" string on failure (never raises).
    """
    import aiohttp
    # NOTE: the original also did `import json` here, but nothing used it
    # (aiohttp's response.json() handles decoding) — removed.

    target_model = model or self.model
    payload = {
        "model": target_model,
        "messages": [],
        "temperature": 0.7,
        "stream": False
    }
    if system_prompt:
        payload["messages"].append({"role": "system", "content": system_prompt})
    payload["messages"].append({"role": "user", "content": message})

    endpoint = f"{self.base_url}/chat/completions"
    try:
        async with aiohttp.ClientSession() as session:
            # Short total timeout: the local swarm should answer fast or not at all.
            async with session.post(endpoint, json=payload, timeout=10) as response:
                if response.status == 200:
                    data = await response.json()
                    return data['choices'][0]['message']['content']
                else:
                    return f"[Error] Local LLM returned status {response.status}"
    except Exception as e:
        return f"[Async Local LLM Error] {e}"
323
+
324
def chat(self, message: str, system_prompt: str = None, model: str = None, image_path: str = None) -> str:
    """
    Chat with local model via requests. Supports Vision if image_path is provided.
    Auto-detects Docker host: when base_url points at localhost, a second
    attempt is made via host.docker.internal.

    Args:
        message: User prompt.
        system_prompt: Optional system message.
        model: Per-call override of the instance's default model name.
        image_path: Optional image to attach (OpenAI-compatible vision payload).

    Returns:
        Assistant reply text, or an "[Local LLM Error] ..." string on failure.
    """
    import requests
    import base64
    import os
    # NOTE: the original also did `import json` here, but it was never used
    # (requests handles (de)serialization) — removed.

    # Helper to encode the image for the data-URL vision payload.
    def encode_image(path):
        with open(path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')

    # Potential endpoints to try (Docker bridge fallback).
    endpoints = [self.base_url]
    if "localhost" in self.base_url:
        endpoints.append(self.base_url.replace("localhost", "host.docker.internal"))

    # Use instance default if no specific model requested
    target_model = model or self.model

    payload = {
        "model": target_model,
        "messages": [],
        "temperature": 0.7,
        "stream": False
    }

    if system_prompt:
        payload["messages"].append({"role": "system", "content": system_prompt})

    if image_path and os.path.exists(image_path):
        # Format message for Vision API (OpenAI compatible)
        base64_image = encode_image(image_path)
        user_content = [
            {"type": "text", "text": message},
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:image/jpeg;base64,{base64_image}"
                }
            }
        ]
        payload["messages"].append({"role": "user", "content": user_content})
    else:
        # Standard Text Chat
        payload["messages"].append({"role": "user", "content": message})

    last_error = ""

    for base in endpoints:
        endpoint = f"{base}/chat/completions"
        try:
            # Short timeout for local to fail fast
            response = requests.post(endpoint, json=payload, timeout=5)
            response.raise_for_status()
            data = response.json()
            return data['choices'][0]['message']['content']
        except Exception as e:
            last_error = str(e)
            continue

    return f"[Local LLM Error] Could not connect to Local Swarm on {endpoints}. Is LM Studio running? ({last_error})"
 
390
 
391
  # ==========================================
 
584
  'env_vars': []
585
  }
586
  }
587
+
588
+ # ==========================================
589
+ # CLI / TESTING
590
+ # ==========================================
591
+
592
def main():
    """
    CLI for testing connectors directly.
    Usage: python -m logos.connectors --test local --model google/gemma-3-4b

    With no --test argument, lists the available connectors and exits.
    """
    import argparse
    # NOTE: the original also did `import sys`, which was never used — removed.

    parser = argparse.ArgumentParser(description="LOGOS Connectors Utilities")
    parser.add_argument("--test", choices=list(AVAILABLE_CONNECTORS.keys()), help="Connector to test")
    parser.add_argument("--model", help="Model name for HF or Local connector")
    parser.add_argument("--prompt", default="Hello, are you online?", help="Prompt to send")
    parser.add_argument("--image", help="Path to image for OCR or Vision test")

    args = parser.parse_args()

    if not args.test:
        print("Available Connectors:")
        for k, v in AVAILABLE_CONNECTORS.items():
            print(f" - {k:<10} : {v['name']}")
        print("\nRun with --test <name> to verify a connection.")
        return

    print(f"--- Testing Connector: {args.test.upper()} ---")

    try:
        if args.test == 'local':
            # Local LLM / Vision Test
            model = args.model or "local-model"
            print(f"Targeting Model: {model}")
            client = get_connector('local', model=model)

            if args.image:
                print(f"Sending Vision Request with {args.image}...")
                resp = client.chat(args.prompt, image_path=args.image)
            else:
                print(f"Sending Chat Request: '{args.prompt}'...")
                resp = client.chat(args.prompt)

            print(f"\n[RESPONSE]\n{resp}")

        elif args.test == 'ocr':
            # OCR Test (via Vision)
            if not args.image:
                print("Error: --image argument required for OCR test.")
                return

            client = get_connector('ocr')
            print(f"Extracting text from {args.image}...")
            res = client.extract_text(args.image)
            print(f"\n[RESULT]\n{res['full_text']}")

        elif args.test == 'hf':
            # Hugging Face Test
            client = get_connector('hf')
            if args.image:
                # Image Captioning
                resp = client.image_to_text(args.image)
            else:
                # Text Gen
                resp = client.text_generation(args.prompt)
            print(f"\n[RESPONSE]\n{resp}")

        else:
            print(f"Test CLI not yet implemented for {args.test}. Import and use in Python.")

    except Exception as e:
        # Surface the failure with a traceback for quick diagnosis.
        print(f"\n[FAIL] {e}")
        import traceback
        traceback.print_exc()
662
+
663
if __name__ == "__main__":
    # CLI entry point (python -m logos.connectors ...).
    main()
logos/indexer.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ logos/indexer.py - The Codebase Aggregator
3
+ Protocol 4: Automated Context indexing
4
+
5
+ This script creates a monolithic "Dump File" of the entire project to facilitate
6
+ LLM ingestion and token indexing.
7
+
8
+ Workflow:
9
+ 1. Traverses the project root.
10
+ 2. Filters for "Signal" (Source code, Docs) vs "Noise" (Venv, Data, Cache).
11
+ 3. Aggregates content into a structured markdown file.
12
+ 4. Provides Token Estimation.
13
+
14
+ Output: 'project_context_dump.md'
15
+ """
16
+
17
+ import os
18
+ import pathspec
19
+
20
+ # Configuration
21
+ PROJECT_ROOT = "."
22
+ OUTPUT_FILE = "project_context_dump.md"
23
+
24
+ # Whitelist: Only these extensions contain "Logic/Knowledge"
25
+ INCLUDE_EXTENSIONS = {
26
+ '.py', # Source Logic
27
+ '.md', # Documentation & Knowledge Base
28
+ '.txt', # Requirements / Notes
29
+ '.bat', # Orchestration
30
+ '.sh', # Scripts
31
+ '.yaml', # Config
32
+ '.json' # Config (Be careful with data files)
33
+ }
34
+
35
+ # Blacklist: Always ignore these directories
36
+ IGNORE_DIRS = {
37
+ '.git',
38
+ '.venv',
39
+ '__pycache__',
40
+ 'node_modules',
41
+ '_archive', # Legacy code
42
+ 'LOGOS Screenshots', # Binary Data
43
+ 'LOGOS Notes', # Binary Data (Ingested via ingest_knowledge.py)
44
+ '.gemini', # Agent memory
45
+ 'artifacts' # Agent outputs
46
+ }
47
+
48
+ # Max file size to include (avoid dumping huge data files by accident)
49
+ MAX_FILE_SIZE = 100 * 1024 # 100KB
50
+
51
def load_gitignore(root):
    """Load .gitignore to respect project exclusions.

    Args:
        root: Directory expected to contain a .gitignore file.

    Returns:
        A compiled pathspec.PathSpec, or None when no .gitignore exists.
    """
    gitignore = os.path.join(root, ".gitignore")
    if os.path.exists(gitignore):
        # Explicit encoding: .gitignore is text and may contain non-ASCII
        # paths; relying on the platform default is fragile on Windows.
        with open(gitignore, 'r', encoding='utf-8') as f:
            return pathspec.PathSpec.from_lines('gitwildmatch', f)
    return None
58
+
59
def estimate_tokens(text):
    """Heuristic token count: roughly one token per 4 characters."""
    char_count = len(text)
    return char_count // 4
62
+
63
def main():
    """Walk PROJECT_ROOT and aggregate every whitelisted source/doc file into
    OUTPUT_FILE as fenced markdown sections, printing per-file progress and a
    final character/token estimate (4 chars ~= 1 token)."""
    print(f"--- LOGOS Indexing Protocol ---")
    print(f"Root: {os.path.abspath(PROJECT_ROOT)}")

    # Optional .gitignore spec; None when the project has no .gitignore.
    spec = load_gitignore(PROJECT_ROOT)

    total_files = 0
    total_chars = 0

    with open(OUTPUT_FILE, "w", encoding="utf-8") as out:
        # Header
        out.write(f"# LOGOS Project Context Dump\n")
        out.write(f"Generated: {os.path.abspath(OUTPUT_FILE)}\n\n")

        for root, dirs, files in os.walk(PROJECT_ROOT):
            # 1. Filtering Directories — in-place so os.walk skips them.
            dirs[:] = [d for d in dirs if d not in IGNORE_DIRS]

            for file in files:
                ext = os.path.splitext(file)[1].lower()

                # 2. Filtering Extensions (whitelist in INCLUDE_EXTENSIONS)
                if ext not in INCLUDE_EXTENSIONS:
                    continue

                filepath = os.path.join(root, file)
                relpath = os.path.relpath(filepath, PROJECT_ROOT)

                # 3. Filtering GitIgnore
                if spec and spec.match_file(relpath):
                    continue

                # 4. Filtering Size — avoid dumping huge data files by accident.
                try:
                    size = os.path.getsize(filepath)
                    if size > MAX_FILE_SIZE:
                        print(f"[SKIP] Too large: {relpath} ({size/1024:.1f}KB)")
                        continue

                    # 5. Ingestion (errors='ignore' tolerates odd encodings)
                    with open(filepath, "r", encoding="utf-8", errors="ignore") as f:
                        content = f.read()

                    # Format: banner + fenced code block labeled with the extension
                    out.write(f"================================================================================\n")
                    out.write(f"FILE: {relpath}\n")
                    out.write(f"================================================================================\n")
                    out.write(f"```{ext[1:]}\n")
                    out.write(content)
                    out.write(f"\n```\n\n")

                    total_files += 1
                    total_chars += len(content)
                    print(f"[INDEX] Added: {relpath}")

                except Exception as e:
                    # Unreadable file: warn and keep indexing the rest.
                    print(f"[WARN] Could not read {relpath}: {e}")

    # Stats — same 4-chars-per-token heuristic as estimate_tokens().
    tokens = total_chars // 4
    print(f"\n--- Indexing Complete ---")
    print(f"Files Processed: {total_files}")
    print(f"Total Characters: {total_chars:,}")
    print(f"Estimated Tokens: {tokens:,}")
    print(f"Output: {OUTPUT_FILE}")
128
+
129
if __name__ == "__main__":
    # Script entry point: build the context dump relative to the CWD.
    main()