/**
 * widgetdc-cortex — apps/backend/src/mcp/servers/NeuralBridgeServer.ts
 * (Deployed via GitHub Actions 2025-12-16, commit f1a6f7e)
 *
 * ╔═══════════════════════════════════════════════════════════════════════════╗
 * ║ THE SYNAPSE PROTOCOL - NEURAL BRIDGE                                      ║
 * ╠═══════════════════════════════════════════════════════════════════════════╣
 * ║ MCP Server that bridges Claude Desktop ↔ WidgeTDC                         ║
 * ║ Refactored: Modular Tool Architecture                                     ║
 * ╚═══════════════════════════════════════════════════════════════════════════╝
 */
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
CallToolRequestSchema,
ListToolsRequestSchema,
ListResourcesRequestSchema,
ReadResourceRequestSchema,
} from '@modelcontextprotocol/sdk/types.js';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
import { randomUUID } from 'crypto';
import { neo4jAdapter } from '../../adapters/Neo4jAdapter.js';
import { EmailIngestor } from '../../services/ingestors/EmailIngestor.js';
import { CalendarIngestor } from '../../services/ingestors/CalendarIngestor.js';
import { sentinelEngine } from '../../services/SentinelEngine.js';
import { auditoryService } from '../../services/AuditoryService.js';
import { motorCortex } from '../../services/MotorCortex.js';
import { temporalLobe } from '../../services/TemporalLobe.js';
import { prefrontalCortex } from '../../services/PrefrontalCortex.js';
import puppeteer, { Browser, Page } from 'puppeteer';
// Tool Modules
import { FILE_SYSTEM_TOOLS, SAFE_DESKTOP_PATH, SNAPSHOTS_PATH, VIDENSARKIV_PATH, listSafeFiles, ensureSafeZoneExists } from '../tools/FileSystemTools.js';
import { GRAPH_TOOLS } from '../tools/GraphTools.js';
import { AGENT_TOOLS } from '../tools/AgentTools.js';
// Base URL of the running WidgeTDC frontend; used by the Visual Cortex
// (Puppeteer) tooling to know what to screenshot/audit.
const FRONTEND_URL = process.env.FRONTEND_URL || 'http://localhost:8888';

/**
 * Aggregated health snapshot for the WidgeTDC system,
 * produced by NeuralBridgeServer.updateSystemHealth().
 */
interface SystemHealth {
  // Overall status, derived from `score` (>=0.8 healthy, >=0.5 degraded, else critical).
  status: 'healthy' | 'degraded' | 'critical';
  // Fraction of healthy components, 0.0–1.0.
  score: number;
  // Per-component health details.
  components: {
    name: string;
    healthy: boolean;
    latency?: number;   // milliseconds, where measured
    errorRate?: number;
    message?: string;   // human-readable detail (e.g. node counts or failure reason)
  }[];
  // ISO-8601 timestamp of the last health evaluation.
  timestamp: string;
}
/**
 * NeuralBridgeServer — MCP server bridging Claude Desktop and WidgeTDC.
 * Registers tool/resource request handlers and runs a periodic health monitor.
 */
class NeuralBridgeServer {
  private server: Server;
  // Last computed health snapshot; refreshed every 30s by startHealthMonitoring()
  // and on demand by the get_system_health tool.
  private systemHealth: SystemHealth = {
    status: 'healthy',
    score: 1.0,
    components: [],
    timestamp: new Date().toISOString()
  };
  // Combined tool registry
  // NOTE(review): typed `any[]`; entries appear to carry
  // { name, description, inputSchema, handler } — confirm against the tool modules.
  private tools: any[] = [
    ...FILE_SYSTEM_TOOLS,
    ...GRAPH_TOOLS,
    ...AGENT_TOOLS
  ];
/**
 * Creates the underlying MCP Server instance, registers all request
 * handlers, and starts the periodic health monitor.
 */
constructor() {
  const serverInfo = {
    name: 'widgetdc-neural-bridge',
    version: '2.1.0', // Updated version
  };
  const serverOptions = {
    capabilities: {
      tools: {},
      resources: {},
    },
  };

  this.server = new Server(serverInfo, serverOptions);

  this.setupHandlers();
  this.startHealthMonitoring();
}
/**
 * Registers all MCP request handlers:
 *  - ListTools: advertises built-in + modular tool schemas
 *  - CallTool: dispatches to modular tool handlers or built-in handlers
 *  - ListResources / ReadResource: exposes health, dropzone, archive and graph data
 *
 * Fixes vs. previous revision:
 *  - modular-tool dispatch moved inside try/catch so their failures return a
 *    structured MCP error instead of rejecting the request
 *  - `catch (error: any)` -> `catch (error: unknown)` with safe narrowing
 *  - ReadResource switch cases braced (const declarations were un-scoped)
 *  - removed leftover editing-assistant meta-comments
 */
private setupHandlers(): void {
  // List available tools
  this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
    tools: [
      {
        name: 'get_system_health',
        description: 'Get WidgeTDC system health status including Neo4j and all adapters',
        inputSchema: {
          type: 'object',
          properties: {
            detailed: {
              type: 'boolean',
              description: 'Include detailed component health'
            }
          }
        }
      },
      {
        name: 'execute_widget_command',
        description: 'Execute a command in WidgeTDC system',
        inputSchema: {
          type: 'object',
          properties: {
            command: {
              type: 'string',
              enum: ['harvest', 'analyze', 'search', 'status', 'refresh'],
              description: 'Command to execute'
            },
            params: {
              type: 'object',
              description: 'Command parameters'
            }
          },
          required: ['command']
        }
      },
      // Include modular tools
      ...this.tools.map(t => ({
        name: t.name,
        description: t.description,
        inputSchema: t.inputSchema
      })),
      // ═══════════════════════════════════════════════════════════════
      // PRD to Prototype Tools
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'generate_prototype',
        description: 'Generate an HTML prototype from a PRD document. Returns complete functional HTML code.',
        inputSchema: {
          type: 'object',
          properties: {
            prdContent: {
              type: 'string',
              description: 'The PRD content (text, markdown, or [PDF:base64] prefixed base64 data)'
            },
            style: {
              type: 'string',
              enum: ['modern', 'minimal', 'corporate', 'tdc-brand'],
              description: 'Visual style for the prototype (default: modern)'
            },
            locale: {
              type: 'string',
              description: 'Locale for UI text (default: da-DK)'
            }
          },
          required: ['prdContent']
        }
      },
      {
        name: 'save_prototype',
        description: 'Save a generated prototype to the database and Neo4j knowledge graph',
        inputSchema: {
          type: 'object',
          properties: {
            name: {
              type: 'string',
              description: 'Name for the prototype'
            },
            htmlContent: {
              type: 'string',
              description: 'The HTML content of the prototype'
            },
            prdId: {
              type: 'string',
              description: 'Optional ID of the source PRD document'
            }
          },
          required: ['name', 'htmlContent']
        }
      },
      {
        name: 'list_prototypes',
        description: 'List all saved prototypes',
        inputSchema: {
          type: 'object',
          properties: {}
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 🧠 COGNITIVE SENSES - Neural Bridge v2.2
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'activate_associative_memory',
        description: 'The Cortical Flash: Simulates brain-wide activation. Combines semantic search with graph traversal for full contextual memory recall.',
        inputSchema: {
          type: 'object',
          properties: {
            concept: {
              type: 'string',
              description: 'The core concept to activate (e.g., "GDPR", "Showpad", "Authentication")'
            },
            depth: {
              type: 'number',
              description: 'Graph traversal depth (default: 2)'
            }
          },
          required: ['concept']
        }
      },
      {
        name: 'sense_molecular_state',
        description: 'The Olfactory Sense: Calculates file integrity (MD5 hash) and compares with Graph Memory to detect mutations/changes.',
        inputSchema: {
          type: 'object',
          properties: {
            path: {
              type: 'string',
              description: 'Absolute path to the file to sense'
            }
          },
          required: ['path']
        }
      },
      {
        name: 'emit_sonar_pulse',
        description: 'The Sonar Pulse: Active echolocation to measure service distance (latency) and health quality.',
        inputSchema: {
          type: 'object',
          properties: {
            target: {
              type: 'string',
              enum: ['neo4j', 'postgres', 'internet', 'filesystem', 'backend'],
              description: 'Target to ping'
            }
          },
          required: ['target']
        }
      },
      {
        name: 'trigger_ingestion',
        description: 'Trigger ingestion pipeline for email or calendar sources (mocked until auth is wired).',
        inputSchema: {
          type: 'object',
          properties: {
            source: {
              type: 'string',
              enum: ['email', 'calendar'],
              description: 'Ingestion source to trigger'
            },
            limit: {
              type: 'number',
              description: 'Optional max items to fetch (default 5)'
            }
          },
          required: ['source']
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 🧩 CURIOSITY & SENTINEL - Knowledge Gap Tracking v2.2
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'register_knowledge_gap',
        description: 'Register a gap in the system\'s knowledge. Used when AI identifies missing information that needs to be acquired. Supports ONE_OFF (single answer needed) vs CONSTANT_STREAM (continuous monitoring).',
        inputSchema: {
          type: 'object',
          properties: {
            query: {
              type: 'string',
              description: 'The question or topic where knowledge is missing (e.g., "What is the current Neo4j version?", "Monitor competitor pricing")'
            },
            lifecycle: {
              type: 'string',
              enum: ['ONE_OFF', 'CONSTANT_STREAM'],
              description: 'ONE_OFF: Single answer needed, then resolved. CONSTANT_STREAM: Continuous monitoring/updates required.'
            },
            priority: {
              type: 'string',
              enum: ['low', 'medium', 'high', 'critical'],
              description: 'Priority level for addressing this gap (default: medium)'
            },
            context: {
              type: 'string',
              description: 'Additional context about why this knowledge is needed'
            },
            tags: {
              type: 'array',
              items: { type: 'string' },
              description: 'Tags for categorizing the knowledge gap'
            }
          },
          required: ['query', 'lifecycle']
        }
      },
      {
        name: 'get_knowledge_gaps',
        description: 'Retrieve registered knowledge gaps. Filter by status, lifecycle, or priority.',
        inputSchema: {
          type: 'object',
          properties: {
            status: {
              type: 'string',
              enum: ['OPEN', 'IN_PROGRESS', 'RESOLVED', 'STALE'],
              description: 'Filter by gap status'
            },
            lifecycle: {
              type: 'string',
              enum: ['ONE_OFF', 'CONSTANT_STREAM'],
              description: 'Filter by lifecycle type'
            },
            priority: {
              type: 'string',
              enum: ['low', 'medium', 'high', 'critical'],
              description: 'Filter by priority'
            },
            limit: {
              type: 'number',
              description: 'Maximum gaps to return (default: 20)'
            }
          }
        }
      },
      {
        name: 'resolve_knowledge_gap',
        description: 'Mark a knowledge gap as resolved with the acquired knowledge/answer.',
        inputSchema: {
          type: 'object',
          properties: {
            gapId: {
              type: 'string',
              description: 'The ID of the knowledge gap to resolve'
            },
            resolution: {
              type: 'string',
              description: 'The answer/knowledge that fills the gap'
            },
            source: {
              type: 'string',
              description: 'Where the knowledge was acquired from'
            },
            keepMonitoring: {
              type: 'boolean',
              description: 'For CONSTANT_STREAM gaps: keep monitoring even after resolution (default: true for CONSTANT_STREAM, false for ONE_OFF)'
            }
          },
          required: ['gapId', 'resolution']
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 🤖 SENTINEL ENGINE - Advanced Autonomous Features v2.3
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'rate_resolution',
        description: 'Provide feedback on a resolution quality. This trains the system to improve future resolutions.',
        inputSchema: {
          type: 'object',
          properties: {
            gapId: {
              type: 'string',
              description: 'The ID of the knowledge gap'
            },
            resolutionIndex: {
              type: 'number',
              description: 'Which resolution to rate (0 = most recent, 1 = second most recent, etc.)'
            },
            wasUseful: {
              type: 'boolean',
              description: 'Was this resolution actually useful?'
            },
            qualityScore: {
              type: 'number',
              description: 'Quality score from 0.0 (useless) to 1.0 (perfect)'
            }
          },
          required: ['gapId', 'wasUseful', 'qualityScore']
        }
      },
      {
        name: 'get_learning_insights',
        description: 'Get AI learning insights - which sources perform best, recommended patterns for different gap types.',
        inputSchema: {
          type: 'object',
          properties: {}
        }
      },
      {
        name: 'get_sentinel_status',
        description: 'Get the status of the Sentinel Engine including temporal monitoring state, pending checks, and system health.',
        inputSchema: {
          type: 'object',
          properties: {
            includeStaleGaps: {
              type: 'boolean',
              description: 'Include list of stale gaps'
            },
            includeDueForCheck: {
              type: 'boolean',
              description: 'Include gaps due for scheduled check'
            }
          }
        }
      },
      {
        name: 'set_gap_schedule',
        description: 'Configure temporal monitoring schedule for a CONSTANT_STREAM gap.',
        inputSchema: {
          type: 'object',
          properties: {
            gapId: {
              type: 'string',
              description: 'The ID of the knowledge gap'
            },
            checkIntervalHours: {
              type: 'number',
              description: 'How often to check for updates (in hours). Default is 24.'
            }
          },
          required: ['gapId', 'checkIntervalHours']
        }
      },
      {
        name: 'get_resolution_history',
        description: 'Get the full resolution history for a knowledge gap, including feedback scores.',
        inputSchema: {
          type: 'object',
          properties: {
            gapId: {
              type: 'string',
              description: 'The ID of the knowledge gap'
            }
          },
          required: ['gapId']
        }
      },
      {
        name: 'trigger_gap_resolution',
        description: 'Manually trigger auto-resolution for a specific gap. The Sentinel will attempt to resolve using the best strategy for the gap type.',
        inputSchema: {
          type: 'object',
          properties: {
            gapId: {
              type: 'string',
              description: 'The ID of the knowledge gap to resolve'
            }
          },
          required: ['gapId']
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 🎭 THE MUSE - Idea Incubation System v1.0
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'incubate_idea',
        description: 'Store a spontaneous idea for later consideration. This tool ONLY saves data - it does NOT trigger any actions. Ideas are \"incubated\" and can be promoted to tasks later.',
        inputSchema: {
          type: 'object',
          properties: {
            title: {
              type: 'string',
              description: 'Short title for the idea'
            },
            hypothesis: {
              type: 'string',
              description: 'The full idea/hypothesis description'
            },
            confidence: {
              type: 'number',
              description: 'Confidence level 0.0-1.0 that this idea is worth pursuing'
            },
            tags: {
              type: 'array',
              items: { type: 'string' },
              description: 'Tags for categorizing the idea (e.g., [\"feature\", \"optimization\", \"experiment\"])'
            },
            agent: {
              type: 'string',
              description: 'Which agent proposed this idea (e.g., \"claude\", \"gemini\", \"sentinel\")'
            },
            relatedTo: {
              type: 'string',
              description: 'Optional: ID of related entity (gap, node, etc.)'
            }
          },
          required: ['title', 'hypothesis']
        }
      },
      {
        name: 'get_ideas',
        description: 'Retrieve incubated ideas. Filter by status, tags, confidence threshold, or agent.',
        inputSchema: {
          type: 'object',
          properties: {
            status: {
              type: 'string',
              enum: ['INCUBATED', 'PROMOTED', 'REJECTED', 'IMPLEMENTED'],
              description: 'Filter by idea status'
            },
            tag: {
              type: 'string',
              description: 'Filter by tag'
            },
            minConfidence: {
              type: 'number',
              description: 'Minimum confidence threshold (0.0-1.0)'
            },
            agent: {
              type: 'string',
              description: 'Filter by proposing agent'
            },
            limit: {
              type: 'number',
              description: 'Maximum ideas to return (default: 20)'
            }
          }
        }
      },
      {
        name: 'promote_idea',
        description: 'Promote an incubated idea to a task or mark it as rejected/implemented.',
        inputSchema: {
          type: 'object',
          properties: {
            ideaId: {
              type: 'string',
              description: 'The ID of the idea'
            },
            action: {
              type: 'string',
              enum: ['PROMOTE', 'REJECT', 'IMPLEMENT'],
              description: 'Action to take on the idea'
            },
            reason: {
              type: 'string',
              description: 'Reason for the action'
            },
            taskDescription: {
              type: 'string',
              description: 'For PROMOTE: description of the task to create'
            }
          },
          required: ['ideaId', 'action']
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 👁️ THE VISUAL CORTEX - Frontend Perception v1.0
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'inspect_frontend',
        description: 'Captures a visual snapshot of the running WidgeTDC frontend using a headless browser. See exactly what the user sees. Useful for UI validation, visual regression detection, and console error monitoring.',
        inputSchema: {
          type: 'object',
          properties: {
            route: {
              type: 'string',
              description: 'The route to visit (e.g., \"/\" or \"/dashboard\"). Defaults to root.'
            },
            action: {
              type: 'string',
              enum: ['SCREENSHOT', 'ANALYZE_LAYOUT', 'CHECK_CONSOLE_ERRORS', 'FULL_AUDIT'],
              description: 'SCREENSHOT: Capture visual snapshot. ANALYZE_LAYOUT: Describe page structure. CHECK_CONSOLE_ERRORS: Report browser console errors. FULL_AUDIT: All of the above.'
            },
            selector: {
              type: 'string',
              description: 'Optional: Focus on a specific element (e.g., \"#widget-1\", \".navbar\").'
            },
            viewport: {
              type: 'object',
              properties: {
                width: { type: 'number', description: 'Viewport width (default: 1920)' },
                height: { type: 'number', description: 'Viewport height (default: 1080)' }
              },
              description: 'Custom viewport dimensions'
            },
            waitFor: {
              type: 'number',
              description: 'Milliseconds to wait after page load before action (default: 2000)'
            }
          },
          required: ['action']
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 🎨 LIQUID UI ENGINE - Dynamic Component Rendering v1.0
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'render_ui_component',
        description: 'Renders a dynamic UI component in the chat interface. Use this instead of plain text when showing metrics, diffs, graphs, logs, or other visual data. The frontend will render the appropriate smart component.',
        inputSchema: {
          type: 'object',
          properties: {
            component: {
              type: 'string',
              enum: [
                // Liquid UI Components
                'LiveMetricGauge',
                'CodeDiffViewer',
                'LogStreamViewer',
                'InteractiveForceGraph',
                'KnowledgeGapCard',
                'AgentProcessTracker',
                'IdeaStickyNote',
                // Visual Cortex Diagram Components
                'MermaidDiagram',
                'ArchitectureView',
                'SequenceDiagram',
                'FlowchartView',
                'MindMapView'
              ],
              description: 'The component to render. LIQUID UI: LiveMetricGauge (metrics), CodeDiffViewer (code diff), LogStreamViewer (logs), InteractiveForceGraph (graph), KnowledgeGapCard (gaps), AgentProcessTracker (tasks), IdeaStickyNote (ideas). VISUAL CORTEX: MermaidDiagram (any Mermaid DSL), ArchitectureView (system architecture), SequenceDiagram (interaction flows), FlowchartView (processes), MindMapView (brainstorming).'
            },
            props: {
              type: 'object',
              description: 'Component-specific properties. See each component for required/optional props.',
              additionalProperties: true
            }
          },
          required: ['component', 'props']
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 🎨 VISUAL CORTEX - Diagram Generation & Export v1.0
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'generate_diagram',
        description: 'Generates diagram code from natural language description. Returns Mermaid syntax that can be rendered with render_ui_component.',
        inputSchema: {
          type: 'object',
          properties: {
            description: {
              type: 'string',
              description: 'Natural language description of the diagram you want to create'
            },
            diagram_type: {
              type: 'string',
              enum: ['flowchart', 'sequence', 'architecture', 'mindmap', 'er', 'gantt', 'pie', 'class'],
              description: 'Type of diagram to generate'
            },
            style: {
              type: 'string',
              enum: ['minimal', 'detailed', 'technical'],
              description: 'Level of detail in the diagram'
            }
          },
          required: ['description', 'diagram_type']
        }
      },
      {
        name: 'export_visual',
        description: 'Exports a diagram or presentation to a downloadable format (SVG, PNG, PPTX).',
        inputSchema: {
          type: 'object',
          properties: {
            content_type: {
              type: 'string',
              enum: ['diagram', 'presentation'],
              description: 'What to export'
            },
            mermaid_code: {
              type: 'string',
              description: 'Mermaid diagram code (for diagram export)'
            },
            slides: {
              type: 'array',
              items: {
                type: 'object',
                properties: {
                  title: { type: 'string' },
                  bullets: { type: 'array', items: { type: 'string' } },
                  notes: { type: 'string' }
                }
              },
              description: 'Slide content (for presentation export)'
            },
            format: {
              type: 'string',
              enum: ['svg', 'pptx'],
              description: 'Export format'
            },
            title: {
              type: 'string',
              description: 'Title for the export'
            }
          },
          required: ['content_type', 'format']
        }
      },
      // ═══════════════════════════════════════════════════════════════
      // 🧠 COGNITIVE MODULE HANDLERS - Specialized Brain Functions v1.0
      // ═══════════════════════════════════════════════════════════════
      {
        name: 'listen_to_logs',
        description: 'Listen to system logs and detect anomalies. The auditory system monitors log streams, detects error patterns, and alerts on anomalies.',
        inputSchema: {
          type: 'object',
          properties: {
            action: {
              type: 'string',
              enum: ['START_LISTENING', 'STOP_LISTENING', 'GET_SIGNALS', 'GET_ANOMALIES', 'ANALYZE_LOGS', 'GET_STATUS'],
              description: 'START_LISTENING: Begin monitoring a source. STOP_LISTENING: Stop monitoring. GET_SIGNALS: Get recent signals. GET_ANOMALIES: Get detected anomalies. ANALYZE_LOGS: Analyze log content. GET_STATUS: Get auditory system status.'
            },
            source: { type: 'string', description: 'Source to listen to (for START_LISTENING)' },
            sessionId: { type: 'string', description: 'Session ID (for STOP_LISTENING)' },
            logs: { type: 'array', items: { type: 'string' }, description: 'Log lines to analyze (for ANALYZE_LOGS)' },
            filter: {
              type: 'object',
              properties: {
                type: { type: 'string' },
                volume: { type: 'string' },
                limit: { type: 'number' }
              }
            }
          },
          required: ['action']
        }
      },
      {
        name: 'execute_action',
        description: 'Request and execute actions (git operations, file management, shell commands). Actions may require approval based on risk level.',
        inputSchema: {
          type: 'object',
          properties: {
            action: {
              type: 'string',
              enum: ['REQUEST', 'APPROVE', 'REJECT', 'GET_PENDING', 'GET_HISTORY', 'GET_STATUS'],
              description: 'REQUEST: Request a new action. APPROVE: Approve pending action. REJECT: Reject pending action. GET_PENDING: List pending actions. GET_HISTORY: Get action history. GET_STATUS: Get motor cortex status.'
            },
            actionType: {
              type: 'string',
              enum: ['GIT_COMMIT', 'GIT_PUSH', 'GIT_BRANCH', 'FILE_CREATE', 'FILE_MODIFY', 'FILE_DELETE', 'SHELL_COMMAND', 'NPM_INSTALL', 'NPM_RUN'],
              description: 'Type of action to execute (for REQUEST)'
            },
            description: { type: 'string', description: 'What the action does' },
            command: { type: 'string', description: 'Shell command (for SHELL_COMMAND)' },
            targetPath: { type: 'string', description: 'File path (for FILE_* actions)' },
            content: { type: 'string', description: 'File content (for FILE_CREATE/MODIFY)' },
            params: { type: 'object', description: 'Additional parameters' },
            actionId: { type: 'string', description: 'Action ID (for APPROVE/REJECT)' },
            requestedBy: { type: 'string', description: 'Who is requesting the action' }
          },
          required: ['action']
        }
      },
      {
        name: 'memory_operation',
        description: 'Access and manage long-term memory. Record episodes, learn facts, recognize patterns.',
        inputSchema: {
          type: 'object',
          properties: {
            action: {
              type: 'string',
              enum: ['RECORD_EPISODE', 'RECALL_EPISODE', 'SEARCH_EPISODES', 'LEARN_FACT', 'QUERY_FACTS', 'GET_PATTERNS', 'GET_STATUS'],
              description: 'RECORD_EPISODE: Store a sequence of events. RECALL_EPISODE: Retrieve and strengthen memory. SEARCH_EPISODES: Search past experiences. LEARN_FACT: Store a semantic fact. QUERY_FACTS: Query learned facts. GET_PATTERNS: Get recognized patterns. GET_STATUS: Get memory status.'
            },
            episode: {
              type: 'object',
              properties: {
                title: { type: 'string' },
                description: { type: 'string' },
                events: { type: 'array', items: { type: 'object' } },
                outcome: { type: 'string', enum: ['SUCCESS', 'FAILURE', 'PARTIAL', 'UNKNOWN'] },
                lessons: { type: 'array', items: { type: 'string' } },
                tags: { type: 'array', items: { type: 'string' } }
              },
              description: 'Episode data (for RECORD_EPISODE)'
            },
            episodeId: { type: 'string', description: 'Episode ID (for RECALL_EPISODE)' },
            fact: {
              type: 'object',
              properties: {
                subject: { type: 'string' },
                predicate: { type: 'string' },
                object: { type: 'string' },
                source: { type: 'string' }
              },
              description: 'Fact to learn (for LEARN_FACT)'
            },
            query: {
              type: 'object',
              properties: {
                keywords: { type: 'array', items: { type: 'string' } },
                tags: { type: 'array', items: { type: 'string' } },
                minImportance: { type: 'number' },
                limit: { type: 'number' }
              },
              description: 'Search query (for SEARCH_EPISODES/QUERY_FACTS)'
            }
          },
          required: ['action']
        }
      },
      {
        name: 'strategic_planning',
        description: 'Manage goals, create plans, make decisions. The executive function for coordinating system behavior.',
        inputSchema: {
          type: 'object',
          properties: {
            action: {
              type: 'string',
              enum: ['CREATE_GOAL', 'UPDATE_GOAL', 'GET_GOALS', 'CREATE_PLAN', 'UPDATE_PLAN_STEP', 'CREATE_DECISION', 'MAKE_DECISION', 'SET_FOCUS', 'GET_SUMMARY'],
              description: 'CREATE_GOAL: Define a new objective. UPDATE_GOAL: Update goal progress/status. GET_GOALS: List goals. CREATE_PLAN: Create action plan for goal. UPDATE_PLAN_STEP: Update step in plan. CREATE_DECISION: Create decision to be made. MAKE_DECISION: Select an option. SET_FOCUS: Set current focus. GET_SUMMARY: Get executive summary.'
            },
            goal: {
              type: 'object',
              properties: {
                title: { type: 'string' },
                description: { type: 'string' },
                priority: { type: 'string', enum: ['CRITICAL', 'HIGH', 'MEDIUM', 'LOW'] },
                timeframe: { type: 'string', enum: ['IMMEDIATE', 'SHORT_TERM', 'MEDIUM_TERM', 'LONG_TERM'] },
                successCriteria: { type: 'array', items: { type: 'string' } },
                tags: { type: 'array', items: { type: 'string' } }
              },
              description: 'Goal data (for CREATE_GOAL)'
            },
            goalId: { type: 'string', description: 'Goal ID (for UPDATE_GOAL, CREATE_PLAN, SET_FOCUS)' },
            progress: { type: 'number', description: 'Progress 0-100 (for UPDATE_GOAL)' },
            status: { type: 'string', description: 'New status (for UPDATE_GOAL)' },
            plan: {
              type: 'object',
              properties: {
                title: { type: 'string' },
                steps: { type: 'array', items: { type: 'object' } },
                estimatedEffort: { type: 'string' }
              },
              description: 'Plan data (for CREATE_PLAN)'
            },
            planId: { type: 'string', description: 'Plan ID (for UPDATE_PLAN_STEP)' },
            stepId: { type: 'string', description: 'Step ID (for UPDATE_PLAN_STEP)' },
            stepStatus: { type: 'string', description: 'Step status (for UPDATE_PLAN_STEP)' },
            decision: {
              type: 'object',
              properties: {
                question: { type: 'string' },
                context: { type: 'string' },
                options: { type: 'array', items: { type: 'object' } }
              },
              description: 'Decision data (for CREATE_DECISION)'
            },
            decisionId: { type: 'string', description: 'Decision ID (for MAKE_DECISION)' },
            optionId: { type: 'string', description: 'Selected option ID (for MAKE_DECISION)' },
            rationale: { type: 'string', description: 'Reason for decision (for MAKE_DECISION)' },
            filter: {
              type: 'object',
              properties: {
                status: { type: 'string' },
                priority: { type: 'string' },
                timeframe: { type: 'string' }
              },
              description: 'Filter for GET_GOALS'
            }
          },
          required: ['action']
        }
      }
    ]
  }));

  // Handle tool calls
  this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
    const { name, arguments: args } = request.params;

    try {
      // Check if it's one of our modular tools.
      // (Inside try/catch so modular-tool failures also produce a structured
      // MCP error response instead of rejecting the whole request.)
      const tool = this.tools.find(t => t.name === name);
      if (tool && tool.handler) {
        return await tool.handler(args);
      }

      switch (name) {
        case 'get_system_health':
          return await this.handleGetSystemHealth(args);
        case 'execute_widget_command':
          return await this.handleExecuteCommand(args);
        // ═══════════════════════════════════════════════════════════
        // 🧠 Cognitive Sense Handlers
        // ═══════════════════════════════════════════════════════════
        case 'activate_associative_memory':
          return await this.handleAssociativeMemory(args);
        case 'sense_molecular_state':
          return await this.handleMolecularSense(args);
        case 'emit_sonar_pulse':
          return await this.handleSonarPulse(args);
        case 'trigger_ingestion':
          return await this.handleTriggerIngestion(args);
        // ═══════════════════════════════════════════════════════════
        // 🧩 Knowledge Gap Handlers
        // ═══════════════════════════════════════════════════════════
        case 'register_knowledge_gap':
          return await this.handleRegisterKnowledgeGap(args);
        case 'get_knowledge_gaps':
          return await this.handleGetKnowledgeGaps(args);
        case 'resolve_knowledge_gap':
          return await this.handleResolveKnowledgeGap(args);
        // ═══════════════════════════════════════════════════════════
        // 🤖 Sentinel Engine Handlers
        // ═══════════════════════════════════════════════════════════
        case 'rate_resolution':
          return await this.handleRateResolution(args);
        case 'get_learning_insights':
          return await this.handleGetLearningInsights(args);
        case 'get_sentinel_status':
          return await this.handleGetSentinelStatus(args);
        case 'set_gap_schedule':
          return await this.handleSetGapSchedule(args);
        case 'get_resolution_history':
          return await this.handleGetResolutionHistory(args);
        case 'trigger_gap_resolution':
          return await this.handleTriggerGapResolution(args);
        // ═══════════════════════════════════════════════════════════
        // 🎭 The Muse Handlers
        // ═══════════════════════════════════════════════════════════
        case 'incubate_idea':
          return await this.handleIncubateIdea(args);
        case 'get_ideas':
          return await this.handleGetIdeas(args);
        case 'promote_idea':
          return await this.handlePromoteIdea(args);
        // ═══════════════════════════════════════════════════════════
        // 👁️ Visual Cortex Handler
        // ═══════════════════════════════════════════════════════════
        case 'inspect_frontend':
          return await this.handleInspectFrontend(args);
        // ═══════════════════════════════════════════════════════════
        // 🎨 Liquid UI Handler
        // ═══════════════════════════════════════════════════════════
        case 'render_ui_component':
          return await this.handleRenderUIComponent(args);
        case 'generate_diagram':
          return await this.handleGenerateDiagram(args);
        case 'export_visual':
          return await this.handleExportVisual(args);
        // ═══════════════════════════════════════════════════════════
        // 🧠 Cognitive Module Handlers
        // ═══════════════════════════════════════════════════════════
        case 'listen_to_logs':
          return await this.handleListenToLogs(args);
        case 'execute_action':
          return await this.handleExecuteAction(args);
        case 'memory_operation':
          return await this.handleMemoryOperation(args);
        case 'strategic_planning':
          return await this.handleStrategicPlanning(args);
        default:
          throw new Error(`Unknown tool: ${name}`);
      }
    } catch (error: unknown) {
      // Narrow safely instead of assuming `any` has a .message.
      const message = error instanceof Error ? error.message : String(error);
      return {
        content: [{
          type: 'text',
          text: `Error: ${message}`
        }],
        isError: true
      };
    }
  });

  // List resources
  this.server.setRequestHandler(ListResourcesRequestSchema, async () => ({
    resources: [
      {
        uri: 'widgetdc://health',
        name: 'System Health',
        description: 'Current WidgeTDC system health status',
        mimeType: 'application/json'
      },
      {
        uri: 'widgetdc://dropzone',
        name: 'DropZone Files',
        description: 'Files in the safe DropZone folder',
        mimeType: 'application/json'
      },
      {
        uri: 'widgetdc://vidensarkiv',
        name: 'Knowledge Archive',
        description: 'Files in the vidensarkiv',
        mimeType: 'application/json'
      },
      {
        uri: 'widgetdc://graph',
        name: 'Knowledge Graph',
        description: 'Neo4j knowledge graph statistics',
        mimeType: 'application/json'
      }
    ]
  }));

  // Read resources
  this.server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
    const { uri } = request.params;
    switch (uri) {
      case 'widgetdc://health': {
        return {
          contents: [{
            uri,
            mimeType: 'application/json',
            text: JSON.stringify(this.systemHealth, null, 2)
          }]
        };
      }
      case 'widgetdc://dropzone': {
        await ensureSafeZoneExists(); // From FileSystemTools
        const files = await listSafeFiles(SAFE_DESKTOP_PATH);
        return {
          contents: [{
            uri,
            mimeType: 'application/json',
            text: JSON.stringify(files, null, 2)
          }]
        };
      }
      case 'widgetdc://vidensarkiv': {
        const arkivFiles = await listSafeFiles(VIDENSARKIV_PATH, true);
        return {
          contents: [{
            uri,
            mimeType: 'application/json',
            text: JSON.stringify(arkivFiles, null, 2)
          }]
        };
      }
      case 'widgetdc://graph': {
        const graphHealth = await neo4jAdapter.healthCheck();
        return {
          contents: [{
            uri,
            mimeType: 'application/json',
            text: JSON.stringify(graphHealth, null, 2)
          }]
        };
      }
      default:
        throw new Error(`Unknown resource: ${uri}`);
    }
  });
}
// ═══════════════════════════════════════════════════════════════════════
// Core Handlers (System Health, etc.)
// ═══════════════════════════════════════════════════════════════════════
/**
 * Tool handler: get_system_health.
 * Refreshes the cached snapshot, then returns either the full component
 * breakdown (detailed=true) or just the status/score/timestamp summary.
 */
private async handleGetSystemHealth(args: any) {
  const wantDetails = args?.detailed ?? false;
  await this.updateSystemHealth();

  let response;
  if (wantDetails) {
    response = this.systemHealth;
  } else {
    const { status, score, timestamp } = this.systemHealth;
    response = { status, score, timestamp };
  }

  return {
    content: [{
      type: 'text',
      text: JSON.stringify(response, null, 2)
    }]
  };
}
/**
 * Tool handler: execute_widget_command.
 * Dispatches one of the predefined commands and returns its result as JSON text.
 *
 * Fix: the previous version eagerly built all five result objects on every
 * call, which also probed neo4jAdapter.isHealthy() even when the command was
 * not 'status'. Only the requested command's result is built now; outputs are
 * unchanged for every valid command, and unknown commands still yield
 * { error: 'Unknown command' }.
 */
private async handleExecuteCommand(args: any) {
  const { command, params } = args;

  let result: Record<string, any>;
  switch (command) {
    case 'harvest':
      result = {
        action: 'OmniHarvester scan initiated',
        status: 'pending',
        message: 'Scan will run in background'
      };
      break;
    case 'analyze':
      result = {
        action: 'Analysis requested',
        status: 'queued',
        target: params?.target || 'all'
      };
      break;
    case 'search':
      result = {
        action: 'Search executed',
        query: params?.query || '',
        results: []
      };
      break;
    case 'status':
      result = {
        action: 'Status check',
        services: {
          backend: 'running',
          neo4j: neo4jAdapter.isHealthy() ? 'connected' : 'disconnected',
          mcp: 'active'
        }
      };
      break;
    case 'refresh':
      result = {
        action: 'Cache refresh',
        status: 'completed',
        timestamp: new Date().toISOString()
      };
      break;
    default:
      result = { error: 'Unknown command' };
  }

  return {
    content: [{
      type: 'text',
      text: JSON.stringify(result, null, 2)
    }]
  };
}
/**
 * Recomputes the cached SystemHealth snapshot.
 * Only Neo4j is actually probed; the remaining components report fixed
 * values (presumably placeholders until real probes exist — TODO confirm).
 * Score = healthy components / total; status thresholds: >=0.8 healthy,
 * >=0.5 degraded, otherwise critical.
 */
private async updateSystemHealth(): Promise<void> {
  // Check Neo4j health; a rejected probe is treated as "disconnected".
  const neo4jHealth: any = await neo4jAdapter.healthCheck().catch(() => ({
    connected: false,
    lastCheck: new Date().toISOString()
  }));

  const neo4jMessage = neo4jHealth.connected
    ? `${neo4jHealth.nodeCount || 0} nodes, ${neo4jHealth.relationshipCount || 0} relationships`
    : 'Connection failed';

  const components = [
    { name: 'Backend Server', healthy: true, latency: 12 },
    {
      name: 'Neo4j Database',
      healthy: neo4jHealth.connected,
      latency: neo4jHealth.latencyMs || 0,
      message: neo4jMessage
    },
    { name: 'MCP WebSocket', healthy: true, latency: 5 },
    { name: 'OmniHarvester', healthy: true, latency: 0 },
    { name: 'SelfHealingAdapter', healthy: true, latency: 8 }
  ];

  const healthyCount = components.filter(c => c.healthy).length;
  const score = healthyCount / components.length;

  let status: SystemHealth['status'];
  if (score >= 0.8) {
    status = 'healthy';
  } else if (score >= 0.5) {
    status = 'degraded';
  } else {
    status = 'critical';
  }

  this.systemHealth = {
    status,
    score,
    components,
    timestamp: new Date().toISOString()
  };
}
private startHealthMonitoring(): void {
setInterval(() => {
this.updateSystemHealth().catch(console.error);
}, 30000);
}
// ═══════════════════════════════════════════════════════════════════════
// 🧠 COGNITIVE SENSE HANDLERS - Neural Bridge v2.2
// ═══════════════════════════════════════════════════════════════════════
/**
* The Cortical Flash (Hukommelse)
* Aktiverer "hele hjernen" ved at kombinere semantisk sΓΈgning med graf-relationer
*/
/**
 * The Cortical Flash (Hukommelse)
 * Activates "the whole brain" by combining semantic lookup with graph
 * relation traversal around a concept.
 *
 * @param args.concept Concept string to activate (substring-matched)
 * @param args.depth   Relationship traversal depth (default 2, clamped 1..5)
 */
private async handleAssociativeMemory(args: any) {
  const { concept, depth = 2 } = args;
  // `depth` is spliced into the Cypher variable-length pattern below
  // (Neo4j does not allow parameters inside `[r*1..n]`), so it MUST be
  // clamped to a small positive integer to prevent Cypher injection and
  // runaway traversals.
  const safeDepth = Math.min(5, Math.max(1, Math.trunc(Number(depth)) || 1));
  const memoryTrace: any = {
    concept,
    activationTime: new Date().toISOString(),
    semanticHits: [],
    graphContext: [],
    associatedConcepts: []
  };
  try {
    // Phase 1: Direct concept search in graph
    const directHits = await neo4jAdapter.executeQuery(`
      MATCH (n)
      WHERE n.name CONTAINS $concept OR n.description CONTAINS $concept
      RETURN n.name as name, labels(n) as labels, n.description as description
      LIMIT 10
    `, { concept });
    memoryTrace.semanticHits = directHits;
    // Phase 2: Graph traversal - expand activation outward from the concept
    const graphExpansion = await neo4jAdapter.executeQuery(`
      MATCH (center)-[r*1..${safeDepth}]-(related)
      WHERE center.name CONTAINS $concept
      RETURN DISTINCT related.name as name,
             labels(related) as labels,
             type(r[0]) as relationshipType
      LIMIT 20
    `, { concept });
    memoryTrace.graphContext = graphExpansion;
    // Phase 3: Find associated concepts (co-occurrence via outgoing edges)
    const associations = await neo4jAdapter.executeQuery(`
      MATCH (n)-[r]->(m)
      WHERE n.name CONTAINS $concept
      RETURN DISTINCT m.name as associated, type(r) as via, count(*) as strength
      ORDER BY strength DESC
      LIMIT 10
    `, { concept });
    memoryTrace.associatedConcepts = associations;
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          sense: 'CORTICAL_FLASH',
          memoryTrace,
          summary: {
            directHits: directHits.length,
            expandedContext: graphExpansion.length,
            associations: associations.length,
            traversalDepth: safeDepth
          }
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          sense: 'CORTICAL_FLASH',
          error: error.message,
          concept
        }, null, 2)
      }]
    };
  }
}
/**
* Trigger ingestion for email/calendar sources (mocked until auth is wired)
*/
/**
 * Trigger ingestion for email/calendar sources (mocked until auth is wired).
 * Fetches recent items, transforms them to graph nodes and upserts them into
 * Neo4j, linking owned nodes to their :User and tagging them :Private.
 *
 * @param args.source 'email' | 'calendar' (anything else is rejected)
 * @param args.limit  Max items to fetch (default 5)
 */
private async handleTriggerIngestion(args: any) {
  const source = args?.source;
  const limit = typeof args?.limit === 'number' ? args.limit : 5;
  let ingestor;
  switch (source) {
    case 'email':
      ingestor = new EmailIngestor();
      break;
    case 'calendar':
      ingestor = new CalendarIngestor();
      break;
    default:
      return {
        content: [{
          type: 'text',
          text: JSON.stringify({
            success: false,
            error: 'Invalid source. Use "email" or "calendar".'
          }, null, 2)
        }]
      };
  }
  await ingestor.connect();
  const items = await ingestor.fetchRecent(limit);
  const nodes = items.map((item: any) => ingestor.transformToGraphNode(item));
  const persisted: string[] = [];
  for (const node of nodes) {
    // Labels are spliced into Cypher, so strip unsafe characters. Labels that
    // become empty after sanitization (e.g. pure punctuation) must be dropped,
    // otherwise they would render as a bare ':' and break the query.
    const labels = (node.labels && node.labels.length ? node.labels : ['Private'])
      .map((l: string) => l.replace(/[^A-Za-z0-9_]/g, ''))
      .filter((l: string) => l.length > 0);
    if (labels.length === 0) {
      labels.push('Private');
    }
    const labelString = labels.map((l: string) => `:${l}`).join('');
    const props: any = { ...(node.properties || {}) };
    if (!props.externalId) {
      props.externalId = randomUUID();
    }
    if (node.ownerUid) {
      props.belongsTo = node.ownerUid;
      // Owned data is always private.
      if (!labels.includes('Private')) {
        labels.push('Private');
      }
    }
    const cypher = `
      MERGE (n${labelString} {externalId: $props.externalId})
      SET n += $props
      WITH n
      CALL {
        WITH n
        WHERE $ownerUid IS NOT NULL
        MERGE (u:User {uid: $ownerUid})
        ON CREATE SET u.name = 'Claus', u.role = 'Executive', u.access_level = 'god_mode'
        MERGE (n)-[:BELONGS_TO]->(u)
        RETURN count(*) AS rels
      }
      RETURN n.externalId AS externalId
    `;
    const res = await neo4jAdapter.executeQuery(cypher, { props, ownerUid: node.ownerUid ?? null });
    if (res && res[0]?.externalId) {
      persisted.push(res[0].externalId);
    }
  }
  return {
    content: [{
      type: 'text',
      text: JSON.stringify({
        success: true,
        source,
        fetched: items.length,
        persisted: persisted.length,
        sampleNodes: nodes.slice(0, 3)
      }, null, 2)
    }]
  };
}
/**
* The Olfactory Sense (Integritet)
* "Lugter" til filer via MD5 Hash for at opdage mutationer
*/
/**
 * The Olfactory Sense (Integritet)
 * Fingerprints a file via MD5 and compares it to the hash stored in Neo4j to
 * classify the file as STASIS (unchanged), MUTATION (changed) or NEW_ENTITY.
 * The fresh fingerprint is always upserted back into the graph.
 */
private async handleMolecularSense(args: any) {
  const { path: filePath } = args;
  const crypto = await import('crypto');
  try {
    // Compute the current fingerprint.
    const contents = await fs.readFile(filePath);
    const olfactoryHash = crypto.createHash('md5').update(contents).digest('hex');
    const stats = await fs.stat(filePath);
    // Look up the previously recorded fingerprint, if any.
    const storedState = await neo4jAdapter.executeQuery(`
      MATCH (f:File {path: $path})
      RETURN f.hash as storedHash, f.lastSeen as lastSeen
    `, { path: filePath });
    let status: 'STASIS' | 'MUTATION' | 'NEW_ENTITY' = 'NEW_ENTITY';
    let mutation = null;
    if (storedState.length > 0) {
      const { storedHash, lastSeen } = storedState[0];
      status = storedHash === olfactoryHash ? 'STASIS' : 'MUTATION';
      if (status === 'MUTATION') {
        mutation = {
          previousHash: storedHash,
          currentHash: olfactoryHash,
          lastSeen
        };
      }
    }
    // Persist the fresh fingerprint (upsert).
    await neo4jAdapter.executeQuery(`
      MERGE (f:File {path: $path})
      SET f.hash = $hash,
          f.lastSeen = datetime(),
          f.size = $size,
          f.modified = $modified
    `, {
      path: filePath,
      hash: olfactoryHash,
      size: stats.size,
      modified: stats.mtime.toISOString()
    });
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          sense: 'OLFACTORY',
          status,
          olfactoryHash,
          mutation,
          file: {
            path: filePath,
            size: stats.size,
            modified: stats.mtime.toISOString()
          }
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          sense: 'OLFACTORY',
          error: error.message,
          path: filePath
        }, null, 2)
      }]
    };
  }
}
/**
* The Sonar Pulse (Ekkolod)
* MΓ₯ler afstand (latency) og "tekstur" (health) af services
*/
/**
 * The Sonar Pulse (Ekkolod)
 * Measures round-trip latency ("distance") and health ("texture") of a target
 * service, then classifies the echo into a distance field + quality band.
 *
 * @param args.target 'neo4j' | 'postgres' | 'filesystem' | 'internet' | 'backend'
 */
private async handleSonarPulse(args: any) {
  const { target } = args;
  const sonarEcho: any = {
    target,
    pulseTime: new Date().toISOString(),
    latencyMs: 0,
    quality: 'UNKNOWN',
    field: 'UNKNOWN'
  };
  const start = process.hrtime.bigint();
  let success = false;
  try {
    // Each case is braced so its `const` declarations are properly scoped
    // (avoids no-case-declarations hazards).
    switch (target) {
      case 'neo4j': {
        await neo4jAdapter.executeQuery('RETURN 1 as ping');
        success = true;
        break;
      }
      case 'postgres': {
        // Vector store health check (uses configured provider)
        const { getVectorStore } = await import('../../platform/vector/index.js');
        const vectorStore = await getVectorStore();
        const stats = await vectorStore.getStatistics();
        // Coerce to a real boolean; `stats && stats.initialized` could be
        // undefined/object depending on the provider.
        success = Boolean(stats && stats.initialized);
        break;
      }
      case 'filesystem': {
        await ensureSafeZoneExists();
        await fs.access(SAFE_DESKTOP_PATH);
        success = true;
        break;
      }
      case 'internet': {
        const response = await fetch('https://www.google.com', {
          method: 'HEAD',
          signal: AbortSignal.timeout(5000)
        });
        success = response.ok;
        break;
      }
      case 'backend': {
        const backendPort = process.env.PORT || 7860;
        const backendResponse = await fetch(`http://localhost:${backendPort}/api/health`, {
          signal: AbortSignal.timeout(5000)
        });
        success = backendResponse.ok;
        break;
      }
      default: {
        // Previously unknown targets were reported as merely "unreachable"
        // with no cause; surface the reason explicitly.
        sonarEcho.error = `Unknown target: ${target}`;
        break;
      }
    }
  } catch (error: any) {
    sonarEcho.error = error.message;
  }
  const end = process.hrtime.bigint();
  sonarEcho.latencyMs = Number(end - start) / 1_000_000;
  // Interpret distance/quality from the measured latency.
  if (success) {
    if (sonarEcho.latencyMs < 10) {
      sonarEcho.field = 'ULTRA_NEAR';
      sonarEcho.quality = 'EXCELLENT';
    } else if (sonarEcho.latencyMs < 50) {
      sonarEcho.field = 'NEAR_FIELD';
      sonarEcho.quality = 'GOOD';
    } else if (sonarEcho.latencyMs < 100) {
      sonarEcho.field = 'MID_FIELD';
      sonarEcho.quality = 'ACCEPTABLE';
    } else if (sonarEcho.latencyMs < 500) {
      sonarEcho.field = 'FAR_FIELD';
      sonarEcho.quality = 'DEGRADED';
    } else {
      sonarEcho.field = 'HORIZON';
      sonarEcho.quality = 'CRITICAL';
    }
  } else {
    sonarEcho.field = 'NO_ECHO';
    sonarEcho.quality = 'UNREACHABLE';
  }
  return {
    content: [{
      type: 'text',
      text: JSON.stringify({
        success,
        sense: 'SONAR',
        sonarEcho,
        interpretation: success
          ? `${target} responded in ${sonarEcho.latencyMs.toFixed(2)}ms (${sonarEcho.field})`
          : `${target} is unreachable`
      }, null, 2)
    }]
  };
}
// ═══════════════════════════════════════════════════════════════════════
// 🧩 KNOWLEDGE GAP HANDLERS - Curiosity & Sentinel Module v2.2
// ═══════════════════════════════════════════════════════════════════════
/**
* Register a knowledge gap - creates a :KnowledgeGap node in Neo4j
* Supports ONE_OFF (single answer) vs CONSTANT_STREAM (continuous monitoring)
*/
/**
 * Register a knowledge gap - creates a :KnowledgeGap node in Neo4j.
 * Supports ONE_OFF (single answer) vs CONSTANT_STREAM (continuous monitoring).
 *
 * @param args.query     The open question (required)
 * @param args.lifecycle 'ONE_OFF' | 'CONSTANT_STREAM' (required)
 * @param args.priority  'critical' | 'high' | 'medium' | 'low' (default 'medium')
 * @param args.context   Optional free-text context
 * @param args.tags      Optional string tags
 */
private async handleRegisterKnowledgeGap(args: any) {
  const {
    query,
    lifecycle,
    priority = 'medium',
    context = '',
    tags = []
  } = args;
  if (!query || !lifecycle) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'query and lifecycle are required parameters'
        }, null, 2)
      }],
      isError: true
    };
  }
  // Validate lifecycle
  if (!['ONE_OFF', 'CONSTANT_STREAM'].includes(lifecycle)) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'lifecycle must be ONE_OFF or CONSTANT_STREAM'
        }, null, 2)
      }],
      isError: true
    };
  }
  // slice(2, 11) replaces the deprecated substr(2, 9) - identical output.
  const gapId = `gap-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
  const createdAt = new Date().toISOString();
  try {
    // Create KnowledgeGap node in Neo4j (return value not needed).
    await neo4jAdapter.executeQuery(`
      CREATE (g:KnowledgeGap {
        id: $gapId,
        query: $query,
        status: 'OPEN',
        lifecycle: $lifecycle,
        priority: $priority,
        context: $context,
        tags: $tags,
        created_at: datetime($createdAt),
        updated_at: datetime($createdAt),
        source: 'neural-bridge',
        resolution_count: 0
      })
      RETURN g.id as id, g.query as query, g.status as status,
             g.lifecycle as lifecycle, g.priority as priority
    `, { gapId, query, lifecycle, priority, context, tags, createdAt });
    // Log the gap registration (stderr keeps stdout clean for MCP stdio).
    console.error(`[Neural Bridge] 🧩 Knowledge Gap Registered: ${gapId} (${lifecycle})`);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          action: 'KNOWLEDGE_GAP_REGISTERED',
          gap: {
            id: gapId,
            query,
            status: 'OPEN',
            lifecycle,
            priority,
            context,
            tags,
            created_at: createdAt
          },
          message: lifecycle === 'CONSTANT_STREAM'
            ? `Sentinel monitoring activated for: "${query}"`
            : `Curiosity gap registered: "${query}"`
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message,
          hint: 'Ensure Neo4j is running and accessible'
        }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Get knowledge gaps - query :KnowledgeGap nodes with optional filters
*/
/**
 * Get knowledge gaps - query :KnowledgeGap nodes with optional filters.
 * Ordered by priority (critical > high > medium > rest), then newest first.
 *
 * @param args.status    Optional status filter
 * @param args.lifecycle Optional lifecycle filter
 * @param args.priority  Optional priority filter
 * @param args.limit     Max rows (default 20; non-numeric input falls back to 20)
 */
private async handleGetKnowledgeGaps(args: any) {
  const {
    status,
    lifecycle,
    priority,
    limit = 20
  } = args || {};
  try {
    // Build dynamic WHERE clause; guard against a NaN/negative limit which
    // would be rejected by the Neo4j driver.
    const parsedLimit = Number.parseInt(String(limit), 10);
    const conditions: string[] = [];
    const params: any = {
      limit: Number.isFinite(parsedLimit) && parsedLimit > 0 ? parsedLimit : 20
    };
    if (status) {
      conditions.push('g.status = $status');
      params.status = status;
    }
    if (lifecycle) {
      conditions.push('g.lifecycle = $lifecycle');
      params.lifecycle = lifecycle;
    }
    if (priority) {
      conditions.push('g.priority = $priority');
      params.priority = priority;
    }
    const whereClause = conditions.length > 0
      ? `WHERE ${conditions.join(' AND ')}`
      : '';
    const result = await neo4jAdapter.executeQuery(`
      MATCH (g:KnowledgeGap)
      ${whereClause}
      RETURN g {
        .id, .query, .status, .lifecycle, .priority,
        .context, .tags, .resolution_count,
        created_at: toString(g.created_at),
        updated_at: toString(g.updated_at),
        resolved_at: toString(g.resolved_at),
        .last_resolution, .resolution_source
      } as gap
      ORDER BY
        CASE g.priority
          WHEN 'critical' THEN 0
          WHEN 'high' THEN 1
          WHEN 'medium' THEN 2
          ELSE 3
        END,
        g.created_at DESC
      LIMIT $limit
    `, params);
    // Aggregate counts grouped by status/lifecycle for the response footer.
    const stats = await neo4jAdapter.executeQuery(`
      MATCH (g:KnowledgeGap)
      RETURN
        g.status as status,
        g.lifecycle as lifecycle,
        count(*) as count
    `);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          filters: { status, lifecycle, priority },
          count: result.length,
          gaps: result.map(r => r.gap),
          statistics: stats
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message,
          hint: 'Neo4j query failed - check connection'
        }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Resolve a knowledge gap - updates status and stores the resolution
*/
/**
 * Resolve a knowledge gap - updates its status, stores the resolution, and
 * materializes a :KnowledgeNode (per Entry #007 spec) linked via SOLVED_BY,
 * plus a :KnowledgeResolution history node linked via HAS_RESOLUTION.
 *
 * Status rules:
 *  - CONSTANT_STREAM gaps stay IN_PROGRESS unless keepMonitoring === false.
 *  - ONE_OFF gaps become RESOLVED unless keepMonitoring === true.
 *
 * @param args.gapId          Gap to resolve (required)
 * @param args.resolution     The answer text (required)
 * @param args.source         Where the answer came from (default 'manual')
 * @param args.keepMonitoring Optional override of the lifecycle default
 */
private async handleResolveKnowledgeGap(args: any) {
  const { gapId, resolution, source = 'manual', keepMonitoring } = args;
  if (!gapId || !resolution) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'gapId and resolution are required parameters'
        }, null, 2)
      }],
      isError: true
    };
  }
  const resolvedAt = new Date().toISOString();
  try {
    // First, get the gap to check its lifecycle
    const existingGap = await neo4jAdapter.executeQuery(`
      MATCH (g:KnowledgeGap {id: $gapId})
      RETURN g.lifecycle as lifecycle, g.status as status
    `, { gapId });
    if (existingGap.length === 0) {
      return {
        content: [{
          type: 'text',
          text: JSON.stringify({
            success: false,
            error: `Knowledge gap not found: ${gapId}`
          }, null, 2)
        }],
        isError: true
      };
    }
    const { lifecycle } = existingGap[0];
    // Determine new status based on lifecycle and keepMonitoring flag
    let newStatus: string;
    if (lifecycle === 'CONSTANT_STREAM') {
      // For CONSTANT_STREAM, default is to keep monitoring unless explicitly set to false
      newStatus = keepMonitoring === false ? 'RESOLVED' : 'IN_PROGRESS';
    } else {
      // For ONE_OFF, always resolve unless keepMonitoring is explicitly true
      newStatus = keepMonitoring === true ? 'IN_PROGRESS' : 'RESOLVED';
    }
    // slice(2, 8) replaces the deprecated substr(2, 6) - identical output.
    const knowledgeNodeId = `kn-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
    const result = await neo4jAdapter.executeQuery(`
      MATCH (g:KnowledgeGap {id: $gapId})
      SET g.status = $newStatus,
          g.last_resolution = $resolution,
          g.resolution_source = $source,
          g.resolved_at = datetime($resolvedAt),
          g.updated_at = datetime($resolvedAt),
          g.resolution_count = coalesce(g.resolution_count, 0) + 1
      WITH g
      // Create KnowledgeNode (the actual knowledge artifact)
      CREATE (kn:KnowledgeNode {
        id: $knowledgeNodeId,
        content: $resolution,
        source: $source,
        fromGap: $gapId,
        originalQuery: g.query,
        created_at: datetime($resolvedAt)
      })
      // Create SOLVED_BY relationship
      CREATE (g)-[:SOLVED_BY]->(kn)
      // Also keep resolution history node for tracking
      CREATE (r:KnowledgeResolution {
        gap_id: $gapId,
        resolution: $resolution,
        source: $source,
        knowledgeNodeId: $knowledgeNodeId,
        resolved_at: datetime($resolvedAt)
      })
      CREATE (g)-[:HAS_RESOLUTION]->(r)
      RETURN g {
        .id, .query, .status, .lifecycle, .priority,
        .last_resolution, .resolution_source, .resolution_count,
        resolved_at: toString(g.resolved_at)
      } as gap, $knowledgeNodeId as knowledgeNodeId
    `, { gapId, newStatus, resolution, source, resolvedAt, knowledgeNodeId });
    const updatedGap = result[0]?.gap;
    // Log the resolution
    console.error(`[Neural Bridge] 🧩 Knowledge Gap ${newStatus === 'RESOLVED' ? 'Resolved' : 'Updated'}: ${gapId}`);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          action: newStatus === 'RESOLVED' ? 'KNOWLEDGE_GAP_RESOLVED' : 'KNOWLEDGE_GAP_UPDATED',
          gap: updatedGap,
          knowledgeNode: {
            id: result[0]?.knowledgeNodeId,
            relationship: 'SOLVED_BY'
          },
          message: lifecycle === 'CONSTANT_STREAM' && newStatus !== 'RESOLVED'
            ? `Sentinel updated - continuing to monitor: "${updatedGap?.query}"`
            : `Knowledge gap resolved: "${updatedGap?.query}" β†’ KnowledgeNode created`
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message,
          hint: 'Failed to resolve knowledge gap'
        }, null, 2)
      }],
      isError: true
    };
  }
}
// ═══════════════════════════════════════════════════════════════════════
// πŸ€– SENTINEL ENGINE HANDLERS - Advanced Autonomous Features v2.3
// ═══════════════════════════════════════════════════════════════════════
/**
* Rate a resolution for feedback loop learning
*/
/**
 * Records user feedback on a gap resolution so the Sentinel feedback loop
 * can learn which resolution strategies produce useful answers.
 */
private async handleRateResolution(args: any) {
  const { gapId, resolutionIndex = 0, wasUseful, qualityScore } = args;
  const missingRequired = !gapId || wasUseful === undefined || qualityScore === undefined;
  if (missingRequired) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'gapId, wasUseful, and qualityScore are required'
        }, null, 2)
      }],
      isError: true
    };
  }
  try {
    await sentinelEngine.recordFeedback(gapId, resolutionIndex, wasUseful, qualityScore);
    const payload = {
      success: true,
      action: 'FEEDBACK_RECORDED',
      gapId,
      resolutionIndex,
      wasUseful,
      qualityScore,
      message: 'Feedback recorded - system will learn from this'
    };
    return {
      content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: false, error: error.message }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Get learning insights from the feedback loop
*/
/**
 * Returns aggregated learning insights derived from resolution feedback.
 * Takes no meaningful arguments.
 */
private async handleGetLearningInsights(_args: any) {
  try {
    const insights = await sentinelEngine.getLearningInsights();
    const payload = {
      success: true,
      insights,
      message: 'Learning insights generated from resolution feedback'
    };
    return {
      content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: false, error: error.message }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Get Sentinel Engine status
*/
/**
 * Reports Sentinel Engine status: feature flags, gap statistics, gaps that
 * need human intervention, and (optionally) stale gaps / gaps due for check.
 */
private async handleGetSentinelStatus(args: any) {
  const { includeStaleGaps = false, includeDueForCheck = false } = args || {};
  try {
    const status: any = {
      engine: 'Sentinel Engine v2.3',
      temporalMonitoring: 'ACTIVE',
      features: {
        autoGapDetection: 'ENABLED',
        autoResolutionRouting: 'ENABLED',
        temporalMonitoring: 'ENABLED',
        feedbackLoop: 'ENABLED'
      },
      timestamp: new Date().toISOString()
    };
    // Gap counts grouped by status/lifecycle.
    status.gapStatistics = await neo4jAdapter.executeQuery(`
      MATCH (g:KnowledgeGap)
      RETURN
        g.status as status,
        g.lifecycle as lifecycle,
        count(*) as count
    `);
    // Gaps flagged for a human to look at.
    const needsAttention = await neo4jAdapter.executeQuery(`
      MATCH (g:KnowledgeGap)
      WHERE g.needsHumanIntervention = true
      RETURN count(g) as count
    `);
    status.gapsNeedingHumanIntervention = needsAttention[0]?.count || 0;
    if (includeStaleGaps) {
      status.staleGaps = await neo4jAdapter.executeQuery(`
        MATCH (g:KnowledgeGap)
        WHERE g.status = 'STALE'
        RETURN g.id as id, g.query as query,
               toString(g.created_at) as created_at
        LIMIT 10
      `);
    }
    if (includeDueForCheck) {
      status.gapsDueForCheck = await sentinelEngine.getGapsDueForCheck();
    }
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: true, status }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: false, error: error.message }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Set schedule for CONSTANT_STREAM gap
*/
/**
 * Set the monitoring schedule for a CONSTANT_STREAM gap.
 *
 * @param args.gapId              Gap to schedule (required)
 * @param args.checkIntervalHours Positive number of hours between checks (required)
 */
private async handleSetGapSchedule(args: any) {
  const { gapId, checkIntervalHours } = args;
  if (!gapId || !checkIntervalHours) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'gapId and checkIntervalHours are required'
        }, null, 2)
      }],
      isError: true
    };
  }
  // Previously NaN/negative values (e.g. "-5") slipped through to the
  // engine; reject them explicitly.
  const interval = Number(checkIntervalHours);
  if (!Number.isFinite(interval) || interval <= 0) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'checkIntervalHours must be a positive number'
        }, null, 2)
      }],
      isError: true
    };
  }
  try {
    await sentinelEngine.setCheckInterval(gapId, interval);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          action: 'SCHEDULE_SET',
          gapId,
          checkIntervalHours: interval,
          message: `Gap will be checked every ${interval} hours`
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message
        }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Get resolution history for a gap
*/
/**
 * Returns the full resolution history for a single knowledge gap.
 */
private async handleGetResolutionHistory(args: any) {
  const { gapId } = args;
  if (!gapId) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: false, error: 'gapId is required' }, null, 2)
      }],
      isError: true
    };
  }
  try {
    const history = await sentinelEngine.getResolutionHistory(gapId);
    const payload = {
      success: true,
      gapId,
      resolutionCount: history.length,
      history
    };
    return {
      content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: false, error: error.message }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Manually trigger auto-resolution for a gap
*/
/**
 * Manually kicks off the Sentinel auto-resolution pipeline for one gap.
 * Looks the gap up, delegates to the engine, and reports which strategy
 * (if any) produced an answer.
 */
private async handleTriggerGapResolution(args: any) {
  const { gapId } = args;
  if (!gapId) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: false, error: 'gapId is required' }, null, 2)
      }],
      isError: true
    };
  }
  try {
    // Fetch the gap so we can hand its query/type to the engine.
    const rows = await neo4jAdapter.executeQuery(`
      MATCH (g:KnowledgeGap {id: $gapId})
      RETURN g.query as query, g.gapType as gapType
    `, { gapId });
    if (rows.length === 0) {
      return {
        content: [{
          type: 'text',
          text: JSON.stringify({ success: false, error: `Gap not found: ${gapId}` }, null, 2)
        }],
        isError: true
      };
    }
    const { query, gapType } = rows[0];
    const outcome = await sentinelEngine.attemptAutoResolution(gapId, query, gapType || 'unknown');
    if (!outcome) {
      // Engine gave up entirely - every strategy was exhausted.
      return {
        content: [{
          type: 'text',
          text: JSON.stringify({
            success: false,
            action: 'AUTO_RESOLUTION_FAILED',
            gapId,
            message: 'All resolution strategies exhausted - gap marked for human intervention'
          }, null, 2)
        }]
      };
    }
    const payload = {
      success: true,
      action: 'AUTO_RESOLUTION_TRIGGERED',
      gapId,
      result: {
        strategy: outcome.strategy,
        resolved: outcome.success,
        source: outcome.source,
        duration_ms: outcome.duration_ms
      },
      message: outcome.success
        ? `Successfully resolved via ${outcome.strategy}`
        : 'Resolution attempted but no suitable answer found'
    };
    return {
      content: [{ type: 'text', text: JSON.stringify(payload, null, 2) }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: false, error: error.message }, null, 2)
      }],
      isError: true
    };
  }
}
// ═══════════════════════════════════════════════════════════════════════
// 🎭 THE MUSE HANDLERS - Idea Incubation System v1.0
// ═══════════════════════════════════════════════════════════════════════
/**
* Incubate an idea - store it for later consideration
* This tool ONLY saves data - it does NOT trigger any actions
*/
/**
 * Incubate an idea - store it for later consideration.
 * This tool ONLY saves data - it does NOT trigger any actions.
 *
 * @param args.title      Short idea title (required)
 * @param args.hypothesis The underlying hypothesis (required)
 * @param args.confidence 0..1 confidence (default 0.5)
 * @param args.tags       Optional string tags
 * @param args.agent      Proposing agent name (default 'unknown')
 * @param args.relatedTo  Optional related entity reference
 */
private async handleIncubateIdea(args: any) {
  const {
    title,
    hypothesis,
    confidence = 0.5,
    tags = [],
    agent = 'unknown',
    // The Neo4j driver rejects `undefined` parameters, so an omitted
    // relatedTo must default to null rather than stay undefined.
    relatedTo = null
  } = args;
  if (!title || !hypothesis) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'title and hypothesis are required'
        }, null, 2)
      }],
      isError: true
    };
  }
  // slice(2, 11) replaces the deprecated substr(2, 9) - identical output.
  const ideaId = `idea-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
  const createdAt = new Date().toISOString();
  try {
    // Create Idea node in Neo4j and, when the agent exists, a PROPOSED edge.
    await neo4jAdapter.executeQuery(`
      CREATE (i:Idea {
        id: $ideaId,
        title: $title,
        hypothesis: $hypothesis,
        confidence: $confidence,
        tags: $tags,
        status: 'INCUBATED',
        proposedBy: $agent,
        relatedTo: $relatedTo,
        created_at: datetime($createdAt),
        updated_at: datetime($createdAt)
      })
      WITH i
      // Link to agent if specified
      OPTIONAL MATCH (a:Agent {name: $agent})
      FOREACH (_ IN CASE WHEN a IS NOT NULL THEN [1] ELSE [] END |
        CREATE (a)-[:PROPOSED]->(i)
      )
      RETURN i.id as id
    `, { ideaId, title, hypothesis, confidence, tags, agent, relatedTo, createdAt });
    console.error(`[The Muse] 🎭 Idea incubated: "${title}" (confidence: ${confidence})`);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          action: 'IDEA_INCUBATED',
          idea: {
            id: ideaId,
            title,
            hypothesis,
            confidence,
            tags,
            status: 'INCUBATED',
            proposedBy: agent,
            created_at: createdAt
          },
          message: `πŸ’‘ Idea stored for incubation: "${title}". No actions triggered.`
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message,
          hint: 'Failed to store idea in Neo4j'
        }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Get incubated ideas with optional filters
*/
/**
 * Get incubated ideas with optional filters, ordered by confidence then
 * recency, plus per-status counts.
 *
 * @param args.status        Optional status filter
 * @param args.tag           Optional tag that must be present in idea.tags
 * @param args.minConfidence Optional minimum confidence (inclusive)
 * @param args.agent         Optional proposing-agent filter
 * @param args.limit         Max rows (default 20; non-numeric falls back to 20)
 */
private async handleGetIdeas(args: any) {
  const {
    status,
    tag,
    minConfidence,
    agent,
    limit = 20
  } = args || {};
  try {
    // Build dynamic WHERE clause; guard against a NaN/negative limit which
    // would be rejected by the Neo4j driver.
    const parsedLimit = Number.parseInt(String(limit), 10);
    const conditions: string[] = [];
    const params: any = {
      limit: Number.isFinite(parsedLimit) && parsedLimit > 0 ? parsedLimit : 20
    };
    if (status) {
      conditions.push('i.status = $status');
      params.status = status;
    }
    if (tag) {
      conditions.push('$tag IN i.tags');
      params.tag = tag;
    }
    if (minConfidence !== undefined) {
      conditions.push('i.confidence >= $minConfidence');
      params.minConfidence = minConfidence;
    }
    if (agent) {
      conditions.push('i.proposedBy = $agent');
      params.agent = agent;
    }
    const whereClause = conditions.length > 0
      ? `WHERE ${conditions.join(' AND ')}`
      : '';
    const result = await neo4jAdapter.executeQuery(`
      MATCH (i:Idea)
      ${whereClause}
      RETURN i {
        .id, .title, .hypothesis, .confidence, .tags,
        .status, .proposedBy, .relatedTo, .reason,
        created_at: toString(i.created_at),
        updated_at: toString(i.updated_at),
        promoted_at: toString(i.promoted_at)
      } as idea
      ORDER BY i.confidence DESC, i.created_at DESC
      LIMIT $limit
    `, params);
    // Per-status counts for the response footer.
    const stats = await neo4jAdapter.executeQuery(`
      MATCH (i:Idea)
      RETURN i.status as status, count(*) as count
    `);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          filters: { status, tag, minConfidence, agent },
          count: result.length,
          ideas: result.map(r => r.idea),
          statistics: stats
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message
        }, null, 2)
      }],
      isError: true
    };
  }
}
/**
* Promote, reject, or mark an idea as implemented
*/
/**
 * Promote, reject, or mark an idea as implemented.
 * PROMOTE may additionally spawn a :Task node when taskDescription is given.
 *
 * @param args.ideaId          Idea to update (required)
 * @param args.action          'PROMOTE' | 'REJECT' | 'IMPLEMENT' (required)
 * @param args.reason          Optional free-text rationale
 * @param args.taskDescription Optional task body (PROMOTE only)
 */
private async handlePromoteIdea(args: any) {
  const { ideaId, action, reason = '', taskDescription } = args;
  if (!ideaId || !action) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'ideaId and action are required'
        }, null, 2)
      }],
      isError: true
    };
  }
  if (!['PROMOTE', 'REJECT', 'IMPLEMENT'].includes(action)) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'action must be PROMOTE, REJECT, or IMPLEMENT'
        }, null, 2)
      }],
      isError: true
    };
  }
  const updatedAt = new Date().toISOString();
  try {
    // Map action to status
    const statusMap: Record<string, string> = {
      'PROMOTE': 'PROMOTED',
      'REJECT': 'REJECTED',
      'IMPLEMENT': 'IMPLEMENTED'
    };
    const newStatus = statusMap[action];
    // Update the idea
    const result = await neo4jAdapter.executeQuery(`
      MATCH (i:Idea {id: $ideaId})
      SET i.status = $newStatus,
          i.reason = $reason,
          i.updated_at = datetime($updatedAt),
          i.promoted_at = CASE WHEN $action = 'PROMOTE' THEN datetime($updatedAt) ELSE i.promoted_at END
      WITH i
      // If promoting, optionally create a Task node
      FOREACH (_ IN CASE WHEN $action = 'PROMOTE' AND $taskDescription IS NOT NULL THEN [1] ELSE [] END |
        CREATE (t:Task {
          id: 'task-' + toString(timestamp()),
          title: i.title,
          description: $taskDescription,
          status: 'PENDING',
          fromIdea: $ideaId,
          created_at: datetime($updatedAt)
        })
        CREATE (i)-[:PROMOTED_TO]->(t)
      )
      RETURN i {
        .id, .title, .status, .reason,
        updated_at: toString(i.updated_at)
      } as idea
    `, { ideaId, newStatus, reason, action, taskDescription, updatedAt });
    if (result.length === 0) {
      return {
        content: [{
          type: 'text',
          text: JSON.stringify({
            success: false,
            error: `Idea not found: ${ideaId}`
          }, null, 2)
        }],
        isError: true
      };
    }
    const actionMessages: Record<string, string> = {
      'PROMOTE': `πŸš€ Idea promoted to task: "${result[0].idea.title}"`,
      'REJECT': `❌ Idea rejected: "${result[0].idea.title}"`,
      'IMPLEMENT': `βœ… Idea marked as implemented: "${result[0].idea.title}"`
    };
    console.error(`[The Muse] ${actionMessages[action]}`);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          // Derive from the past-tense status: the old \`IDEA_\${action}ED\`
          // produced "IDEA_PROMOTEED" for PROMOTE.
          action: `IDEA_${newStatus}`,
          idea: result[0].idea,
          message: actionMessages[action],
          taskCreated: Boolean(action === 'PROMOTE' && taskDescription)
        }, null, 2)
      }]
    };
  } catch (error: any) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message
        }, null, 2)
      }],
      isError: true
    };
  }
}
// ═══════════════════════════════════════════════════════════════════════
// πŸ‘οΈ THE VISUAL CORTEX - Frontend Perception Handlers v1.0
// ═══════════════════════════════════════════════════════════════════════
// Shared headless Puppeteer browser, lazily created by getBrowser();
// null until the first frontend inspection.
private browserInstance: Browser | null = null;
/**
* Get or create browser instance (lazy initialization)
*/
/**
 * Returns the shared headless browser, launching it on first use.
 * Subsequent calls reuse the cached instance.
 */
private async getBrowser(): Promise<Browser> {
  if (this.browserInstance === null) {
    const launched = await puppeteer.launch({
      headless: true,
      args: ['--no-sandbox', '--disable-setuid-sandbox', '--disable-dev-shm-usage']
    });
    this.browserInstance = launched;
  }
  return this.browserInstance;
}
/**
* Inspect the frontend using Puppeteer
*/
/**
 * Inspect the running frontend with a headless browser (Puppeteer).
 *
 * Supported `action` values:
 *  - SCREENSHOT: capture the full page (or `selector` element) into SNAPSHOTS_PATH
 *  - ANALYZE_LAYOUT: summarize DOM structure (element counts, main sections)
 *  - CHECK_CONSOLE_ERRORS: report captured console error/warning messages
 *  - FULL_AUDIT: all of the above
 *
 * Screenshots are also recorded as :Artifact nodes in Neo4j.
 *
 * @param args.route    Frontend route appended to FRONTEND_URL (default '/')
 * @param args.action   One of the actions above (required)
 * @param args.selector Optional CSS selector for an element screenshot
 * @param args.viewport { width, height } (default 1920x1080)
 * @param args.waitFor  Extra ms to wait for dynamic content (default 2000)
 */
private async handleInspectFrontend(args: any) {
  const {
    route = '/',
    action,
    selector,
    viewport = { width: 1920, height: 1080 },
    waitFor = 2000
  } = args;
  if (!action) {
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: 'action is required (SCREENSHOT, ANALYZE_LAYOUT, CHECK_CONSOLE_ERRORS, or FULL_AUDIT)'
        }, null, 2)
      }],
      isError: true
    };
  }
  const url = `${FRONTEND_URL}${route}`;
  // Console listeners below push into these arrays for the whole page session.
  const consoleMessages: Array<{ type: string; text: string; timestamp: string }> = [];
  const consoleErrors: Array<{ type: string; text: string; timestamp: string }> = [];
  let page: Page | null = null;
  try {
    // Ensure snapshots directory exists
    await ensureSafeZoneExists(); // From FileSystemTools
    await fs.mkdir(SNAPSHOTS_PATH, { recursive: true });
    // Check if frontend is reachable
    const browser = await this.getBrowser();
    page = await browser.newPage();
    // Set viewport
    await page.setViewport({
      width: viewport.width || 1920,
      height: viewport.height || 1080
    });
    // Capture console messages; must be registered before navigation so
    // load-time messages are not missed.
    page.on('console', msg => {
      const entry = {
        type: msg.type(),
        text: msg.text(),
        timestamp: new Date().toISOString()
      };
      consoleMessages.push(entry);
      if (['error', 'warning'].includes(msg.type())) {
        consoleErrors.push(entry);
      }
    });
    // Navigate with timeout
    try {
      await page.goto(url, { waitUntil: 'networkidle2', timeout: 15000 });
    } catch (navError: any) {
      // Navigation failure is reported as a dedicated error (page is closed
      // here, so the outer catch will not double-close it).
      await page.close();
      return {
        content: [{
          type: 'text',
          text: JSON.stringify({
            success: false,
            error: `Frontend unreachable at ${url}`,
            hint: 'Is the frontend running? Start with: npm run dev:frontend',
            details: navError.message
          }, null, 2)
        }],
        isError: true
      };
    }
    // Wait for dynamic content
    await new Promise(resolve => setTimeout(resolve, waitFor));
    const result: any = {
      success: true,
      url,
      route,
      timestamp: new Date().toISOString(),
      viewport
    };
    // Execute based on action
    if (action === 'SCREENSHOT' || action === 'FULL_AUDIT') {
      const timestamp = Date.now();
      const filename = `snapshot-${timestamp}.png`;
      const filepath = path.join(SNAPSHOTS_PATH, filename);
      if (selector) {
        // Screenshot specific element
        const element = await page.$(selector);
        if (element) {
          await element.screenshot({ path: filepath });
          result.screenshot = {
            path: filepath,
            filename,
            selector,
            type: 'element'
          };
        } else {
          // Missing selector is a soft failure: reported in the result,
          // not an error response.
          result.screenshot = {
            error: `Selector "${selector}" not found`,
            type: 'element_not_found'
          };
        }
      } else {
        // Full page screenshot
        await page.screenshot({ path: filepath, fullPage: true });
        result.screenshot = {
          path: filepath,
          filename,
          type: 'full_page'
        };
      }
      // Create Artifact node in Neo4j (only when a file was actually written)
      if (result.screenshot?.path) {
        const artifactId = `artifact-${timestamp}`;
        await neo4jAdapter.executeQuery(`
          CREATE (a:Artifact {
            id: $artifactId,
            type: 'SNAPSHOT',
            path: $path,
            filename: $filename,
            url: $url,
            route: $route,
            selector: $selector,
            created_at: datetime()
          })
          WITH a
          OPTIONAL MATCH (agent:Agent {name: 'claude'})
          FOREACH (_ IN CASE WHEN agent IS NOT NULL THEN [1] ELSE [] END |
            CREATE (agent)-[:CREATED]->(a)
          )
        `, { artifactId, path: filepath, filename, url, route, selector: selector || null });
        result.screenshot.artifactId = artifactId;
      }
    }
    if (action === 'ANALYZE_LAYOUT' || action === 'FULL_AUDIT') {
      // Get page structure analysis (runs inside the page context, so it can
      // only use browser globals - no Node APIs).
      const layoutAnalysis = await page.evaluate(() => {
        const analysis: any = {
          title: document.title,
          bodyClasses: document.body.className,
          elements: {
            total: document.querySelectorAll('*').length,
            buttons: document.querySelectorAll('button').length,
            inputs: document.querySelectorAll('input, textarea, select').length,
            links: document.querySelectorAll('a').length,
            images: document.querySelectorAll('img').length,
            widgets: document.querySelectorAll('[class*="widget"], [data-widget]').length
          },
          visibleText: document.body.innerText.slice(0, 500) + '...',
          mainSections: Array.from(document.querySelectorAll('main, section, header, footer, nav, aside'))
            .map(el => ({
              tag: el.tagName.toLowerCase(),
              id: el.id || null,
              class: el.className || null
            }))
        };
        return analysis;
      });
      result.layout = layoutAnalysis;
    }
    if (action === 'CHECK_CONSOLE_ERRORS' || action === 'FULL_AUDIT') {
      result.console = {
        totalMessages: consoleMessages.length,
        errors: consoleErrors,
        errorCount: consoleErrors.filter(m => m.type === 'error').length,
        warningCount: consoleErrors.filter(m => m.type === 'warning').length,
        hasErrors: consoleErrors.some(m => m.type === 'error')
      };
    }
    await page.close();
    // Log the inspection
    console.error(`[Visual Cortex] πŸ‘οΈ Inspected ${url} - ${action}`);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: true,
          result
        }, null, 2)
      }]
    };
  } catch (error: any) {
    // NOTE(review): if page.close() above already ran and a later statement
    // threw, this second close could itself throw - confirm desired behavior.
    if (page) await page.close();
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({
          success: false,
          error: error.message
        }, null, 2)
      }],
      isError: true
    };
  }
}
// ═══════════════════════════════════════════════════════════════════════
// 🎨 LIQUID UI HANDLER - Dynamic Component Rendering v1.0
// ═══════════════════════════════════════════════════════════════════════
/**
 * Renders a dynamic UI component for the chat interface.
 *
 * Wraps the requested component name and its props in a `ui_component`
 * envelope serialized as JSON text, which the frontend interprets.
 */
private async handleRenderUIComponent(args: any) {
  const { component, props } = args;
  // NOTE(review): props are passed through unvalidated; a real
  // implementation would validate them against a per-component schema
  // and potentially register component state in the backend.
  const envelope = {
    _type: 'ui_component',
    component,
    props
  };
  return {
    content: [{
      type: 'text',
      text: JSON.stringify(envelope, null, 2)
    }]
  };
}
/**
 * Generates Mermaid diagram code.
 *
 * Currently a stub: returns a fixed Mermaid template selected by
 * `args.diagram_type` instead of calling an LLM. The tool also accepts
 * `description` and `style`, but this handler does not consume them yet,
 * so they are intentionally not destructured (they were unused locals).
 *
 * @param args.diagram_type - 'flowchart' | 'sequence' | anything else (generic graph)
 * @returns MCP content block whose text is raw Mermaid source
 */
private async handleGenerateDiagram(args: any) {
  const { diagram_type } = args;
  // In a real implementation, this would call an LLM to generate the Mermaid code.
  // For now, we'll return a template based on type.
  let mermaidCode: string;
  switch (diagram_type) {
    case 'flowchart':
      mermaidCode = `graph TD\n A[Start] --> B{Decision}\n B -->|Yes| C[Process]\n B -->|No| D[End]`;
      break;
    case 'sequence':
      mermaidCode = `sequenceDiagram\n participant U as User\n participant S as System\n U->>S: Request\n S-->>U: Response`;
      break;
    default:
      // Unknown types fall back to a minimal two-node graph.
      mermaidCode = `graph TD\n A[Node 1] --> B[Node 2]`;
  }
  return {
    content: [{
      type: 'text',
      text: mermaidCode
    }]
  };
}
/**
 * Exports a visual asset.
 *
 * Stub implementation: no file is actually written. The handler simply
 * acknowledges the request and fabricates a timestamped download URL.
 * The tool also accepts `title`, but this handler does not use it yet,
 * so it is intentionally not destructured (it was an unused local).
 *
 * @param args.content_type - kind of content being exported (echoed in message)
 * @param args.format - target file extension used in the fabricated URL
 */
private async handleExportVisual(args: any) {
  const { content_type, format } = args;
  return {
    content: [{
      type: 'text',
      text: JSON.stringify({
        success: true,
        message: `Exported ${content_type} as ${format}`,
        url: `/downloads/export-${Date.now()}.${format}`
      }, null, 2)
    }]
  };
}
// ═══════════════════════════════════════════════════════════════════════
// 🧠 COGNITIVE MODULE HANDLERS - Specialized Brain Functions v1.0
// ═══════════════════════════════════════════════════════════════════════
/**
 * Auditory tool: analyze or report on captured log streams.
 *
 * Supported actions:
 *  - ANALYZE_LOGS (requires `logs`): runs AuditoryService analysis over raw log text.
 *  - GET_STATUS: returns a static status stub (no live listener registry yet).
 *  - anything else: acknowledged as a generic success echoing the action.
 *
 * The tool schema also carries `source`, `sessionId` and `filter`, but this
 * handler does not consume them yet, so they are intentionally not
 * destructured (they were unused locals).
 */
private async handleListenToLogs(args: any) {
  const { action, logs } = args;
  if (action === 'ANALYZE_LOGS' && logs) {
    const analysis = auditoryService.analyzeLogContent(logs);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify(analysis, null, 2)
      }]
    };
  }
  if (action === 'GET_STATUS') {
    // Hard-coded stub: no listener bookkeeping exists yet.
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ status: 'active', listeners: 0 }, null, 2)
      }]
    };
  }
  // Fallback: acknowledge unrecognized actions rather than erroring.
  return {
    content: [{
      type: 'text',
      text: JSON.stringify({ success: true, action }, null, 2)
    }]
  };
}
/**
 * Motor-cortex tool: request or approve a gated action.
 *
 * Supported actions:
 *  - REQUEST: files an action request with MotorCortex (defaults requester to 'claude').
 *  - APPROVE (requires `actionId`): approves a pending action (defaults approver to 'human').
 *  - anything else: explicit "Invalid action" error payload.
 */
private async handleExecuteAction(args: any) {
  const { action, actionType, command, targetPath, content, params, actionId, requestedBy } = args;

  // Wrap any JSON-serializable payload in an MCP text content response.
  const respond = (payload: unknown) => ({
    content: [{
      type: 'text',
      text: JSON.stringify(payload, null, 2)
    }]
  });

  if (action === 'REQUEST') {
    const request = await motorCortex.requestAction({
      type: actionType,
      description: args.description,
      command,
      targetPath,
      content,
      params,
      requestedBy: requestedBy || 'claude'
    });
    return respond(request);
  }

  if (action === 'APPROVE' && actionId) {
    const result = await motorCortex.approveAction(actionId, requestedBy || 'human');
    return respond(result);
  }

  return respond({ success: false, error: 'Invalid action' });
}
/**
 * Temporal-lobe tool: episodic memory operations.
 *
 * Supported actions:
 *  - RECORD_EPISODE (requires `episode`): persists an episode, returns its new id.
 *  - SEARCH_EPISODES (requires `query`): searches stored episodes via TemporalLobe.
 *  - anything else (or a missing required argument): explicit error payload.
 *
 * The tool schema also carries `episodeId` and `fact`, but this handler does
 * not consume them yet, so they are intentionally not destructured (they
 * were unused locals).
 */
private async handleMemoryOperation(args: any) {
  const { action, episode, query } = args;
  if (action === 'RECORD_EPISODE' && episode) {
    const id = await temporalLobe.recordEpisode(episode);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: true, episodeId: id }, null, 2)
      }]
    };
  }
  if (action === 'SEARCH_EPISODES' && query) {
    const episodes = await temporalLobe.searchEpisodes(query);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ count: episodes.length, episodes }, null, 2)
      }]
    };
  }
  return {
    content: [{
      type: 'text',
      text: JSON.stringify({ success: false, error: 'Invalid memory operation' }, null, 2)
    }]
  };
}
/**
 * Prefrontal-cortex tool: strategic goal management.
 *
 * Supported actions:
 *  - CREATE_GOAL (requires `goal`): creates a goal, returns its new id.
 *  - GET_GOALS: lists goals, optionally narrowed by `args.filter`.
 *  - anything else (or a missing required argument): explicit error payload.
 *
 * The tool schema also carries `goalId`, `plan` and `decision`, but this
 * handler does not consume them yet, so they are intentionally not
 * destructured (they were unused locals).
 */
private async handleStrategicPlanning(args: any) {
  const { action, goal } = args;
  if (action === 'CREATE_GOAL' && goal) {
    const id = await prefrontalCortex.createGoal(goal);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ success: true, goalId: id }, null, 2)
      }]
    };
  }
  if (action === 'GET_GOALS') {
    const goals = await prefrontalCortex.getGoals(args.filter);
    return {
      content: [{
        type: 'text',
        text: JSON.stringify({ count: goals.length, goals }, null, 2)
      }]
    };
  }
  return {
    content: [{
      type: 'text',
      text: JSON.stringify({ success: false, error: 'Invalid planning operation' }, null, 2)
    }]
  };
}
// ═══════════════════════════════════════════════════════════════════════
// Server Startup
// ═══════════════════════════════════════════════════════════════════════
/**
 * Connects the MCP server to a stdio transport and begins serving requests.
 */
async start() {
  const stdio = new StdioServerTransport();
  await this.server.connect(stdio);
  // Log to stderr so stdout stays free for the stdio transport.
  console.error('Neural Bridge Server running on stdio');
}
}
// Hoisted to module scope by ESM semantics; placed here so the startup
// logic is self-contained.
import { pathToFileURL } from 'url';

// Start the server only when this module is executed directly
// (`node NeuralBridgeServer.js`), not when it is imported.
// Comparing file URLs via pathToFileURL — instead of string-building
// `file://${process.argv[1]}` — handles Windows backslashes/drive letters
// and percent-encoded characters (e.g. spaces) the way import.meta.url
// reports them. Guard argv[1] since pathToFileURL(undefined) would throw.
if (process.argv[1] && import.meta.url === pathToFileURL(process.argv[1]).href) {
  const server = new NeuralBridgeServer();
  server.start().catch((error) => {
    console.error('Fatal error in Neural Bridge Server:', error);
    process.exit(1);
  });
}
export { NeuralBridgeServer };