import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { neuralCortex, CortexQuery } from '../../src/services/NeuralChat/NeuralCortex';
import { neo4jAdapter } from '../../src/adapters/Neo4jAdapter';
// Shared vector-store stub. Created via vi.hoisted so it already exists when
// vitest's hoisted mock setup runs at the top of the module.
const mockVectorStore = vi.hoisted(() => {
  const stub = () => vi.fn();
  return {
    initialize: stub(),
    search: stub(),
    upsert: stub(),
  };
});
// Mock dependencies
// NOTE: vi.mock calls are hoisted by vitest to the top of the module, so these
// factories run before the imports above are evaluated. Each factory must be
// self-contained (it may only reference vi.hoisted values such as
// mockVectorStore).
vi.mock('../../src/adapters/Neo4jAdapter', () => ({
neo4jAdapter: {
runQuery: vi.fn(),
},
}));
vi.mock('../../src/platform/vector/index', () => ({
getVectorStore: vi.fn(),
}));
// Setup the mock implementation result after hoisting
// Top-level await: re-import the (now mocked) module and wire getVectorStore
// to resolve to the hoisted stub. This runs once at module load; the
// vi.clearAllMocks() in beforeEach clears call history but does NOT remove
// this implementation (only resetAllMocks would), so it survives across tests.
// NOTE(review): `as any` assumes the stub's three methods cover everything
// NeuralCortex calls on the vector store — confirm against the VectorStore type.
vi.mocked(await import('../../src/platform/vector/index')).getVectorStore.mockResolvedValue(mockVectorStore as any);
/**
 * Unit tests for NeuralCortex hybrid retrieval: a processed message must be
 * persisted to both the Neo4j knowledge graph and the vector store, and a
 * query must merge semantic (vector) hits with structural (graph) hits,
 * degrading gracefully when the vector store is unavailable.
 */
describe('NeuralCortex (Hybrid RAG)', () => {
  beforeEach(() => {
    // Clear call history on every mock. Deliberately clearAllMocks, NOT
    // resetAllMocks: the module-level getVectorStore.mockResolvedValue(...)
    // implementation must survive between tests.
    vi.clearAllMocks();
    // Fully reset the vector-store methods so per-test implementations
    // (mockResolvedValue / mockRejectedValue) cannot leak into the next test.
    // mockReset() also clears call history.
    mockVectorStore.search.mockReset();
    mockVectorStore.upsert.mockReset();
    mockVectorStore.initialize.mockReset();
  });
  // No afterEach needed: beforeEach already clears all mock state before
  // every test runs.

  describe('processMessage', () => {
    it('should store message in both Graph and Vector store', async () => {
      const message = {
        id: 'msg-123',
        from: 'gemini' as any,
        channel: 'core-dev',
        body: 'We should use pgvector for semantic search and Neo4j for graphs.',
        timestamp: new Date().toISOString(),
        type: 'chat' as any,
        priority: 'normal' as any,
      };
      // Graph writes succeed and return no rows.
      vi.mocked(neo4jAdapter.runQuery).mockResolvedValue([]);

      const result = await neuralCortex.processMessage(message);

      // Graph interactions: message node plus extracted entities/concepts.
      // NOTE(review): 4 calls presumably = 1 message node + 3 concept/entity
      // writes — confirm against the NeuralCortex implementation.
      expect(neo4jAdapter.runQuery).toHaveBeenCalled();
      expect(neo4jAdapter.runQuery).toHaveBeenCalledTimes(4);

      // Vector interaction: the full message body is upserted under the
      // 'neural_chat' namespace with provenance metadata.
      expect(mockVectorStore.upsert).toHaveBeenCalledWith(expect.objectContaining({
        id: 'msg-123',
        content: message.body,
        namespace: 'neural_chat',
        metadata: expect.objectContaining({
          type: 'message',
          from: 'gemini',
          channel: 'core-dev',
          concepts: expect.arrayContaining(['pgvector', 'neo4j']),
        }),
      }));
      expect(result.vectorStored).toBe(true);

      // Concepts extracted from the body are expected lower-cased
      // ('Neo4j' in the text -> 'neo4j').
      expect(result.concepts).toContain('neo4j');
      expect(result.concepts).toContain('pgvector');
      expect(result.concepts.length).toBeGreaterThan(0);
    });
  });

  describe('query (Hybrid Search)', () => {
    it('should combine results from Vector and Graph search', async () => {
      const query: CortexQuery = {
        type: 'search',
        query: 'database architecture',
      };
      // Semantic hit from the vector store.
      mockVectorStore.search.mockResolvedValue([
        {
          id: 'doc-1',
          content: 'PostgreSQL is a relational database.',
          similarity: 0.9,
          metadata: { title: 'Postgres Guide', type: 'Document' },
        },
      ]);
      // Structural hit from the knowledge graph.
      vi.mocked(neo4jAdapter.runQuery).mockResolvedValue([
        {
          n: { properties: { name: 'Neo4j', id: 'node-2' } },
          types: ['Technology'],
          connections: [],
        },
      ]);

      const results = await neuralCortex.query(query);

      // One result from each backend.
      expect(results).toHaveLength(2);
      const vectorResult = results.find(r => r.source === 'semantic_search');
      const graphResult = results.find(r => r.source === 'knowledge_graph');
      expect(vectorResult).toBeDefined();
      // Vector hits surface metadata.title as the display name and carry the
      // raw similarity as relevance.
      expect(vectorResult?.data.name).toBe('Postgres Guide');
      expect(vectorResult?.relevance).toBe(0.9);
      expect(graphResult).toBeDefined();
      expect(graphResult?.data.name).toBe('Neo4j');
    });

    it('should fallback gracefully if vector store fails', async () => {
      const query: CortexQuery = {
        type: 'search',
        query: 'resilient system',
      };
      // Vector store is down; the graph remains healthy.
      mockVectorStore.search.mockRejectedValue(new Error('Vector DB offline'));
      vi.mocked(neo4jAdapter.runQuery).mockResolvedValue([
        {
          n: { properties: { name: 'SelfHealingAdapter', id: 'node-3' } },
          types: ['Service'],
          connections: [],
        },
      ]);

      const results = await neuralCortex.query(query);

      // The vector failure must not reject the whole query — graph results
      // are still returned.
      expect(results).toHaveLength(1);
      expect(results[0].data.name).toBe('SelfHealingAdapter');
      expect(results[0].source).toBe('knowledge_graph');
    });
  });
});