// Source: ruvector-fixed/dist/core/neural-perf.js
// Commit 40d7073 (Archie): "Fix dimension/dimensions bug and positional insert/search args"
"use strict";
/**
* Neural Performance Optimizations
*
* High-performance utilities for neural embedding operations:
* - O(1) LRU Cache with doubly-linked list + hash map
* - Parallel batch processing
* - Pre-allocated Float32Array buffer pools
* - Tensor buffer reuse
* - 8x loop unrolling for vector operations
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.OptimizedMemoryStore = exports.ParallelBatchProcessor = exports.VectorOps = exports.TensorBufferManager = exports.Float32BufferPool = exports.LRUCache = exports.PERF_CONSTANTS = void 0;
// ============================================================================
// Constants
// ============================================================================
exports.PERF_CONSTANTS = {
    DEFAULT_CACHE_SIZE: 1000, // LRUCache default capacity (entries)
    DEFAULT_BUFFER_POOL_SIZE: 64, // Max pooled Float32Arrays kept per size class
    DEFAULT_BATCH_SIZE: 32, // Chunk size for cache-friendly sync batch processing
    MIN_PARALLEL_BATCH_SIZE: 8, // Below this, batches are processed sequentially
    UNROLL_THRESHOLD: 32, // Min dimension for loop unrolling
};
/**
* High-performance LRU Cache with O(1) get, set, and eviction.
* Uses doubly-linked list for ordering + hash map for O(1) lookup.
*/
/**
 * O(1) least-recently-used cache.
 *
 * A hash map (`map`) gives O(1) key lookup, while a doubly-linked list
 * (head = most recent, tail = least recent) makes promotion and eviction
 * O(1) as well. Hit/miss counters are kept for diagnostics.
 */
class LRUCache {
    constructor(capacity = exports.PERF_CONSTANTS.DEFAULT_CACHE_SIZE) {
        if (capacity < 1)
            throw new Error('Cache capacity must be >= 1');
        this.capacity = capacity;
        this.map = new Map(); // key -> linked-list node
        this.head = null; // Most recently used
        this.tail = null; // Least recently used
        // Stats
        this.hits = 0;
        this.misses = 0;
    }
    /**
     * Look up `key`, promoting it to most-recently-used - O(1).
     * Returns undefined (and counts a miss) when absent.
     */
    get(key) {
        const node = this.map.get(key);
        if (node === undefined) {
            this.misses += 1;
            return undefined;
        }
        this.hits += 1;
        this.moveToHead(node);
        return node.value;
    }
    /**
     * Insert or update `key` - O(1). Evicts the LRU entry when at capacity.
     */
    set(key, value) {
        const node = this.map.get(key);
        if (node !== undefined) {
            // Known key: update in place and promote.
            node.value = value;
            this.moveToHead(node);
            return;
        }
        // Make room before inserting a brand-new entry.
        if (this.map.size >= this.capacity)
            this.evictLRU();
        const fresh = { key, value, prev: null, next: null };
        this.map.set(key, fresh);
        this.addToHead(fresh);
    }
    /**
     * Membership test (no promotion, no stats) - O(1).
     */
    has(key) {
        return this.map.has(key);
    }
    /**
     * Remove `key` - O(1). Returns whether it was present.
     */
    delete(key) {
        const node = this.map.get(key);
        if (node === undefined)
            return false;
        this.removeNode(node);
        this.map.delete(key);
        return true;
    }
    /**
     * Drop every entry - O(1). Stats are intentionally preserved.
     */
    clear() {
        this.map.clear();
        this.head = null;
        this.tail = null;
    }
    /**
     * Current number of cached entries.
     */
    get size() {
        return this.map.size;
    }
    /**
     * Snapshot of size/capacity and hit-rate statistics.
     */
    getStats() {
        const lookups = this.hits + this.misses;
        return {
            size: this.map.size,
            capacity: this.capacity,
            hits: this.hits,
            misses: this.misses,
            hitRate: lookups === 0 ? 0 : this.hits / lookups,
        };
    }
    /**
     * Zero the hit/miss counters.
     */
    resetStats() {
        this.hits = 0;
        this.misses = 0;
    }
    // Internal: promote an existing node to the head of the list.
    moveToHead(node) {
        if (node === this.head)
            return;
        this.removeNode(node);
        this.addToHead(node);
    }
    // Internal: splice a node in at the head.
    addToHead(node) {
        node.prev = null;
        node.next = this.head;
        if (this.head !== null)
            this.head.prev = node;
        this.head = node;
        if (this.tail === null)
            this.tail = node;
    }
    // Internal: unlink a node, patching neighbours (or head/tail at the ends).
    removeNode(node) {
        const { prev, next } = node;
        if (prev === null) {
            this.head = next;
        }
        else {
            prev.next = next;
        }
        if (next === null) {
            this.tail = prev;
        }
        else {
            next.prev = prev;
        }
    }
    // Internal: drop the least-recently-used entry (the tail).
    evictLRU() {
        const victim = this.tail;
        if (victim === null)
            return;
        this.map.delete(victim.key);
        this.removeNode(victim);
    }
    /**
     * Yield [key, value] pairs from most to least recently used.
     */
    *entries() {
        for (let node = this.head; node !== null; node = node.next) {
            yield [node.key, node.value];
        }
    }
}
// Named CommonJS export: require(...).LRUCache
exports.LRUCache = LRUCache;
// ============================================================================
// P1: Pre-allocated Float32Array Buffer Pool
// ============================================================================
/**
* High-performance buffer pool for Float32Arrays.
* Eliminates GC pressure by reusing pre-allocated buffers.
*/
/**
 * Buffer pool for Float32Arrays, keyed by buffer length.
 * Reusing released buffers avoids repeated allocation and GC pressure in
 * hot embedding paths. Buffers are zeroed when returned to the pool.
 */
class Float32BufferPool {
    constructor(maxPoolSize = exports.PERF_CONSTANTS.DEFAULT_BUFFER_POOL_SIZE) {
        this.maxPoolSize = maxPoolSize;
        this.pools = new Map(); // size -> free buffers of that size
        // Stats
        this.allocations = 0;
        this.reuses = 0;
    }
    /**
     * Hand out a buffer of `size` floats, reusing a pooled one when
     * available - O(1) amortized.
     */
    acquire(size) {
        const free = this.pools.get(size);
        if (free !== undefined && free.length > 0) {
            this.reuses += 1;
            return free.pop();
        }
        this.allocations += 1;
        return new Float32Array(size);
    }
    /**
     * Return a buffer to the pool - O(1). Buffers beyond `maxPoolSize`
     * per size class are simply dropped for the GC to collect.
     */
    release(buffer) {
        const free = this.poolFor(buffer.length);
        if (free.length >= this.maxPoolSize)
            return;
        buffer.fill(0); // Zero out for security
        free.push(buffer);
    }
    /**
     * Pre-allocate `count` buffers for each size in `sizes` so early
     * acquires never hit the allocator.
     */
    prewarm(sizes, count = 8) {
        for (const size of sizes) {
            const free = this.poolFor(size);
            while (free.length < count) {
                free.push(new Float32Array(size));
                this.allocations += 1;
            }
        }
    }
    /**
     * Drop every pooled buffer.
     */
    clear() {
        this.pools.clear();
    }
    /**
     * Allocation/reuse counters plus the number of buffers currently pooled.
     */
    getStats() {
        let pooledBuffers = 0;
        this.pools.forEach((free) => {
            pooledBuffers += free.length;
        });
        const total = this.allocations + this.reuses;
        return {
            allocations: this.allocations,
            reuses: this.reuses,
            reuseRate: total === 0 ? 0 : this.reuses / total,
            pooledBuffers,
        };
    }
    // Internal: fetch (creating on demand) the free list for `size`.
    poolFor(size) {
        let free = this.pools.get(size);
        if (free === undefined) {
            free = [];
            this.pools.set(size, free);
        }
        return free;
    }
}
// Named CommonJS export: require(...).Float32BufferPool
exports.Float32BufferPool = Float32BufferPool;
// ============================================================================
// P1: Tensor Buffer Manager (Reusable Working Memory)
// ============================================================================
/**
* Manages reusable tensor buffers for intermediate computations.
* Reduces allocations in hot paths.
*/
/**
 * Hands out reusable tensor buffers for intermediate computations,
 * cutting allocations in hot paths.
 *
 * "Working" buffers persist across calls under a name; "temp" buffers
 * are one-shot and must be handed back via releaseTemp().
 */
class TensorBufferManager {
    constructor(pool) {
        this.bufferPool = pool ?? new Float32BufferPool();
        this.workingBuffers = new Map(); // name -> persistent working buffer
    }
    /**
     * Fetch the named working buffer, (re)allocating when the requested
     * size differs from the cached one.
     */
    getWorking(name, size) {
        const current = this.workingBuffers.get(name);
        if (current !== undefined) {
            if (current.length === size)
                return current;
            // Size changed: recycle the stale buffer before replacing it.
            this.bufferPool.release(current);
        }
        const fresh = this.bufferPool.acquire(size);
        this.workingBuffers.set(name, fresh);
        return fresh;
    }
    /**
     * Borrow a scratch buffer; the caller owns it until releaseTemp().
     */
    getTemp(size) {
        return this.bufferPool.acquire(size);
    }
    /**
     * Hand a scratch buffer back to the pool.
     */
    releaseTemp(buffer) {
        this.bufferPool.release(buffer);
    }
    /**
     * Recycle every named working buffer and forget the names.
     */
    releaseAll() {
        this.workingBuffers.forEach((buffer) => this.bufferPool.release(buffer));
        this.workingBuffers.clear();
    }
    /**
     * Expose the underlying pool (e.g. for stats).
     */
    getPool() {
        return this.bufferPool;
    }
}
// Named CommonJS export: require(...).TensorBufferManager
exports.TensorBufferManager = TensorBufferManager;
// ============================================================================
// P2: 8x Loop Unrolling Vector Operations
// ============================================================================
/**
* High-performance vector operations with 8x loop unrolling.
* Provides 15-30% speedup on large vectors.
*/
exports.VectorOps = {
/**
* Dot product with 8x unrolling
*/
dot(a, b) {
const len = a.length;
let sum = 0;
// 8x unrolled loop
const unrolled = len - (len % 8);
let i = 0;
for (; i < unrolled; i += 8) {
sum += a[i] * b[i]
+ a[i + 1] * b[i + 1]
+ a[i + 2] * b[i + 2]
+ a[i + 3] * b[i + 3]
+ a[i + 4] * b[i + 4]
+ a[i + 5] * b[i + 5]
+ a[i + 6] * b[i + 6]
+ a[i + 7] * b[i + 7];
}
// Handle remainder
for (; i < len; i++) {
sum += a[i] * b[i];
}
return sum;
},
/**
* Squared L2 norm with 8x unrolling
*/
normSq(a) {
const len = a.length;
let sum = 0;
const unrolled = len - (len % 8);
let i = 0;
for (; i < unrolled; i += 8) {
sum += a[i] * a[i]
+ a[i + 1] * a[i + 1]
+ a[i + 2] * a[i + 2]
+ a[i + 3] * a[i + 3]
+ a[i + 4] * a[i + 4]
+ a[i + 5] * a[i + 5]
+ a[i + 6] * a[i + 6]
+ a[i + 7] * a[i + 7];
}
for (; i < len; i++) {
sum += a[i] * a[i];
}
return sum;
},
/**
* L2 norm
*/
norm(a) {
return Math.sqrt(exports.VectorOps.normSq(a));
},
/**
* Cosine similarity - optimized for V8 JIT
* Uses 4x unrolling which benchmarks faster than 8x due to register pressure
*/
cosine(a, b) {
const len = a.length;
let dot = 0, normA = 0, normB = 0;
// 4x unroll is optimal for cosine (less register pressure)
const unrolled = len - (len % 4);
let i = 0;
for (; i < unrolled; i += 4) {
const a0 = a[i], a1 = a[i + 1], a2 = a[i + 2], a3 = a[i + 3];
const b0 = b[i], b1 = b[i + 1], b2 = b[i + 2], b3 = b[i + 3];
dot += a0 * b0 + a1 * b1 + a2 * b2 + a3 * b3;
normA += a0 * a0 + a1 * a1 + a2 * a2 + a3 * a3;
normB += b0 * b0 + b1 * b1 + b2 * b2 + b3 * b3;
}
for (; i < len; i++) {
dot += a[i] * b[i];
normA += a[i] * a[i];
normB += b[i] * b[i];
}
const denom = Math.sqrt(normA * normB);
return denom > 1e-10 ? dot / denom : 0;
},
/**
* Euclidean distance squared with 8x unrolling
*/
distanceSq(a, b) {
const len = a.length;
let sum = 0;
const unrolled = len - (len % 8);
let i = 0;
for (; i < unrolled; i += 8) {
const d0 = a[i] - b[i];
const d1 = a[i + 1] - b[i + 1];
const d2 = a[i + 2] - b[i + 2];
const d3 = a[i + 3] - b[i + 3];
const d4 = a[i + 4] - b[i + 4];
const d5 = a[i + 5] - b[i + 5];
const d6 = a[i + 6] - b[i + 6];
const d7 = a[i + 7] - b[i + 7];
sum += d0 * d0 + d1 * d1 + d2 * d2 + d3 * d3
+ d4 * d4 + d5 * d5 + d6 * d6 + d7 * d7;
}
for (; i < len; i++) {
const d = a[i] - b[i];
sum += d * d;
}
return sum;
},
/**
* Euclidean distance
*/
distance(a, b) {
return Math.sqrt(exports.VectorOps.distanceSq(a, b));
},
/**
* Add vectors: out = a + b (with 8x unrolling)
*/
add(a, b, out) {
const len = a.length;
const unrolled = len - (len % 8);
let i = 0;
for (; i < unrolled; i += 8) {
out[i] = a[i] + b[i];
out[i + 1] = a[i + 1] + b[i + 1];
out[i + 2] = a[i + 2] + b[i + 2];
out[i + 3] = a[i + 3] + b[i + 3];
out[i + 4] = a[i + 4] + b[i + 4];
out[i + 5] = a[i + 5] + b[i + 5];
out[i + 6] = a[i + 6] + b[i + 6];
out[i + 7] = a[i + 7] + b[i + 7];
}
for (; i < len; i++) {
out[i] = a[i] + b[i];
}
return out;
},
/**
* Subtract vectors: out = a - b (with 8x unrolling)
*/
sub(a, b, out) {
const len = a.length;
const unrolled = len - (len % 8);
let i = 0;
for (; i < unrolled; i += 8) {
out[i] = a[i] - b[i];
out[i + 1] = a[i + 1] - b[i + 1];
out[i + 2] = a[i + 2] - b[i + 2];
out[i + 3] = a[i + 3] - b[i + 3];
out[i + 4] = a[i + 4] - b[i + 4];
out[i + 5] = a[i + 5] - b[i + 5];
out[i + 6] = a[i + 6] - b[i + 6];
out[i + 7] = a[i + 7] - b[i + 7];
}
for (; i < len; i++) {
out[i] = a[i] - b[i];
}
return out;
},
/**
* Scale vector: out = a * scalar (with 8x unrolling)
*/
scale(a, scalar, out) {
const len = a.length;
const unrolled = len - (len % 8);
let i = 0;
for (; i < unrolled; i += 8) {
out[i] = a[i] * scalar;
out[i + 1] = a[i + 1] * scalar;
out[i + 2] = a[i + 2] * scalar;
out[i + 3] = a[i + 3] * scalar;
out[i + 4] = a[i + 4] * scalar;
out[i + 5] = a[i + 5] * scalar;
out[i + 6] = a[i + 6] * scalar;
out[i + 7] = a[i + 7] * scalar;
}
for (; i < len; i++) {
out[i] = a[i] * scalar;
}
return out;
},
/**
* Normalize vector in-place
*/
normalize(a) {
const norm = exports.VectorOps.norm(a);
if (norm > 1e-10) {
exports.VectorOps.scale(a, 1 / norm, a);
}
return a;
},
/**
* Mean of multiple vectors (with buffer reuse)
*/
mean(vectors, out) {
const n = vectors.length;
if (n === 0)
return out;
const len = out.length;
out.fill(0);
// Sum all vectors
for (const vec of vectors) {
for (let i = 0; i < len; i++) {
out[i] += vec[i];
}
}
// Divide by count (unrolled)
const invN = 1 / n;
exports.VectorOps.scale(out, invN, out);
return out;
},
};
/**
* Parallel batch processor for embedding operations.
* Uses chunking and Promise.all for concurrent processing.
*/
/**
 * Parallel batch processor for embedding operations.
 * Uses chunking and Promise.all for concurrent processing.
 */
class ParallelBatchProcessor {
    /**
     * @param {{batchSize?: number, maxConcurrency?: number}} [options]
     */
    constructor(options = {}) {
        this.batchSize = options.batchSize ?? exports.PERF_CONSTANTS.DEFAULT_BATCH_SIZE;
        this.maxConcurrency = options.maxConcurrency ?? 4;
    }
    /**
     * Process items with an async `processor(item, index)`, preserving
     * input order in the results. Small batches run sequentially (the
     * concurrency overhead outweighs any benefit); larger ones are split
     * into up to `maxConcurrency` chunks processed concurrently, with
     * items inside each chunk handled sequentially.
     */
    async processBatch(items, processor) {
        const start = performance.now();
        const results = new Array(items.length);
        if (items.length < exports.PERF_CONSTANTS.MIN_PARALLEL_BATCH_SIZE) {
            // Small batch: plain sequential loop.
            for (let i = 0; i < items.length; i++) {
                results[i] = await processor(items[i], i);
            }
        }
        else {
            // Every chunk except possibly the last has exactly `chunkSize`
            // items, so chunk i starts at global index i * chunkSize.
            // (Fixed: previously derived from chunks[0].length and kept a
            // dead `offset` variable.)
            const chunkSize = Math.ceil(items.length / this.maxConcurrency);
            const chunks = this.chunkArray(items, chunkSize);
            await Promise.all(chunks.map(async (chunk, chunkIndex) => {
                const base = chunkIndex * chunkSize;
                for (let i = 0; i < chunk.length; i++) {
                    results[base + i] = await processor(chunk[i], base + i);
                }
            }));
        }
        const totalMs = performance.now() - start;
        return {
            results,
            timing: {
                totalMs,
                perItemMs: items.length > 0 ? totalMs / items.length : 0,
            },
        };
    }
    /**
     * Process items with a synchronous `processor(item, index)`.
     * Iterates in `batchSize` chunks for better cache locality.
     */
    processSync(items, processor) {
        const start = performance.now();
        const results = new Array(items.length);
        for (let i = 0; i < items.length; i += this.batchSize) {
            const end = Math.min(i + this.batchSize, items.length);
            for (let j = i; j < end; j++) {
                results[j] = processor(items[j], j);
            }
        }
        const totalMs = performance.now() - start;
        return {
            results,
            timing: {
                totalMs,
                perItemMs: items.length > 0 ? totalMs / items.length : 0,
            },
        };
    }
    /**
     * For each query, score every corpus vector by cosine similarity and
     * return the top-k `{index, score}` pairs (highest score first).
     */
    batchSimilarity(queries, corpus, k = 5) {
        const results = [];
        for (const query of queries) {
            const scores = [];
            for (let i = 0; i < corpus.length; i++) {
                scores.push({
                    index: i,
                    score: exports.VectorOps.cosine(query, corpus[i]),
                });
            }
            // Full O(n log n) sort; a heap-based partial sort would be
            // cheaper for k << n, but this keeps results identical.
            scores.sort((a, b) => b.score - a.score);
            results.push(scores.slice(0, k));
        }
        return results;
    }
    // Internal: split `arr` into consecutive slices of `chunkSize`
    // (the final slice may be shorter).
    chunkArray(arr, chunkSize) {
        const chunks = [];
        for (let i = 0; i < arr.length; i += chunkSize) {
            chunks.push(arr.slice(i, i + chunkSize));
        }
        return chunks;
    }
    }
// Named CommonJS export: require(...).ParallelBatchProcessor
exports.ParallelBatchProcessor = ParallelBatchProcessor;
/**
* High-performance memory store with O(1) LRU caching.
*/
/**
 * High-performance in-memory embedding store: O(1) LRU-cached lookup by
 * id, with embeddings copied into pooled Float32Array buffers.
 *
 * NOTE(review): buffers belonging to entries silently evicted by the LRU
 * cache are never returned to the pool (LRUCache has no eviction hook);
 * the pool just allocates replacements, so this is waste, not a crash.
 */
class OptimizedMemoryStore {
    /**
     * @param {{cacheSize?: number, dimension?: number}} [options]
     */
    constructor(options = {}) {
        this.cache = new LRUCache(options.cacheSize ?? exports.PERF_CONSTANTS.DEFAULT_CACHE_SIZE);
        this.bufferPool = new Float32BufferPool();
        this.dimension = options.dimension ?? 384;
        // Pre-warm so the first stores never hit the allocator.
        this.bufferPool.prewarm([this.dimension], 16);
    }
    /**
     * Store an embedding under `id` - O(1).
     *
     * BUGFIX: overwriting an existing id now reuses that entry's pooled
     * buffer; previously a fresh buffer was acquired and the old one was
     * dropped without being released, leaking one buffer per overwrite.
     */
    store(id, embedding, content) {
        // Reuse the prior entry's buffer on overwrite. (cache.get() also
        // promotes the entry and counts a hit, consistent with how
        // delete() below already uses it.)
        const prior = this.cache.has(id) ? this.cache.get(id) : undefined;
        const buffer = prior ? prior.embedding : this.bufferPool.acquire(this.dimension);
        const emb = embedding instanceof Float32Array ? embedding : new Float32Array(embedding);
        // A reused buffer may hold stale values past the new embedding's
        // length; clear them before copying.
        if (prior && emb.length < buffer.length) {
            buffer.fill(0);
        }
        buffer.set(emb);
        this.cache.set(id, {
            id,
            embedding: buffer,
            content,
            score: 1.0,
        });
    }
    /**
     * Fetch an entry by id (promotes it in the LRU order) - O(1).
     */
    get(id) {
        return this.cache.get(id);
    }
    /**
     * Top-k cosine-similarity search over all cached entries - O(n),
     * using the unrolled vector ops. Returns copies of the entries with
     * their similarity score attached.
     */
    search(query, k = 5) {
        const results = [];
        for (const [, entry] of this.cache.entries()) {
            const score = exports.VectorOps.cosine(query, entry.embedding);
            results.push({ entry, score });
        }
        results.sort((a, b) => b.score - a.score);
        return results.slice(0, k).map(r => ({ ...r.entry, score: r.score }));
    }
    /**
     * Delete an entry and recycle its buffer - O(1).
     * @returns {boolean} whether the id was present
     */
    delete(id) {
        const entry = this.cache.get(id);
        if (entry) {
            this.bufferPool.release(entry.embedding);
        }
        return this.cache.delete(id);
    }
    /**
     * Combined cache and buffer-pool statistics.
     */
    getStats() {
        return {
            cache: this.cache.getStats(),
            buffers: this.bufferPool.getStats(),
        };
    }
}
// Named CommonJS export: require(...).OptimizedMemoryStore
exports.OptimizedMemoryStore = OptimizedMemoryStore;
// ============================================================================
// Exports
// ============================================================================
// Aggregate default export mirroring the named exports above, for consumers
// using ESM default-import interop (or require(...).default).
exports.default = {
    LRUCache,
    Float32BufferPool,
    TensorBufferManager,
    VectorOps: exports.VectorOps,
    ParallelBatchProcessor,
    OptimizedMemoryStore,
    PERF_CONSTANTS: exports.PERF_CONSTANTS,
};