| | "use strict"; |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | Object.defineProperty(exports, "__esModule", { value: true }); |
| | exports.AdaptiveEmbedder = void 0; |
| | exports.getAdaptiveEmbedder = getAdaptiveEmbedder; |
| | exports.initAdaptiveEmbedder = initAdaptiveEmbedder; |
| | const onnx_embedder_1 = require("./onnx-embedder"); |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
/**
 * Low-rank residual adapter in the spirit of LoRA.
 *
 * Computes output = input + scale * ((input · A) · B), where A is a dim×rank
 * matrix and B is rank×dim, both stored row-major in flat Float32Arrays.
 * Adaptation happens in-process via a heuristic contrastive update
 * (`backward`) with optional EWC-style regularization toward weights saved by
 * `consolidate`. Forward results are memoized in a small FIFO cache.
 */
class MicroLoRA {
    /**
     * @param {number} dim   Dimensionality of input/output embeddings.
     * @param {number} rank  Rank of the low-rank factorization (rank << dim).
     * @param {number} scale Strength of the residual correction (default 0.1).
     */
    constructor(dim, rank, scale = 0.1) {
        // EWC state: Fisher-information diagonals and a snapshot of the
        // weights at consolidation time. All null until consolidate() runs.
        this.fisherA = null;
        this.fisherB = null;
        this.savedA = null;
        this.savedB = null;
        // forward() memo cache, keyed by hashInput(); FIFO-evicted at capacity.
        this.cache = new Map();
        this.cacheMaxSize = 256;
        this.dim = dim;
        this.rank = rank;
        this.scale = scale;
        // Glorot-style init scaled by fan-in + fan-out; B is shrunk by 100x so
        // the adapter's residual starts near zero (near-identity behavior).
        const stdA = Math.sqrt(2 / (dim + rank));
        const stdB = Math.sqrt(2 / (rank + dim)) * 0.01;
        this.A = this.initFlatMatrix(dim, rank, stdA);
        this.B = this.initFlatMatrix(rank, dim, stdB);
        // Scratch buffers reused across forward() calls to avoid allocation.
        this.hiddenBuffer = new Float32Array(rank);
        this.outputBuffer = new Float32Array(dim);
    }
    /**
     * Builds a rows×cols matrix (flat, row-major) with entries drawn uniformly
     * from [-std, std].
     */
    initFlatMatrix(rows, cols, std) {
        const arr = new Float32Array(rows * cols);
        for (let i = 0; i < arr.length; i++) {
            arr[i] = (Math.random() - 0.5) * 2 * std;
        }
        return arr;
    }
    /**
     * Cache key: FNV-1a hash over the first 32 components, each quantized to
     * 1e-4 resolution. NOTE(review): distinct inputs that agree on their first
     * 32 quantized components collide and would return a stale cached result —
     * presumably an accepted trade-off for speed; confirm.
     */
    hashInput(input) {
        let h = 2166136261;
        const len = Math.min(input.length, 32);
        for (let i = 0; i < len; i++) {
            h ^= Math.floor(input[i] * 10000);
            h = Math.imul(h, 16777619);
        }
        return h.toString(36);
    }
    /**
     * Applies the adapter: out = input + scale * (input·A)·B.
     * Returns a plain number[] of length dim; results are memoized.
     */
    forward(input) {
        // Fast path: return a copy of a previously computed output.
        const cacheKey = this.hashInput(input);
        const cached = this.cache.get(cacheKey);
        if (cached) {
            return Array.from(cached);
        }
        this.hiddenBuffer.fill(0);
        // hidden = input · A, with the inner loop unrolled by 4 over dim.
        const dim4 = this.dim - (this.dim % 4);
        for (let r = 0; r < this.rank; r++) {
            let sum = 0;
            const rOffset = r;
            for (let d = 0; d < dim4; d += 4) {
                const aIdx = d * this.rank + rOffset;
                sum += input[d] * this.A[aIdx];
                sum += input[d + 1] * this.A[aIdx + this.rank];
                sum += input[d + 2] * this.A[aIdx + 2 * this.rank];
                sum += input[d + 3] * this.A[aIdx + 3 * this.rank];
            }
            // Remainder when dim is not a multiple of 4.
            for (let d = dim4; d < this.dim; d++) {
                sum += input[d] * this.A[d * this.rank + rOffset];
            }
            this.hiddenBuffer[r] = sum;
        }
        // Start from the identity path (copy the input)...
        for (let d = 0; d < this.dim; d++) {
            this.outputBuffer[d] = input[d];
        }
        // ...then add the scaled low-rank residual hidden · B.
        for (let d = 0; d < this.dim; d++) {
            let delta = 0;
            for (let r = 0; r < this.rank; r++) {
                delta += this.hiddenBuffer[r] * this.B[r * this.dim + d];
            }
            this.outputBuffer[d] += this.scale * delta;
        }
        // FIFO eviction: Map preserves insertion order, so the first key is oldest.
        if (this.cache.size >= this.cacheMaxSize) {
            const firstKey = this.cache.keys().next().value;
            if (firstKey)
                this.cache.delete(firstKey);
        }
        this.cache.set(cacheKey, new Float32Array(this.outputBuffer));
        return Array.from(this.outputBuffer);
    }
    /** Drops all memoized forward() results (required after any weight change). */
    clearCache() {
        this.cache.clear();
    }
    /**
     * One heuristic contrastive update (InfoNCE-style loss, temperature 0.07).
     * Pulls anchor toward positive and away from negatives, scaled by lr.
     * Returns the loss value (0 when there is nothing to learn from).
     *
     * NOTE(review): the weight updates below are heuristic rather than exact
     * gradients of the loss — e.g. the A-update indexes positiveOut[r] with
     * r < rank even though outputs have dim entries, and the B-update uses a
     * fixed extra 0.1 damping factor. Confirm this is intentional.
     *
     * @param {number[]} anchor            Input embedding being adapted.
     * @param {number[]|null} positive     Embedding to move toward (may be null).
     * @param {number[][]} negatives       Embeddings to move away from.
     * @param {number} lr                  Learning rate.
     * @param {number} ewcLambda           EWC penalty strength (0 disables).
     */
    backward(anchor, positive, negatives, lr, ewcLambda = 0) {
        if (!positive && negatives.length === 0)
            return 0;
        // Weights are about to change, so memoized outputs become stale.
        this.clearCache();
        const anchorOut = this.forward(anchor);
        const positiveOut = positive ? this.forward(positive) : null;
        const negativeOuts = negatives.map(n => this.forward(n));
        const temp = 0.07;
        let loss = 0;
        if (positiveOut) {
            // Temperature-scaled similarities for the InfoNCE loss.
            const posSim = this.cosineSimilarity(anchorOut, positiveOut) / temp;
            const negSims = negativeOuts.map(n => this.cosineSimilarity(anchorOut, n) / temp);
            // Subtract the max before exponentiating for numerical stability.
            const maxSim = Math.max(posSim, ...negSims);
            const expPos = Math.exp(posSim - maxSim);
            const expNegs = negSims.reduce((sum, s) => sum + Math.exp(s - maxSim), 0);
            loss = -Math.log(expPos / (expPos + expNegs) + 1e-8);
            const gradScale = lr * this.scale;
            // Heuristic update for A (see NOTE above).
            for (let d = 0; d < this.dim; d++) {
                for (let r = 0; r < this.rank; r++) {
                    const idx = d * this.rank + r;
                    const pOutR = r < positiveOut.length ? positiveOut[r] : 0;
                    const aOutR = r < anchorOut.length ? anchorOut[r] : 0;
                    const gradA = anchor[d] * (pOutR - aOutR) * gradScale;
                    this.A[idx] += gradA;
                    // EWC pull toward the consolidated snapshot, weighted by
                    // the Fisher diagonal (only active after consolidate()).
                    if (ewcLambda > 0 && this.fisherA && this.savedA) {
                        this.A[idx] -= ewcLambda * this.fisherA[idx] * (this.A[idx] - this.savedA[idx]);
                    }
                }
            }
            // Heuristic update for B, further damped by 0.1.
            for (let r = 0; r < this.rank; r++) {
                const anchorR = r < anchor.length ? anchor[r] : 0;
                for (let d = 0; d < this.dim; d++) {
                    const idx = r * this.dim + d;
                    const gradB = anchorR * (positiveOut[d] - anchorOut[d]) * gradScale * 0.1;
                    this.B[idx] += gradB;
                    if (ewcLambda > 0 && this.fisherB && this.savedB) {
                        this.B[idx] -= ewcLambda * this.fisherB[idx] * (this.B[idx] - this.savedB[idx]);
                    }
                }
            }
        }
        return loss;
    }
    /**
     * EWC consolidation: snapshots current weights and estimates a Fisher
     * diagonal for A as the mean squared activation per input dimension.
     * Callers should pass a non-empty list (numEmb is used as a divisor).
     *
     * NOTE(review): fisherB is allocated but never populated, so the EWC
     * penalty on B in backward() is always zero — confirm whether intentional.
     */
    consolidate(embeddings) {
        // Snapshot weights to anchor future EWC penalties.
        this.savedA = new Float32Array(this.A);
        this.savedB = new Float32Array(this.B);
        this.fisherA = new Float32Array(this.dim * this.rank);
        this.fisherB = new Float32Array(this.rank * this.dim);
        const numEmb = embeddings.length;
        for (const emb of embeddings) {
            // Each row of A gets the same importance: E[emb[d]^2].
            for (let d = 0; d < this.dim; d++) {
                const embD = emb[d] * emb[d] / numEmb;
                for (let r = 0; r < this.rank; r++) {
                    this.fisherA[d * this.rank + r] += embD;
                }
            }
        }
        // Adapter semantics are anchored now; memoized outputs stay valid, but
        // clearing keeps cache behavior uniform with backward().
        this.clearCache();
    }
    /**
     * Cosine similarity over the common prefix of a and b, with a 1e-8 guard
     * against division by zero. Main loop unrolled by 4.
     */
    cosineSimilarity(a, b) {
        let dot = 0, normA = 0, normB = 0;
        const len = Math.min(a.length, b.length);
        const len4 = len - (len % 4);
        for (let i = 0; i < len4; i += 4) {
            dot += a[i] * b[i] + a[i + 1] * b[i + 1] + a[i + 2] * b[i + 2] + a[i + 3] * b[i + 3];
            normA += a[i] * a[i] + a[i + 1] * a[i + 1] + a[i + 2] * a[i + 2] + a[i + 3] * a[i + 3];
            normB += b[i] * b[i] + b[i + 1] * b[i + 1] + b[i + 2] * b[i + 2] + b[i + 3] * b[i + 3];
        }
        for (let i = len4; i < len; i++) {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        return dot / (Math.sqrt(normA * normB) + 1e-8);
    }
    /** Total number of trainable adapter parameters (|A| + |B|). */
    getParams() {
        return this.dim * this.rank + this.rank * this.dim;
    }
    /** Cache occupancy stats. hitRate is not tracked and always reports 0. */
    getCacheStats() {
        return {
            size: this.cache.size,
            maxSize: this.cacheMaxSize,
            hitRate: 0,
        };
    }
    /** Serializes A and B as nested plain arrays ({A: dim×rank, B: rank×dim}). */
    export() {
        const A = [];
        for (let d = 0; d < this.dim; d++) {
            const row = [];
            for (let r = 0; r < this.rank; r++) {
                row.push(this.A[d * this.rank + r]);
            }
            A.push(row);
        }
        const B = [];
        for (let r = 0; r < this.rank; r++) {
            const row = [];
            for (let d = 0; d < this.dim; d++) {
                row.push(this.B[r * this.dim + d]);
            }
            B.push(row);
        }
        return { A, B };
    }
    /**
     * Loads weights produced by export(). Bounds are clamped on both sides, so
     * a mismatched shape silently loads only the overlapping region.
     */
    import(weights) {
        for (let d = 0; d < this.dim && d < weights.A.length; d++) {
            for (let r = 0; r < this.rank && r < weights.A[d].length; r++) {
                this.A[d * this.rank + r] = weights.A[d][r];
            }
        }
        for (let r = 0; r < this.rank && r < weights.B.length; r++) {
            for (let d = 0; d < this.dim && d < weights.B[r].length; d++) {
                this.B[r * this.dim + d] = weights.B[r][d];
            }
        }
        // New weights invalidate memoized outputs.
        this.clearCache();
    }
}
| | |
| | |
| | |
/**
 * Per-domain centroid store. Each domain keeps a running centroid and a
 * Welford-style variance accumulator; when full, the least-seen domain is
 * evicted. adjust() nudges an embedding toward its best-matching centroid.
 */
class PrototypeMemory {
    constructor(maxPrototypes = 50, dimension = 384) {
        this.prototypes = new Map();
        this.maxPrototypes = maxPrototypes;
        this.scratchBuffer = new Float32Array(dimension);
    }
    /**
     * Folds one embedding into the prototype for `domain`, creating the
     * prototype (and evicting the rarest one if at capacity) when absent.
     */
    update(domain, embedding) {
        const proto = this.prototypes.get(domain);
        if (!proto) {
            // New domain: make room, then seed the centroid with this sample.
            if (this.prototypes.size >= this.maxPrototypes) {
                this.evictRarest();
            }
            this.prototypes.set(domain, {
                domain,
                centroid: Array.from(embedding),
                count: 1,
                variance: 0,
            });
            return;
        }
        // Welford streaming update: centroid moves by delta/n, and variance
        // accumulates delta * (x - newCentroid).
        const count = proto.count + 1;
        const inv = 1 / count;
        const limit = Math.min(embedding.length, proto.centroid.length);
        for (let i = 0; i < limit; i++) {
            const before = embedding[i] - proto.centroid[i];
            proto.centroid[i] += before * inv;
            proto.variance += before * (embedding[i] - proto.centroid[i]);
        }
        proto.count = count;
    }
    /** Removes the prototype with the fewest observations (first wins on ties). */
    evictRarest() {
        let lowest = Infinity;
        let victim = '';
        for (const [key, proto] of this.prototypes) {
            if (proto.count < lowest) {
                lowest = proto.count;
                victim = key;
            }
        }
        this.prototypes.delete(victim);
    }
    /**
     * Blends `embedding` toward its most similar centroid when that similarity
     * clears 0.5; otherwise returns the embedding unchanged.
     * @returns {{adjusted: number[], domain: string|null, confidence: number}}
     */
    adjust(embedding) {
        // Find the closest prototype by cosine similarity.
        let bestProto = null;
        let bestSim = -Infinity;
        for (const proto of this.prototypes.values()) {
            const sim = this.cosineSimilarityFast(embedding, proto.centroid);
            if (sim > bestSim) {
                bestSim = sim;
                bestProto = proto;
            }
        }
        // No prototypes at all, or the best match is below the gate: pass through.
        if (bestProto === null || bestSim < 0.5) {
            return { adjusted: Array.from(embedding), domain: null, confidence: 0 };
        }
        // Blend strength grows with similarity, capped at 0.1.
        const alpha = 0.1 * bestSim;
        const keep = 1 - alpha;
        const blended = new Array(embedding.length);
        for (let i = 0; i < embedding.length; i++) {
            blended[i] = embedding[i] * keep + bestProto.centroid[i] * alpha;
        }
        return {
            adjusted: blended,
            domain: bestProto.domain,
            confidence: bestSim,
        };
    }
    /**
     * Cosine similarity over the common prefix of a and b (4-way unrolled),
     * guarded against zero norms by a 1e-8 epsilon.
     */
    cosineSimilarityFast(a, b) {
        const n = Math.min(a.length, b.length);
        const n4 = n - (n % 4);
        let dot = 0, aa = 0, bb = 0;
        for (let i = 0; i < n4; i += 4) {
            dot += a[i] * b[i] + a[i + 1] * b[i + 1] + a[i + 2] * b[i + 2] + a[i + 3] * b[i + 3];
            aa += a[i] * a[i] + a[i + 1] * a[i + 1] + a[i + 2] * a[i + 2] + a[i + 3] * a[i + 3];
            bb += b[i] * b[i] + b[i + 1] * b[i + 1] + b[i + 2] * b[i + 2] + b[i + 3] * b[i + 3];
        }
        for (let i = n4; i < n; i++) {
            dot += a[i] * b[i];
            aa += a[i] * a[i];
            bb += b[i] * b[i];
        }
        return dot / (Math.sqrt(aa * bb) + 1e-8);
    }
    /** All current prototypes as an array (insertion order). */
    getPrototypes() {
        return [...this.prototypes.values()];
    }
    /** Serializable snapshot (same shape as getPrototypes()). */
    export() {
        return this.getPrototypes();
    }
    /** Replaces all prototypes with a previously exported snapshot. */
    import(prototypes) {
        this.prototypes.clear();
        for (const proto of prototypes) {
            this.prototypes.set(proto.domain, proto);
        }
    }
}
/**
 * Fixed-capacity episodic memory of embeddings with LFU eviction.
 * retrieve() ranks stored entries by cosine similarity (entry norms are
 * precomputed at insert time); augment() blends an embedding with its
 * softmax-weighted nearest neighbors.
 */
class EpisodicMemory {
    constructor(capacity = 1000, dimension = 384) {
        this.entries = [];
        this.capacity = capacity;
        this.dimension = dimension;
        // Scratch buffers reused across augment() calls; grown on demand there.
        this.augmentBuffer = new Float32Array(dimension);
        this.weightsBuffer = new Float32Array(Math.min(capacity, 16));
    }
    /**
     * Stores a copy of `embedding` with its context string. When full, evicts
     * the least-frequently-used entry (first index wins ties).
     */
    add(embedding, context) {
        if (this.entries.length >= this.capacity) {
            let minIdx = 0;
            let minCount = this.entries[0].useCount;
            for (let i = 1; i < this.entries.length; i++) {
                if (this.entries[i].useCount < minCount) {
                    minCount = this.entries[i].useCount;
                    minIdx = i;
                }
            }
            this.entries.splice(minIdx, 1);
        }
        // Defensive copy; Float32Array's constructor accepts both typed arrays
        // and plain arrays, so the original instanceof ternary (with two
        // identical branches) is collapsed to a single expression.
        const emb = new Float32Array(embedding);
        // Precompute ||emb||^2 so retrieve() only needs a dot product per entry.
        let normSq = 0;
        for (let i = 0; i < emb.length; i++) {
            normSq += emb[i] * emb[i];
        }
        this.entries.push({
            embedding: emb,
            context,
            timestamp: Date.now(),
            useCount: 0,
            normSquared: normSq,
        });
    }
    /**
     * Returns the k entries most cosine-similar to `query`, most similar
     * first, incrementing each returned entry's useCount.
     */
    retrieve(query, k = 5) {
        if (this.entries.length === 0)
            return [];
        let queryNormSq = 0;
        for (let i = 0; i < query.length; i++) {
            queryNormSq += query[i] * query[i];
        }
        const queryNorm = Math.sqrt(queryNormSq);
        const scored = [];
        for (const entry of this.entries) {
            // Dot product over the common prefix, unrolled by 4.
            let dot = 0;
            const len = Math.min(query.length, entry.embedding.length);
            const len4 = len - (len % 4);
            for (let i = 0; i < len4; i += 4) {
                dot += query[i] * entry.embedding[i];
                dot += query[i + 1] * entry.embedding[i + 1];
                dot += query[i + 2] * entry.embedding[i + 2];
                dot += query[i + 3] * entry.embedding[i + 3];
            }
            for (let i = len4; i < len; i++) {
                dot += query[i] * entry.embedding[i];
            }
            const similarity = dot / (queryNorm * Math.sqrt(entry.normSquared) + 1e-8);
            scored.push({ entry, similarity });
        }
        // The original duplicated the <=k and >k paths; a single sort + slice
        // covers both (slice(0, k) returns everything when length <= k).
        scored.sort((a, b) => b.similarity - a.similarity);
        const topK = scored.slice(0, k);
        for (const s of topK)
            s.entry.useCount++;
        return topK.map(s => s.entry);
    }
    /**
     * Blends `embedding` with its k nearest stored neighbors, weighted by
     * exp(similarity / 0.1); the query itself participates with weight 1.
     * Returns a plain number[] of the same length as the input.
     */
    augment(embedding, k = 3) {
        const similar = this.retrieve(embedding, k);
        if (similar.length === 0)
            return Array.from(embedding);
        // BUGFIX: the scratch buffers were fixed-size (16 weights, `dimension`
        // output slots). Out-of-range Float32Array writes are silently dropped
        // and out-of-range reads yield undefined, so k > 16 (or an input wider
        // than `dimension`) produced NaN results. Grow the buffers on demand.
        if (similar.length > this.weightsBuffer.length) {
            this.weightsBuffer = new Float32Array(similar.length);
        }
        const dim = embedding.length;
        if (dim > this.augmentBuffer.length) {
            this.augmentBuffer = new Float32Array(dim);
        }
        let queryNormSq = 0;
        for (let i = 0; i < embedding.length; i++) {
            queryNormSq += embedding[i] * embedding[i];
        }
        const queryNorm = Math.sqrt(queryNormSq);
        // sumWeights starts at 1 to account for the query's own contribution.
        let sumWeights = 1;
        for (let j = 0; j < similar.length; j++) {
            let dot = 0;
            const emb = similar[j].embedding;
            const len = Math.min(embedding.length, emb.length);
            for (let i = 0; i < len; i++) {
                dot += embedding[i] * emb[i];
            }
            const sim = dot / (queryNorm * Math.sqrt(similar[j].normSquared) + 1e-8);
            const weight = Math.exp(sim / 0.1);
            this.weightsBuffer[j] = weight;
            sumWeights += weight;
        }
        const invSumWeights = 1 / sumWeights;
        // Weighted average of the query and its neighbors.
        for (let i = 0; i < dim; i++) {
            let sum = embedding[i];
            for (let j = 0; j < similar.length; j++) {
                sum += this.weightsBuffer[j] * similar[j].embedding[i];
            }
            this.augmentBuffer[i] = sum * invSumWeights;
        }
        return Array.from(this.augmentBuffer.subarray(0, dim));
    }
    /** Number of stored entries. */
    size() {
        return this.entries.length;
    }
    /** Removes all entries. */
    clear() {
        this.entries = [];
    }
}
| | |
| | |
| | |
/**
 * Embedding pipeline that layers lightweight in-process adaptation on top of
 * a frozen base embedder (ONNX model when available, hashing fallback
 * otherwise): MicroLoRA residual adapter -> domain prototypes -> episodic
 * memory augmentation, with contrastive learning from co-edit signals.
 */
class AdaptiveEmbedder {
    constructor(config = {}) {
        this.onnxReady = false;
        this.dimension = 384;
        // Counters surfaced by getStats().
        this.adaptationCount = 0;
        this.ewcCount = 0;
        this.contrastiveCount = 0;
        // Pending co-edit pairs; flushed in batches of 16 by learnCoEdit().
        this.coEditBuffer = [];
        this.config = {
            loraRank: config.loraRank ?? 4,
            learningRate: config.learningRate ?? 0.01,
            ewcLambda: config.ewcLambda ?? 0.1,
            numPrototypes: config.numPrototypes ?? 50,
            contrastiveLearning: config.contrastiveLearning ?? true,
            contrastiveTemp: config.contrastiveTemp ?? 0.07,
            memoryCapacity: config.memoryCapacity ?? 1000,
        };
        this.lora = new MicroLoRA(this.dimension, this.config.loraRank);
        this.prototypes = new PrototypeMemory(this.config.numPrototypes, this.dimension);
        this.episodic = new EpisodicMemory(this.config.memoryCapacity, this.dimension);
    }
    /** Initializes the ONNX base embedder when available; no-op otherwise. */
    async init() {
        if ((0, onnx_embedder_1.isOnnxAvailable)()) {
            await (0, onnx_embedder_1.initOnnxEmbedder)();
            this.onnxReady = true;
        }
    }
    /**
     * Embeds one text through the full adaptive pipeline.
     * @param {string} text
     * @param {{domain?: string, useEpisodic?: boolean, storeInMemory?: boolean}} [options]
     * @returns {Promise<number[]>} L2-normalized embedding of length `dimension`.
     */
    async embed(text, options) {
        // 1) Base embedding: ONNX model when ready, hashing fallback otherwise.
        let baseEmb;
        if (this.onnxReady) {
            const result = await (0, onnx_embedder_1.embed)(text);
            baseEmb = result.embedding;
        }
        else {
            baseEmb = this.hashEmbed(text);
        }
        // 2) LoRA residual adapter.
        let adapted = this.lora.forward(baseEmb);
        // 3) Domain prototypes: fold in when a domain is supplied, then pull
        //    the vector toward its nearest centroid.
        if (options?.domain) {
            this.prototypes.update(options.domain, adapted);
        }
        const { adjusted } = this.prototypes.adjust(adapted);
        adapted = adjusted;
        // 4) Episodic augmentation (enabled unless explicitly disabled).
        if (options?.useEpisodic !== false) {
            adapted = this.episodic.augment(adapted);
        }
        // 5) Remember this embedding (enabled unless explicitly disabled).
        if (options?.storeInMemory !== false) {
            this.episodic.add(adapted, text.slice(0, 100));
        }
        return this.normalize(adapted);
    }
    /**
     * Batch variant of embed(). Uses the ONNX batch API when available,
     * otherwise falls back to sequential embed() calls (which also apply
     * episodic augmentation/storage, unlike the ONNX path).
     */
    async embedBatch(texts, options) {
        const results = [];
        if (this.onnxReady) {
            const baseResults = await (0, onnx_embedder_1.embedBatch)(texts);
            for (let i = 0; i < baseResults.length; i++) {
                let adapted = this.lora.forward(baseResults[i].embedding);
                if (options?.domain) {
                    this.prototypes.update(options.domain, adapted);
                }
                const { adjusted } = this.prototypes.adjust(adapted);
                results.push(this.normalize(adjusted));
            }
        }
        else {
            for (const text of texts) {
                results.push(await this.embed(text, options));
            }
        }
        return results;
    }
    /**
     * Records a co-edit signal (two files changed together) as a positive
     * pair; flushes a contrastive training batch every 16 pairs.
     * @returns {Promise<number>} mean batch loss when a flush happened, else 0.
     */
    async learnCoEdit(file1, content1, file2, content2) {
        if (!this.config.contrastiveLearning)
            return 0;
        const emb1 = await this.embed(content1.slice(0, 512), { storeInMemory: false });
        const emb2 = await this.embed(content2.slice(0, 512), { storeInMemory: false });
        this.coEditBuffer.push({ file1, emb1, file2, emb2 });
        if (this.coEditBuffer.length >= 16) {
            return this.processCoEditBatch();
        }
        return 0;
    }
    /**
     * Runs one contrastive update per buffered pair (each pair's positives are
     * its co-edit partner; negatives are other pairs' anchors) and clears the
     * buffer. @returns {number} mean loss over the batch.
     */
    processCoEditBatch() {
        if (this.coEditBuffer.length < 2)
            return 0;
        // BUGFIX: capture the batch size before clearing. The previous code
        // divided by this.coEditBuffer.length AFTER resetting it to [], so it
        // returned Infinity (or NaN) instead of the mean loss.
        const batchSize = this.coEditBuffer.length;
        let totalLoss = 0;
        for (const { emb1, emb2 } of this.coEditBuffer) {
            // Up to 4 negatives drawn from the other pairs in the batch.
            const negatives = this.coEditBuffer
                .filter(p => p.emb1 !== emb1)
                .slice(0, 4)
                .map(p => p.emb1);
            const loss = this.lora.backward(emb1, emb2, negatives, this.config.learningRate, this.config.ewcLambda);
            totalLoss += loss;
            this.contrastiveCount++;
        }
        this.coEditBuffer = [];
        this.adaptationCount++;
        return totalLoss / batchSize;
    }
    /**
     * Reinforces a (context -> action) association, but only for confident
     * successes (success && quality > 0.7); the learning rate is scaled by
     * quality.
     */
    async learnFromOutcome(context, action, success, quality = 0.5) {
        const contextEmb = await this.embed(context, { storeInMemory: false });
        const actionEmb = await this.embed(action, { storeInMemory: false });
        if (success && quality > 0.7) {
            this.lora.backward(contextEmb, actionEmb, [], this.config.learningRate * quality, this.config.ewcLambda);
            this.adaptationCount++;
        }
    }
    /**
     * EWC consolidation over the most recent episodic embeddings (up to 100);
     * skipped unless more than 10 are available.
     */
    async consolidate() {
        const embeddings = [];
        const entries = this.episodic.entries || [];
        const recentEntries = entries.slice(-100);
        for (const entry of recentEntries) {
            if (entry.embedding instanceof Float32Array) {
                embeddings.push(entry.embedding);
            }
        }
        if (embeddings.length > 10) {
            this.lora.consolidate(embeddings);
            this.ewcCount++;
        }
    }
    /**
     * Deterministic fallback embedding: double feature hashing of character
     * codes with positional decay, L2-normalized. Used when ONNX is absent.
     */
    hashEmbed(text) {
        const embedding = new Array(this.dimension).fill(0);
        const tokens = text.toLowerCase().split(/\s+/);
        for (let t = 0; t < tokens.length; t++) {
            const token = tokens[t];
            // Earlier tokens contribute more.
            const posWeight = 1 / (1 + t * 0.1);
            for (let i = 0; i < token.length; i++) {
                const code = token.charCodeAt(i);
                const h1 = (code * 31 + i * 17 + t * 7) % this.dimension;
                const h2 = (code * 37 + i * 23 + t * 11) % this.dimension;
                embedding[h1] += posWeight;
                embedding[h2] += posWeight * 0.5;
            }
        }
        return this.normalize(embedding);
    }
    /** L2-normalizes v; returns v unchanged when its norm is 0. */
    normalize(v) {
        const norm = Math.sqrt(v.reduce((a, b) => a + b * b, 0));
        return norm > 0 ? v.map(x => x / norm) : v;
    }
    /** Snapshot of model configuration and adaptation counters. */
    getStats() {
        return {
            baseModel: 'all-MiniLM-L6-v2',
            dimension: this.dimension,
            loraRank: this.config.loraRank,
            loraParams: this.lora.getParams(),
            adaptations: this.adaptationCount,
            prototypes: this.prototypes.getPrototypes().length,
            memorySize: this.episodic.size(),
            ewcConsolidations: this.ewcCount,
            contrastiveUpdates: this.contrastiveCount,
        };
    }
    /** Serializes adapter weights, prototypes, and stats for persistence. */
    export() {
        return {
            lora: this.lora.export(),
            prototypes: this.prototypes.export(),
            stats: this.getStats(),
        };
    }
    /** Restores state produced by export(); missing sections are skipped. */
    import(data) {
        if (data.lora) {
            this.lora.import(data.lora);
        }
        if (data.prototypes) {
            this.prototypes.import(data.prototypes);
        }
    }
    /** Discards all learned state and counters (config is kept). */
    reset() {
        this.lora = new MicroLoRA(this.dimension, this.config.loraRank);
        this.prototypes = new PrototypeMemory(this.config.numPrototypes, this.dimension);
        this.episodic.clear();
        this.adaptationCount = 0;
        this.ewcCount = 0;
        this.contrastiveCount = 0;
        this.coEditBuffer = [];
    }
    /** LoRA forward-cache stats (safe default when the method is absent). */
    getCacheStats() {
        return this.lora.getCacheStats?.() ?? { size: 0, maxSize: 256 };
    }
}
| | exports.AdaptiveEmbedder = AdaptiveEmbedder; |
| | |
| | |
| | |
// Lazily-created module-level singleton.
let instance = null;
/**
 * Returns the shared AdaptiveEmbedder, constructing it on first use.
 * Note: `config` is only honored by the call that creates the instance.
 */
function getAdaptiveEmbedder(config) {
    instance ??= new AdaptiveEmbedder(config);
    return instance;
}
/**
 * Convenience bootstrap: fetches the singleton embedder and awaits its
 * (ONNX) initialization before handing it back.
 */
async function initAdaptiveEmbedder(config) {
    const adaptive = getAdaptiveEmbedder(config);
    await adaptive.init();
    return adaptive;
}
| | exports.default = AdaptiveEmbedder; |
| |
|