/**
 * AdaptiveEmbedder - Micro-LoRA Style Optimization for ONNX Embeddings
 *
 * Applies continual learning techniques to frozen ONNX embeddings:
 *
 * 1. MICRO-LORA ADAPTERS
 *    - Low-rank projection layers (rank 2-8) on top of frozen embeddings
 *    - Domain-specific fine-tuning with minimal parameters
 *    - ~0.1% of base model parameters
 *
 * 2. CONTRASTIVE LEARNING
 *    - Files edited together → embeddings closer
 *    - Semantic clustering from trajectories
 *    - Online learning from user behavior
 *
 * 3. EWC++ (Elastic Weight Consolidation)
 *    - Prevents catastrophic forgetting
 *    - Consolidates important adaptations
 *    - Fisher information regularization
 *
 * 4. MEMORY-AUGMENTED RETRIEVAL
 *    - Episodic memory for context-aware embeddings
 *    - Attention over past similar embeddings
 *    - Domain prototype learning
 *
 * Architecture:
 *   ONNX(text) → [frozen 384d] → LoRA_A → LoRA_B → [adapted 384d]
 *                                 (384×r)   (r×384)
 */
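/*
 * A minimal sketch of the forward pass diagrammed above, assuming A is
 * stored row-major as 384×r and B as r×384 (matching LoRAWeights below)
 * and that the adapter output is added residually, as in standard LoRA;
 * the actual implementation is not visible from this declaration file:
 *
 *   function applyLoRA(x: number[], A: number[][], B: number[][]): number[] {
 *       const r = A[0].length;
 *       // Down-project: z = Aᵀx  (384d → r)
 *       const z = Array.from({ length: r }, (_, j) =>
 *           x.reduce((s, xi, i) => s + xi * A[i][j], 0));
 *       // Up-project and add the frozen embedding back: out = x + Bᵀz
 *       return x.map((xi, k) =>
 *           xi + z.reduce((s, zj, j) => s + zj * B[j][k], 0));
 *   }
 */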
export interface AdaptiveConfig {
    /** LoRA rank (lower = fewer params, higher = more expressive) */
    loraRank?: number;
    /** Learning rate for online updates */
    learningRate?: number;
    /** EWC regularization strength */
    ewcLambda?: number;
    /** Number of domain prototypes to maintain */
    numPrototypes?: number;
    /** Enable contrastive learning from co-edits */
    contrastiveLearning?: boolean;
    /** Temperature for contrastive loss */
    contrastiveTemp?: number;
    /** Memory capacity for episodic retrieval */
    memoryCapacity?: number;
}
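/*
 * Illustrative configuration (values chosen within the ranges mentioned
 * above, not documented defaults):
 *
 *   const config: AdaptiveConfig = {
 *       loraRank: 4,            // within the rank 2-8 range noted above
 *       learningRate: 0.01,
 *       ewcLambda: 0.5,
 *       numPrototypes: 8,
 *       contrastiveLearning: true,
 *       contrastiveTemp: 0.07,  // a common contrastive temperature
 *       memoryCapacity: 1000,
 *   };
 */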
export interface LoRAWeights {
    A: number[][];
    B: number[][];
    bias?: number[];
}
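/*
 * Adapter size, worked out: with dimension d = 384 and rank r, A holds
 * d·r values and B holds r·d, so at r = 4 the adapter has
 * 384·4 + 4·384 = 3,072 parameters, plus an optional 384-entry bias.
 */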
export interface DomainPrototype {
    domain: string;
    centroid: number[];
    count: number;
    variance: number;
}
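/*
 * One plausible way a prototype centroid is maintained (a sketch under
 * assumptions; this module's actual update rule is not declared here):
 * fold each new embedding into a running mean.
 *
 *   function foldIntoPrototype(p: DomainPrototype, emb: number[]): void {
 *       p.count += 1;
 *       for (let i = 0; i < emb.length; i++) {
 *           // Incremental mean: c += (x - c) / n
 *           p.centroid[i] += (emb[i] - p.centroid[i]) / p.count;
 *       }
 *   }
 */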
export interface AdaptiveStats {
    baseModel: string;
    dimension: number;
    loraRank: number;
    loraParams: number;
    adaptations: number;
    prototypes: number;
    memorySize: number;
    ewcConsolidations: number;
    contrastiveUpdates: number;
}
export declare class AdaptiveEmbedder {
    private config;
    private lora;
    private prototypes;
    private episodic;
    private onnxReady;
    private dimension;
    private adaptationCount;
    private ewcCount;
    private contrastiveCount;
    private coEditBuffer;
    constructor(config?: AdaptiveConfig);
    /**
     * Initialize ONNX backend
     */
    init(): Promise<void>;
    /**
     * Generate adaptive embedding
     * Pipeline: ONNX → LoRA → Prototype Adjustment → Episodic Augmentation
     */
    embed(text: string, options?: {
        domain?: string;
        useEpisodic?: boolean;
        storeInMemory?: boolean;
    }): Promise<number[]>;
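    /*
     * Typical call, using only the options declared above (option values
     * are illustrative):
     *
     *   const embedder = new AdaptiveEmbedder({ loraRank: 4 });
     *   await embedder.init();
     *   const vec = await embedder.embed('function parseConfig() { ... }', {
     *       domain: 'typescript',
     *       useEpisodic: true,
     *       storeInMemory: true,
     *   });
     */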
    /**
     * Batch embed with adaptation
     */
    embedBatch(texts: string[], options?: {
        domain?: string;
    }): Promise<number[][]>;
    /**
     * Learn from co-edit pattern (contrastive learning)
     * Files edited together should have similar embeddings
     */
    learnCoEdit(file1: string, content1: string, file2: string, content2: string): Promise<number>;
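    /*
     * Example: record that two files changed together so their embeddings
     * are pulled closer (the numeric return value is not documented in
     * this declaration; it is plausibly the contrastive loss):
     *
     *   await embedder.learnCoEdit(
     *       'src/router.ts', routerSource,
     *       'src/routes.ts', routesSource,
     *   );
     */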
    /**
     * Process co-edit batch with contrastive loss
     */
    private processCoEditBatch;
    /**
     * Learn from trajectory outcome (reinforcement-like)
     */
    learnFromOutcome(context: string, action: string, success: boolean, quality?: number): Promise<void>;
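    /*
     * Example: reinforce a successful action (the quality scale is not
     * documented; a 0-1 range is assumed here):
     *
     *   await embedder.learnFromOutcome(
     *       'refactor auth middleware',
     *       'extracted shared token validator',
     *       true,
     *       0.9,
     *   );
     */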
    /**
     * EWC consolidation - prevent forgetting important adaptations
     * OPTIMIZED: Works with Float32Array episodic entries
     */
    consolidate(): Promise<void>;
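    /*
     * Standard EWC background for the declaration above: after
     * consolidation, later updates are penalized for moving weight θᵢ away
     * from its consolidated value θ*ᵢ, scaled by Fisher information Fᵢ
     * (the exact EWC++ variant used here is not visible from this file):
     *
     *   loss = taskLoss + (ewcLambda / 2) · Σᵢ Fᵢ · (θᵢ − θ*ᵢ)²
     */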
    /**
     * Fallback hash embedding
     */
    private hashEmbed;
    private normalize;
    /**
     * Get statistics
     */
    getStats(): AdaptiveStats;
    /**
     * Export learned weights
     */
    export(): {
        lora: LoRAWeights;
        prototypes: DomainPrototype[];
        stats: AdaptiveStats;
    };
    /**
     * Import learned weights
     */
    import(data: {
        lora?: LoRAWeights;
        prototypes?: DomainPrototype[];
    }): void;
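    /*
     * Example: persist adaptations across sessions by round-tripping
     * export()/import() through JSON (node:fs/promises and the file path
     * are illustrative):
     *
     *   const snapshot = embedder.export();
     *   await writeFile('adapter.json', JSON.stringify(snapshot));
     *   // ...in a later session...
     *   const saved = JSON.parse(await readFile('adapter.json', 'utf8'));
     *   embedder.import({ lora: saved.lora, prototypes: saved.prototypes });
     */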
    /**
     * Reset adaptations
     */
    reset(): void;
    /**
     * Get LoRA cache statistics
     */
    getCacheStats(): {
        size: number;
        maxSize: number;
    };
}
export declare function getAdaptiveEmbedder(config?: AdaptiveConfig): AdaptiveEmbedder;
export declare function initAdaptiveEmbedder(config?: AdaptiveConfig): Promise<AdaptiveEmbedder>;
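/*
 * The factory names suggest a shared module-level instance; a likely usage
 * pattern (singleton semantics are inferred from the names, not documented
 * here):
 *
 *   import { initAdaptiveEmbedder } from './adaptive-embedder';
 *   const embedder = await initAdaptiveEmbedder({ loraRank: 4 });
 *   const vectors = await embedder.embedBatch(snippets, { domain: 'docs' });
 */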
export default AdaptiveEmbedder;
//# sourceMappingURL=adaptive-embedder.d.ts.map