Spaces:
Running
Running
/**
 * Property-Based Test: Property 41 - Lazy Loading Implementation
 * Validates: Requirements 15.5
 *
 * Tests that the parsed model cache correctly implements lazy loading:
 * - Models are only parsed once (cache hit avoids re-parsing)
 * - Cache evicts the oldest entry when at capacity
 * - Uploaded files (no path key) are not cached — NOTE(review): this bullet
 *   is not exercised by any property below; add coverage or confirm it is
 *   covered elsewhere.
 */
import { describe, it, expect, beforeEach } from 'vitest';
import fc from 'fast-check';

// ─── Extracted cache logic (mirrors app.js implementation) ───────────────────
/**
 * Creates a parsed model cache with FIFO eviction: when a NEW key would push
 * the cache past `maxSize`, the oldest-inserted entry is dropped. Reads do
 * not refresh recency, so this is insertion-order (FIFO) eviction, not LRU.
 *
 * @param {number} maxSize - Maximum number of entries held at once.
 * @returns {{get: function, set: function, has: function, size: function, clear: function, keys: function}}
 */
function createModelCache(maxSize) {
  // Map preserves insertion order; the first key is always the oldest entry.
  const cache = new Map();
  return {
    /** Returns the cached value for `key`, or `undefined` on a miss. */
    get(key) {
      return cache.get(key);
    },
    /** Stores `value` under `key`, evicting the oldest entry if needed. */
    set(key, value) {
      // Only evict when inserting a genuinely new key: overwriting an
      // existing key does not grow the Map, and evicting in that case would
      // needlessly drop the oldest entry and shrink the cache below capacity.
      if (!cache.has(key) && cache.size >= maxSize) {
        // Evict oldest (first inserted) entry
        const firstKey = cache.keys().next().value;
        cache.delete(firstKey);
      }
      cache.set(key, value);
    },
    /** True if `key` is currently cached. */
    has(key) {
      return cache.has(key);
    },
    /** Current number of cached entries. */
    size() {
      return cache.size;
    },
    /** Removes every entry. */
    clear() {
      cache.clear();
    },
    /** Cached keys in insertion (oldest-first) order. */
    keys() {
      return Array.from(cache.keys());
    }
  };
}
/**
 * Simulates the model selection flow with caching (lazy loading).
 *
 * @param {{get: function, set: function, has: function}} cache - Parsed model cache.
 * @param {string|null|undefined} modelPath - Cache key; nullish for uploaded
 *   files that have no stable path key.
 * @param {(path: string) => Promise<any>} parseModel - Parser invoked on a miss.
 * @returns {Promise<{parsedModel: any, cacheHit: boolean}>} `cacheHit` is true
 *   exactly when `parseModel` was NOT invoked.
 */
async function selectModelWithCache(cache, modelPath, parseModel) {
  // Uploaded files have no path key and must not be cached (see module
  // header): always parse them fresh and leave the cache untouched.
  if (modelPath == null) {
    const parsed = await parseModel(modelPath);
    return { parsedModel: parsed, cacheHit: false };
  }
  // Use has() rather than truthiness of get() so that a cached falsy value
  // (0, '', null) still counts as a hit instead of triggering a re-parse.
  if (cache.has(modelPath)) {
    return { parsedModel: cache.get(modelPath), cacheHit: true };
  }
  const parsed = await parseModel(modelPath);
  cache.set(modelPath, parsed);
  return { parsedModel: parsed, cacheHit: false };
}
// ─── Tests ────────────────────────────────────────────────────────────────────
describe('Property 41: Lazy Loading Implementation', () => {
  /**
   * **Validates: Requirements 15.5**
   *
   * Property: For any model path, selecting the same model twice should
   * result in a cache hit on the second selection (parse is called only once).
   */
  it('should cache parsed models and avoid re-parsing on repeated selection', async () => {
    const nonBlankPath = fc
      .string({ minLength: 1, maxLength: 50 })
      .filter((s) => s.trim().length > 0);
    await fc.assert(
      fc.asyncProperty(nonBlankPath, async (modelPath) => {
        const cache = createModelCache(10);
        let timesParsed = 0;
        const fakeParse = async (path) => {
          timesParsed += 1;
          return { metadata: { fileName: path }, graph: { nodes: [] } };
        };
        // Initial selection: a miss that invokes the parser once.
        const initial = await selectModelWithCache(cache, modelPath, fakeParse);
        expect(initial.cacheHit).toBe(false);
        expect(timesParsed).toBe(1);
        // Repeat selection: served from the cache, no further parsing.
        const repeat = await selectModelWithCache(cache, modelPath, fakeParse);
        expect(repeat.cacheHit).toBe(true);
        expect(timesParsed).toBe(1);
        // The identical parsed object is handed back both times.
        expect(repeat.parsedModel).toBe(initial.parsedModel);
      }),
      { numRuns: 100 }
    );
  });
  /**
   * **Validates: Requirements 15.5**
   *
   * Property: For any set of distinct model paths up to cache capacity,
   * all models should be cached after selection.
   */
  it('should cache all models within capacity', async () => {
    const uniquePaths = fc
      .array(
        fc.string({ minLength: 1, maxLength: 30 }).filter((s) => s.trim().length > 0),
        { minLength: 1, maxLength: 8 }
      )
      .map((arr) => [...new Set(arr)]); // de-duplicate paths
    await fc.assert(
      fc.asyncProperty(uniquePaths, async (modelPaths) => {
        const capacity = 10;
        const cache = createModelCache(capacity);
        const fakeParse = async (path) => ({ metadata: { fileName: path } });
        for (const path of modelPaths) {
          await selectModelWithCache(cache, path, fakeParse);
        }
        // Every surviving path (the most recent `capacity` of them) is cached.
        for (const path of modelPaths.slice(-capacity)) {
          expect(cache.has(path)).toBe(true);
        }
      }),
      { numRuns: 100 }
    );
  });
  /**
   * **Validates: Requirements 15.5**
   *
   * Property: When cache is at capacity and a new model is added,
   * the oldest entry is evicted and cache size stays at maxSize.
   */
  it('should evict oldest entry when cache is full', async () => {
    await fc.assert(
      fc.asyncProperty(fc.integer({ min: 1, max: 8 }), async (maxSize) => {
        const cache = createModelCache(maxSize);
        const fakeParse = async (path) => ({ metadata: { fileName: path } });
        // Fill to capacity with distinct synthetic paths.
        const seeded = Array.from({ length: maxSize }, (_, i) => `model_${i}.onnx`);
        for (const path of seeded) {
          await selectModelWithCache(cache, path, fakeParse);
        }
        expect(cache.size()).toBe(maxSize);
        // One extra insert must evict the first-inserted entry, not grow the cache.
        const extraPath = 'model_new.onnx';
        await selectModelWithCache(cache, extraPath, fakeParse);
        expect(cache.size()).toBe(maxSize);
        expect(cache.has(extraPath)).toBe(true);
        expect(cache.has(seeded[0])).toBe(false);
      }),
      { numRuns: 50 }
    );
  });
  /**
   * **Validates: Requirements 15.5**
   *
   * Property: For any sequence of model selections with repeated paths,
   * the total number of parse calls should equal the number of unique paths
   * (not the total number of selections).
   */
  it('should parse each unique model path exactly once', async () => {
    const selections = fc.array(
      fc.integer({ min: 0, max: 4 }).map((i) => `model_${i}.onnx`),
      { minLength: 2, maxLength: 20 }
    );
    await fc.assert(
      fc.asyncProperty(selections, async (selectionSequence) => {
        // Capacity of 10 comfortably holds all 5 possible models, so no
        // eviction can force a re-parse.
        const cache = createModelCache(10);
        const countsByPath = new Map();
        const fakeParse = async (path) => {
          countsByPath.set(path, (countsByPath.get(path) ?? 0) + 1);
          return { metadata: { fileName: path } };
        };
        for (const path of selectionSequence) {
          await selectModelWithCache(cache, path, fakeParse);
        }
        // No path may have been parsed more than once.
        for (const count of countsByPath.values()) {
          expect(count).toBe(1);
        }
      }),
      { numRuns: 100 }
    );
  });
});