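// mediaStore.js — encrypted per-owner media/file storage with two backends:
// an encrypted JSON index plus blob files on disk, or Postgres tables
// (media_entries / media_blobs) when isPostgresStorageMode() is true.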
import crypto from 'crypto';
import fs from 'fs/promises';
import path from 'path';
import { loadEncryptedJson, saveEncryptedJson, readEncryptedFile, writeEncryptedFile } from './cryptoUtils.js';
import { isPostgresStorageMode } from './dataPaths.js';
import {
  decryptBinaryPayload,
  decryptJsonPayload,
  encryptBinaryPayload,
  encryptJsonPayload,
  makeOwnerLookup,
  pgQuery,
  withPgTransaction,
} from './postgres.js';

const DATA_ROOT = '/data/media';
const INDEX_FILE = path.join(DATA_ROOT, 'index.json');
const BLOBS_DIR = path.join(DATA_ROOT, 'blobs');
const TRASH_RETENTION_MS = 30 * 24 * 60 * 60 * 1000; // trashed entries purge after 30 days
const GUEST_RETENTION_MS = 24 * 60 * 60 * 1000; // guest-owned entries expire after 24 hours
const MEDIA_QUOTA_BYTES = 5 * 1024 * 1024 * 1024; // 5 GiB per owner

const state = {
  loaded: false,
  index: {
    entries: {},
  },
};

function nowIso() {
  return new Date().toISOString();
}

function ownerKey(owner) {
  return `${owner.type}:${owner.id}`;
}

function ensureOwner(owner) {
  if (!owner?.type || !owner?.id) throw new Error('Invalid media owner');
  return owner;
}

function normalizeName(name, fallback = 'Untitled') {
  const clean = String(name || '').trim().replace(/[\\/:*?"<>|]+/g, '_');
  return clean || fallback;
}

function extensionFromName(name = '') {
  const ext = path.extname(String(name || '')).toLowerCase();
  return ext.startsWith('.') ? ext.slice(1) : ext;
}

function inferKind(name, mimeType = '') {
  const mime = String(mimeType || '').toLowerCase();
  const ext = extensionFromName(name);
  if (mime.startsWith('image/') || ['png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'].includes(ext)) return 'image';
  if (mime.startsWith('video/') || ['mp4', 'webm', 'mov'].includes(ext)) return 'video';
  if (mime.startsWith('audio/') || ['mp3', 'wav', 'ogg', 'm4a'].includes(ext)) return 'audio';
  if (mime === 'text/html' || ['html', 'htm'].includes(ext)) return 'rich_text';
  if (mime.startsWith('text/') || ['txt', 'md', 'json', 'js', 'ts', 'css', 'py', 'html', 'xml', 'csv', 'rtf'].includes(ext)) return 'text';
  return 'file';
}

function guessMimeType(name, fallbackKind = 'file') {
  const ext = extensionFromName(name);
  switch (ext) {
    case 'txt': return 'text/plain';
    case 'md': return 'text/markdown';
    case 'json': return 'application/json';
    case 'html':
    case 'htm': return 'text/html';
    case 'css': return 'text/css';
    case 'js': return 'application/javascript';
    // '.ts' is ambiguous (TypeScript source vs. MPEG transport stream), so it is served as plain text.
    case 'ts': return 'text/plain';
    case 'svg': return 'image/svg+xml';
    case 'png': return 'image/png';
    case 'jpg':
    case 'jpeg': return 'image/jpeg';
    case 'gif': return 'image/gif';
    case 'webp': return 'image/webp';
    case 'mp4': return 'video/mp4';
    case 'webm': return 'video/webm';
    case 'mp3': return 'audio/mpeg';
    case 'wav': return 'audio/wav';
    case 'ogg': return 'audio/ogg';
    case 'csv': return 'text/csv';
    default:
      if (fallbackKind === 'rich_text') return 'text/html';
      if (fallbackKind === 'text') return 'text/plain';
      return 'application/octet-stream';
  }
}

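// Each index entry is encrypted with additional authenticated data (AAD)
// derived from its id, so an encrypted payload cannot be silently swapped
// onto a different row without failing decryption.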
function mediaEntryAad(id) {
  return `media-entry:${id}`;
}

async function loadSqlEntries() {
  const { rows } = await pgQuery('SELECT id, payload FROM media_entries');
  const entries = {};
  for (const row of rows) {
    const entry = decryptJsonPayload(row.payload, mediaEntryAad(row.id));
    if (entry?.id) entries[entry.id] = entry;
  }
  return entries;
}

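// Lazily loads the index on first use (from Postgres or the encrypted JSON
// file) and immediately sweeps anything whose purge/expiry deadline has passed.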
async function ensureLoaded() {
  if (state.loaded) return;
  if (isPostgresStorageMode()) {
    state.index = { entries: await loadSqlEntries() };
    state.loaded = true;
    await purgeExpiredInternal();
    return;
  }
  const stored = await loadEncryptedJson(INDEX_FILE);
  state.index = {
    entries: stored?.entries || {},
  };
  state.loaded = true;
  await purgeExpiredInternal();
}

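// Persists the in-memory index. In Postgres mode this upserts only the
// changed entries (all of them when changedIds is null) and stores the
// encrypted payload alongside plaintext columns used for querying; in file
// mode it rewrites the whole encrypted index file.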
async function saveIndex(changedIds = null, deletedIds = [], client = null) {
  if (isPostgresStorageMode()) {
    const idsToPersist = changedIds ? [...new Set(changedIds)] : Object.keys(state.index.entries);
    const idsToDelete = [...new Set(deletedIds || [])];
    const run = async (runner) => {
      for (const id of idsToDelete) {
        await runner.query('DELETE FROM media_entries WHERE id = $1', [id]);
      }
      for (const id of idsToPersist) {
        const entry = state.index.entries[id];
        if (!entry) continue;
        await runner.query(
          `INSERT INTO media_entries (
             id, owner_lookup, parent_id, entry_type, updated_at, created_at,
             trashed_at, purge_at, expires_at, size_bytes, payload
           )
           VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11::jsonb)
           ON CONFLICT (id)
           DO UPDATE SET
             owner_lookup = EXCLUDED.owner_lookup,
             parent_id = EXCLUDED.parent_id,
             entry_type = EXCLUDED.entry_type,
             updated_at = EXCLUDED.updated_at,
             created_at = EXCLUDED.created_at,
             trashed_at = EXCLUDED.trashed_at,
             purge_at = EXCLUDED.purge_at,
             expires_at = EXCLUDED.expires_at,
             size_bytes = EXCLUDED.size_bytes,
             payload = EXCLUDED.payload`,
          [
            entry.id,
            makeOwnerLookup({ type: entry.ownerType, id: entry.ownerId }),
            entry.parentId || null,
            entry.type,
            entry.updatedAt || entry.createdAt,
            entry.createdAt,
            entry.trashedAt || null,
            entry.purgeAt || null,
            entry.expiresAt || null,
            entry.size || 0,
            JSON.stringify(encryptJsonPayload(entry, mediaEntryAad(entry.id))),
          ]
        );
      }
    };
    if (client) {
      await run(client);
    } else {
      await withPgTransaction(run);
    }
    return;
  }
  await saveEncryptedJson(INDEX_FILE, state.index);
}

function getEntry(id) {
  return state.index.entries[id] || null;
}

function canAccess(entry, owner) {
  return !!entry && entry.ownerType === owner.type && entry.ownerId === owner.id;
}

function sanitizeEntry(entry) {
  return {
    id: entry.id,
    type: entry.type,
    name: entry.name,
    parentId: entry.parentId || null,
    ownerType: entry.ownerType,
    ownerId: entry.ownerId,
    mimeType: entry.mimeType || null,
    kind: entry.kind || null,
    size: entry.size || 0,
    source: entry.source || null,
    createdAt: entry.createdAt,
    updatedAt: entry.updatedAt,
    trashedAt: entry.trashedAt || null,
    purgeAt: entry.purgeAt || null,
    sessionIds: entry.sessionIds || [],
    deletedByAssistant: !!entry.deletedByAssistant,
  };
}

function blobPathFor(id) {
  return path.join(BLOBS_DIR, `${id}.bin`);
}

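// Blob AAD binds the ciphertext to both the entry id and its owner, so a
// blob cannot be replayed under a different entry or a different owner.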
function buildAad(entry) {
  return `media:${entry.id}:${entry.ownerType}:${entry.ownerId}`;
}

async function saveBlob(entry, buffer, client = null) {
  if (isPostgresStorageMode()) {
    const runner = client || { query: pgQuery };
    await runner.query(
      `INSERT INTO media_blobs (entry_id, updated_at, payload)
       VALUES ($1, $2, $3)
       ON CONFLICT (entry_id)
       DO UPDATE SET updated_at = EXCLUDED.updated_at, payload = EXCLUDED.payload`,
      [entry.id, entry.updatedAt || nowIso(), encryptBinaryPayload(buffer, buildAad(entry))]
    );
    return;
  }
  await writeEncryptedFile(blobPathFor(entry.id), Buffer.from(buffer), buildAad(entry));
}

async function loadBlob(entry) {
  if (isPostgresStorageMode()) {
    const { rows } = await pgQuery('SELECT payload FROM media_blobs WHERE entry_id = $1', [entry.id]);
    if (!rows[0]) throw new Error(`Missing blob for media entry ${entry.id}`);
    return decryptBinaryPayload(rows[0].payload, buildAad(entry));
  }
  return readEncryptedFile(blobPathFor(entry.id), buildAad(entry));
}

async function deleteBlob(id) {
  if (isPostgresStorageMode()) {
    await pgQuery('DELETE FROM media_blobs WHERE entry_id = $1', [id]);
    return;
  }
  await fs.rm(blobPathFor(id), { force: true }).catch(() => {});
}

function isOwnedFile(entry, owner) {
  return entry?.type === 'file' && entry.ownerType === owner.type && entry.ownerId === owner.id;
}

function getChildren(owner, parentId, includeTrash = true) {
  return Object.values(state.index.entries).filter((entry) =>
    entry.ownerType === owner.type &&
    entry.ownerId === owner.id &&
    (entry.parentId || null) === (parentId || null) &&
    (includeTrash || !entry.trashedAt)
  );
}

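// Depth-first walk collecting an entry and every nested descendant; trash,
// restore, and permanent deletion all operate on whole subtrees via this.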
function collectDescendantIds(owner, rootId, acc = new Set()) {
  acc.add(rootId);
  const children = getChildren(owner, rootId, true);
  for (const child of children) {
    collectDescendantIds(owner, child.id, acc);
  }
  return [...acc];
}

function resolveParentFolder(owner, parentId) {
  if (!parentId) return null;
  const parent = getEntry(parentId);
  if (!parent || !canAccess(parent, owner) || parent.type !== 'folder') return null;
  return parent.id;
}

function wouldCreateCycle(owner, entryId, candidateParentId) {
  let currentId = candidateParentId || null;
  while (currentId) {
    if (currentId === entryId) return true;
    const current = getEntry(currentId);
    if (!current || !canAccess(current, owner)) return false;
    currentId = current.parentId || null;
  }
  return false;
}

function createQuotaError(owner, usage) {
  const err = new Error('Cloud storage limit reached. Delete files or empty trash to free space.');
  err.code = 'media:quota_exceeded';
  err.status = 413;
  err.usage = usage || null;
  err.owner = owner;
  return err;
}

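// Quota accounting counts trashed files against the owner's quota (only a
// permanent delete frees space), which is why the quota error message
// suggests emptying trash.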
function computeUsage(owner) {
  const files = Object.values(state.index.entries).filter((entry) => isOwnedFile(entry, owner));
  const totalBytes = files.reduce((sum, entry) => sum + (entry.size || 0), 0);
  const trashBytes = files.reduce((sum, entry) => sum + (entry.trashedAt ? (entry.size || 0) : 0), 0);
  const activeBytes = totalBytes - trashBytes;
  const quotaBytes = MEDIA_QUOTA_BYTES;
  return {
    quotaBytes,
    totalBytes,
    activeBytes,
    trashBytes,
    remainingBytes: Math.max(0, quotaBytes - totalBytes),
    percentUsed: quotaBytes > 0 ? Math.min(100, (totalBytes / quotaBytes) * 100) : 0,
    fileCount: files.length,
    trashFileCount: files.filter((entry) => !!entry.trashedAt).length,
  };
}

function assertQuotaAvailable(owner, additionalBytes = 0) {
  const usage = computeUsage(owner);
  if ((usage.totalBytes + Math.max(0, additionalBytes)) > usage.quotaBytes) {
    throw createQuotaError(owner, usage);
  }
  return usage;
}

async function purgeEntry(id) {
  const entry = getEntry(id);
  if (!entry) return;
  // deleteBlob is idempotent in both backends, so removing the blob before
  // the index entry keeps a retry after partial failure safe.
  if (entry.type === 'file') await deleteBlob(id);
  delete state.index.entries[id];
  // File mode defers the index rewrite to the caller; Postgres mode deletes
  // the row immediately.
  if (isPostgresStorageMode()) await saveIndex([], [id]);
}

async function purgeExpiredInternal() {
  const now = Date.now();
  let changed = false;
  for (const entry of Object.values(state.index.entries)) {
    const shouldPurge =
      (entry.purgeAt && new Date(entry.purgeAt).getTime() <= now) ||
      (entry.expiresAt && new Date(entry.expiresAt).getTime() <= now);
    if (!shouldPurge) continue;
    for (const id of collectDescendantIds({ type: entry.ownerType, id: entry.ownerId }, entry.id)) {
      await purgeEntry(id);
    }
    changed = true;
  }
  if (changed && !isPostgresStorageMode()) await saveIndex();
}

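// Background sweep: purge expired and trashed-out entries every six hours,
// in addition to the opportunistic sweep on first load.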
setInterval(() => {
  purgeExpiredInternal().catch((err) => console.error('mediaStore cleanup failed:', err));
}, 6 * 60 * 60 * 1000);

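// Public API. Every method takes an `owner` of shape { type, id }; methods
// that return entries pass them through sanitizeEntry() so internal fields
// (such as expiresAt) are not exposed to callers.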
export const mediaStore = {
  async list(owner, { view = 'active', parentId = null } = {}) {
    ensureOwner(owner);
    await ensureLoaded();
    const trashView = view === 'trash';
    const items = getChildren(owner, parentId, true)
      .filter((entry) => (trashView ? !!entry.trashedAt : !entry.trashedAt))
      .sort((a, b) => {
        if (a.type !== b.type) return a.type === 'folder' ? -1 : 1;
        return new Date(b.updatedAt || b.createdAt).getTime() - new Date(a.updatedAt || a.createdAt).getTime();
      })
      .map(sanitizeEntry);
    const breadcrumbs = [];
    let currentId = parentId;
    while (currentId) {
      const entry = getEntry(currentId);
      if (!entry || !canAccess(entry, owner)) break;
      breadcrumbs.unshift({ id: entry.id, name: entry.name });
      currentId = entry.parentId || null;
    }
    return { items, breadcrumbs, usage: computeUsage(owner) };
  },

  async get(owner, id) {
    ensureOwner(owner);
    await ensureLoaded();
    const entry = getEntry(id);
    if (!canAccess(entry, owner)) return null;
    return sanitizeEntry(entry);
  },

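  // Stores a new file. In Postgres mode the blob and index row are written
  // in a single transaction, so a failure cannot leave one without the other.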
  async storeBuffer(owner, {
    name,
    mimeType,
    buffer,
    parentId = null,
    sessionId = null,
    source = 'upload',
    kind = null,
    deletedByAssistant = false,
  }) {
    ensureOwner(owner);
    await ensureLoaded();
    // Buffer.byteLength handles both Buffer and string inputs consistently.
    const size = Buffer.byteLength(buffer);
    assertQuotaAvailable(owner, size);
    const entryId = crypto.randomUUID();
    const resolvedParentId = resolveParentFolder(owner, parentId);
    const fileName = normalizeName(name, 'Untitled');
    const inferredKind = kind || inferKind(fileName, mimeType);
    const now = nowIso();
    const entry = {
      id: entryId,
      type: 'file',
      name: fileName,
      ownerType: owner.type,
      ownerId: owner.id,
      mimeType: mimeType || guessMimeType(fileName, inferredKind),
      kind: inferredKind,
      size,
      parentId: resolvedParentId,
      source,
      createdAt: now,
      updatedAt: now,
      trashedAt: null,
      purgeAt: null,
      sessionIds: sessionId ? [sessionId] : [],
      expiresAt: owner.type === 'guest' ? new Date(Date.now() + GUEST_RETENTION_MS).toISOString() : null,
      deletedByAssistant,
    };
    state.index.entries[entry.id] = entry;
    if (isPostgresStorageMode()) {
      await withPgTransaction(async (client) => {
        await saveBlob(entry, Buffer.from(buffer), client);
        await saveIndex([entry.id], [], client);
      });
    } else {
      await saveBlob(entry, Buffer.from(buffer));
      await saveIndex([entry.id]);
    }
    return sanitizeEntry(entry);
  },

  async createFolder(owner, { name, parentId = null }) {
    ensureOwner(owner);
    await ensureLoaded();
    const resolvedParentId = resolveParentFolder(owner, parentId);
    const folder = {
      id: crypto.randomUUID(),
      type: 'folder',
      name: normalizeName(name, 'New Folder'),
      ownerType: owner.type,
      ownerId: owner.id,
      parentId: resolvedParentId,
      createdAt: nowIso(),
      updatedAt: nowIso(),
      trashedAt: null,
      purgeAt: null,
      sessionIds: [],
      expiresAt: owner.type === 'guest' ? new Date(Date.now() + GUEST_RETENTION_MS).toISOString() : null,
    };
    state.index.entries[folder.id] = folder;
    await saveIndex([folder.id]);
    return sanitizeEntry(folder);
  },

  async createDocument(owner, {
    name,
    parentId = null,
    richText = false,
    content = '',
    source = 'upload',
    sessionId = null,
  }) {
    const normalizedName = normalizeName(
      name,
      richText ? 'Untitled Document.html' : 'Untitled Document.txt'
    );
    return this.storeBuffer(owner, {
      name: normalizedName,
      mimeType: richText ? 'text/html' : 'text/plain',
      buffer: Buffer.from(content || (richText ? '<p></p>' : ''), 'utf8'),
      parentId,
      sessionId,
      source,
      kind: richText ? 'rich_text' : 'text',
    });
  },

  async readBuffer(owner, id) {
    ensureOwner(owner);
    await ensureLoaded();
    const entry = getEntry(id);
    if (!entry || entry.type !== 'file' || !canAccess(entry, owner)) return null;
    return {
      entry: sanitizeEntry(entry),
      buffer: await loadBlob(entry),
    };
  },

  async readText(owner, id) {
    const loaded = await this.readBuffer(owner, id);
    if (!loaded) return null;
    return {
      entry: loaded.entry,
      text: loaded.buffer.toString('utf8'),
    };
  },

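  // Rewrites a file's contents in place. Quota is only re-checked when the
  // new content is larger than the old, so shrinking edits always succeed.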
  async updateContent(owner, id, { buffer, name = null, mimeType = null, kind = null }) {
    ensureOwner(owner);
    await ensureLoaded();
    const entry = getEntry(id);
    if (!entry || entry.type !== 'file' || !canAccess(entry, owner)) return null;
    const nextSize = Buffer.byteLength(buffer);
    const delta = nextSize - (entry.size || 0);
    if (delta > 0) assertQuotaAvailable(owner, delta);
    if (name) entry.name = normalizeName(name, entry.name);
    if (mimeType) entry.mimeType = mimeType;
    if (kind) entry.kind = kind;
    entry.size = nextSize;
    entry.updatedAt = nowIso();
    if (isPostgresStorageMode()) {
      await withPgTransaction(async (client) => {
        await saveBlob(entry, Buffer.from(buffer), client);
        await saveIndex([entry.id], [], client);
      });
    } else {
      await saveBlob(entry, Buffer.from(buffer));
      await saveIndex([entry.id]);
    }
    return sanitizeEntry(entry);
  },

  async rename(owner, id, name) {
    ensureOwner(owner);
    await ensureLoaded();
    const entry = getEntry(id);
    if (!entry || !canAccess(entry, owner)) return null;
    entry.name = normalizeName(name, entry.name || 'Untitled');
    entry.updatedAt = nowIso();
    await saveIndex([entry.id]);
    return sanitizeEntry(entry);
  },

  async move(owner, ids, parentId = null) {
    ensureOwner(owner);
    await ensureLoaded();
    const destinationId = parentId ? resolveParentFolder(owner, parentId) : null;
    if (parentId && !destinationId) throw new Error('Invalid destination folder');
    const updated = [];
    const changedIds = new Set();
    for (const id of ids || []) {
      const entry = getEntry(id);
      if (!entry || !canAccess(entry, owner)) continue;
      if (destinationId === entry.id) continue;
      if (wouldCreateCycle(owner, entry.id, destinationId)) continue;
      entry.parentId = destinationId;
      entry.updatedAt = nowIso();
      updated.push(sanitizeEntry(entry));
      changedIds.add(entry.id);
    }
    if (changedIds.size) await saveIndex([...changedIds]);
    return updated;
  },

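  // Trashing cascades to all descendants and stamps each with a purge
  // deadline TRASH_RETENTION_MS (30 days) out; the background sweep then
  // deletes them permanently.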
  async moveToTrash(owner, ids) {
    ensureOwner(owner);
    await ensureLoaded();
    const trashed = [];
    const changedIds = new Set();
    const now = nowIso();
    const purgeAt = new Date(Date.now() + TRASH_RETENTION_MS).toISOString();
    for (const id of ids || []) {
      const entry = getEntry(id);
      if (!entry || !canAccess(entry, owner)) continue;
      for (const targetId of collectDescendantIds(owner, id)) {
        const target = getEntry(targetId);
        if (!target) continue;
        target.trashedAt = now;
        target.purgeAt = purgeAt;
        target.updatedAt = now;
        trashed.push(sanitizeEntry(target));
        changedIds.add(target.id);
      }
    }
    if (changedIds.size) await saveIndex([...changedIds]);
    return trashed;
  },

  async restore(owner, ids) {
    ensureOwner(owner);
    await ensureLoaded();
    const restored = [];
    const changedIds = new Set();
    for (const id of ids || []) {
      const entry = getEntry(id);
      if (!entry || !canAccess(entry, owner)) continue;
      for (const targetId of collectDescendantIds(owner, id)) {
        const target = getEntry(targetId);
        if (!target) continue;
        target.trashedAt = null;
        target.purgeAt = null;
        target.updatedAt = nowIso();
        restored.push(sanitizeEntry(target));
        changedIds.add(target.id);
      }
    }
    if (changedIds.size) await saveIndex([...changedIds]);
    return restored;
  },

  async deleteForever(owner, ids) {
    ensureOwner(owner);
    await ensureLoaded();
    const removedIds = new Set();
    for (const id of ids || []) {
      const entry = getEntry(id);
      if (!entry || !canAccess(entry, owner)) continue;
      for (const targetId of collectDescendantIds(owner, id)) {
        removedIds.add(targetId);
      }
    }
    for (const targetId of removedIds) {
      await purgeEntry(targetId);
    }
    if (removedIds.size && !isPostgresStorageMode()) await saveIndex();
    return [...removedIds];
  },

  async attachToSession(owner, ids, sessionId) {
    ensureOwner(owner);
    if (!sessionId) return [];
    await ensureLoaded();
    const updated = [];
    const changedIds = new Set();
    for (const id of ids || []) {
      const entry = getEntry(id);
      if (!entry || !canAccess(entry, owner)) continue;
      entry.sessionIds = [...new Set([...(entry.sessionIds || []), sessionId])];
      entry.updatedAt = nowIso();
      updated.push(sanitizeEntry(entry));
      changedIds.add(entry.id);
    }
    if (changedIds.size) await saveIndex([...changedIds]);
    return updated;
  },

  async getUsage(owner) {
    ensureOwner(owner);
    await ensureLoaded();
    return computeUsage(owner);
  },
};
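
// Illustrative usage (a sketch, not part of this module; the owner object
// and ids below are hypothetical and would come from the caller's auth layer):
//
//   import { mediaStore } from './mediaStore.js';
//
//   const owner = { type: 'user', id: 'u_123' };
//   const doc = await mediaStore.createDocument(owner, {
//     name: 'notes.md',
//     content: '# Notes',
//   });
//   const { text } = await mediaStore.readText(owner, doc.id);
//   await mediaStore.moveToTrash(owner, [doc.id]);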