Update stateManager.js

stateManager.js  (CHANGED)  +81 -31
@@ -3,21 +3,32 @@ import { createClient } from '@supabase/supabase-js';
 
 let supabase = null;
 const activeProjects = new Map();
+const initializationLocks = new Set();
 
 export const initDB = () => {
+  if (!process.env.SUPABASE_URL || !process.env.SUPABASE_SERVICE_ROLE_KEY) {
+    console.error("Missing Supabase Env Variables");
+    return;
+  }
   supabase = createClient(process.env.SUPABASE_URL, process.env.SUPABASE_SERVICE_ROLE_KEY);
 };
 
 export const StateManager = {
   isLocked: (projectId) => initializationLocks.has(projectId),
   lock: (projectId) => initializationLocks.add(projectId),
   unlock: (projectId) => initializationLocks.delete(projectId),
 
   getProject: async (projectId) => {
+    // 1. Check Cache
+    if (activeProjects.has(projectId)) {
+      const cached = activeProjects.get(projectId);
+      // Extra safety: Ensure it's a "Real" project object with history arrays
+      if (cached.workerHistory && cached.pmHistory) {
+        return cached;
+      }
+    }
 
+    // 2. Fetch from DB
     const { data: proj, error } = await supabase.from('projects').select('*').eq('id', projectId).single();
     if (error || !proj) return null;
 
@@ -25,14 +36,16 @@ export const StateManager = {
       .select('*').eq('project_id', projectId)
       .order('chunk_index', { ascending: false }).limit(10);
 
+    // 3. Construct Full Memory Object
     const memoryObject = {
       ...proj.info,
       id: proj.id,
+      user_id: proj.user_id, // Note: Supabase usually uses snake_case 'user_id'
+      userId: proj.user_id,  // Keeping camelCase for compatibility
+      workerHistory: (chunks || []).filter(c => c.type === 'worker').reverse().flatMap(c => c.payload || []),
+      pmHistory: (chunks || []).filter(c => c.type === 'pm').reverse().flatMap(c => c.payload || []),
+      commandQueue: proj.info?.commandQueue || [],
+      failureCount: proj.info?.failureCount || 0,
       lastActive: Date.now()
     };
 
@@ -40,35 +53,48 @@ export const StateManager = {
     return memoryObject;
   },
 
   addHistory: async (projectId, type, role, text) => {
     const newMessage = { role, parts: [{ text }] };
 
+    // 1. Update local memory (Optimistic)
     const project = activeProjects.get(projectId);
     if (project) {
       const historyKey = type === 'pm' ? 'pmHistory' : 'workerHistory';
+
+      // FIX: Ensure the array exists before pushing
+      if (!Array.isArray(project[historyKey])) {
+        project[historyKey] = [];
+      }
       project[historyKey].push(newMessage);
     }
 
+    // 2. Database Sync
+    const { data: chunks, error: fetchError } = await supabase.from('message_chunks')
+      .select('*')
+      .eq('project_id', projectId)
+      .eq('type', type)
+      .order('chunk_index', { ascending: false })
+      .limit(1);
+
+    if (fetchError) {
+      console.error(`[DB ERROR] Fetching chunks for ${projectId}:`, fetchError.message);
+      return;
+    }
 
     const latest = chunks?.[0];
 
+    // Check if we can append to the latest chunk (limit 20 messages per chunk)
+    if (latest && latest.payload && latest.payload.length < 20) {
+      // APPEND
+      const updatedPayload = [...(latest.payload || []), newMessage]; // Safety check on payload
       const { error } = await supabase.from('message_chunks')
         .update({ payload: updatedPayload })
         .eq('id', latest.id);
 
       if (error) console.error(`[DB ERROR] Failed to append ${type} history:`, error.message);
     } else {
+      // CREATE NEW CHUNK
+      const nextIndex = latest ? (latest.chunk_index + 1) : 0;
       const { error } = await supabase.from('message_chunks').insert({
         project_id: projectId,
         type,
@@ -81,24 +107,48 @@ export const StateManager = {
   },
 
   updateProject: async (projectId, data) => {
+    // FIX: Don't create a "ghost" object if the project isn't in memory.
+    // If it's not in memory, update the DB only; the next getProject() will fetch fresh data.
+    if (activeProjects.has(projectId)) {
+      const current = activeProjects.get(projectId);
+      const newData = { ...current, ...data, lastActive: Date.now() };
+      activeProjects.set(projectId, newData);
+    }
 
+    // Prepare the DB payload for the JSONB 'info' column.
+    // 'info' is JSONB, so a plain update would replace the whole object;
+    // we build only the fields the caller provided, drop undefined keys below,
+    // and merge with the current DB value before writing.
     const payload = {
       info: {
+        title: data.title, // If undefined, it might clear the field if we aren't careful.
+        status: data.status,
+        stats: data.stats,
+        description: data.description,
+        commandQueue: data.commandQueue,
+        failureCount: data.failureCount
       }
     };
 
+    // Clean payload: Remove undefined keys to prevent wiping DB data
+    Object.keys(payload.info).forEach(key => payload.info[key] === undefined && delete payload.info[key]);
+
+    // Fetch the current row first so unrelated 'info' fields aren't wiped
+    // (shallow fetch-modify-write; a deep merge would need a dedicated Postgres function).
+    const { data: currentDb } = await supabase.from('projects').select('info').eq('id', projectId).single();
+    if (currentDb) {
+      const mergedInfo = { ...currentDb.info, ...payload.info };
+      const { error } = await supabase.from('projects').update({ info: mergedInfo }).eq('id', projectId);
+      if (error) console.error("[DB ERROR] Update Project failed:", error.message);
+    }
   },
 
   getSupabaseClient: () => supabase
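The diff adds the isLocked / lock / unlock primitives but does not show any caller. A minimal sketch of how a request handler might use them to guard project initialization; ensureProjectReady, the 100 ms polling delay, and the project-creation step are hypothetical and not part of this commit:

```js
import { StateManager, initDB } from './stateManager.js';

initDB();

// Hypothetical helper: make sure only one request initializes a given project at a time.
async function ensureProjectReady(projectId) {
  // If another request is already initializing this project, wait briefly and retry.
  while (StateManager.isLocked(projectId)) {
    await new Promise((resolve) => setTimeout(resolve, 100));
  }

  const existing = await StateManager.getProject(projectId);
  if (existing) return existing;

  StateManager.lock(projectId);
  try {
    // ...create the 'projects' row and any seed data here...
    return await StateManager.getProject(projectId);
  } finally {
    StateManager.unlock(projectId);
  }
}
```

Because initializationLocks is an in-process Set, this only serializes initialization within a single Node process; a multi-instance deployment would need a database-level lock instead.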
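addHistory writes each message into the newest message_chunks row for the given type and starts a new chunk (chunk_index + 1) once a chunk holds 20 messages; getProject reads back the latest 10 chunks per type and flattens them oldest-first. A small usage sketch, assuming initDB() has run and a project row with id 'proj_1' already exists:

```js
// Append two 'pm' messages; the second lands in the same chunk unless it is full.
await StateManager.addHistory('proj_1', 'pm', 'user', 'Summarize the current sprint.');
await StateManager.addHistory('proj_1', 'pm', 'model', 'Here is the summary...');

// Rebuild the project from DB: pmHistory is the newest 10 'pm' chunks, oldest first.
const project = await StateManager.getProject('proj_1');
console.log(project.pmHistory.map(m => m.parts[0].text));
```

With 20 messages per chunk and a 10-chunk read window, each rebuilt history holds at most the 200 most recent messages per type.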
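The comments in updateProject note that merging into the JSONB 'info' column needs either fetch-modify-write (what this commit does) or a Postgres-side function. If concurrent writers to the same project become a concern, the shallow merge could be pushed into the database so the read and write happen in one statement. merge_project_info below is a hypothetical function that would have to be created separately; it is sketched only in a comment, and the RPC call would replace the final fetch/update block inside updateProject:

```js
// Hypothetical alternative to the fetch-modify-write tail of updateProject.
// Requires a Postgres function (not part of this commit), e.g.:
//
//   create function merge_project_info(p_id uuid, new_info jsonb) returns void as $$
//     update projects set info = info || new_info where id = p_id;
//   $$ language sql;
//
// ('||' on jsonb is a shallow merge, the same depth as the JS spread used above.)
const { error } = await supabase.rpc('merge_project_info', {
  p_id: projectId,
  new_info: payload.info
});
if (error) console.error("[DB ERROR] Update Project failed:", error.message);
```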