Keeby-smilyai committed on
Commit
90fcfde
·
verified ·
1 Parent(s): 91bc418

Update backend.py

Browse files
Files changed (1) hide show
  1. backend.py +69 -159
backend.py CHANGED
@@ -1,4 +1,4 @@
1
- # backend.py — REFACTORED FOR ROBUSTNESS & RELIABILITY
2
  import sqlite3
3
  import os
4
  import json
@@ -18,45 +18,19 @@ DB_PATH = "code_agents_pro.db"
18
  PROJECT_ROOT = "./projects"
19
  os.makedirs(PROJECT_ROOT, exist_ok=True)
20
 
21
-
22
- # ------------------------------ DATABASE (REFACTORED WITH CONTEXT MANAGERS) ------------------------------
23
  def init_db():
24
- """Initializes the database schema safely."""
25
  with sqlite3.connect(DB_PATH) as conn:
26
  cursor = conn.cursor()
27
  cursor.executescript("""
28
- CREATE TABLE IF NOT EXISTS users (
29
- id INTEGER PRIMARY KEY AUTOINCREMENT,
30
- username TEXT UNIQUE NOT NULL,
31
- password_hash TEXT NOT NULL
32
- );
33
- CREATE TABLE IF NOT EXISTS projects (
34
- id INTEGER PRIMARY KEY AUTOINCREMENT,
35
- user_id INTEGER NOT NULL,
36
- title TEXT,
37
- description TEXT,
38
- status TEXT DEFAULT 'queued',
39
- zip_path TEXT,
40
- logs TEXT DEFAULT '',
41
- created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
42
- FOREIGN KEY (user_id) REFERENCES users(id)
43
- );
44
  CREATE INDEX IF NOT EXISTS idx_user_status ON projects(user_id, status);
45
  """)
46
- conn.commit()
47
 
48
  init_db()
49
 
50
- # --- USER & PROJECT CRUD (ALL REFACTORED FOR SAFETY) ---
51
-
52
- def hash_password(password):
53
- return hashlib.sha256(password.encode()).hexdigest()
54
-
55
- def verify_password(password, stored_hash):
56
- return hash_password(password) == stored_hash
57
-
58
  def _db_execute(query, params=(), fetchone=False, fetchall=False, commit=False):
59
- """A centralized, safe way to interact with the database."""
60
  try:
61
  with sqlite3.connect(DB_PATH) as conn:
62
  conn.row_factory = sqlite3.Row
@@ -65,43 +39,25 @@ def _db_execute(query, params=(), fetchone=False, fetchall=False, commit=False):
65
  if commit:
66
  conn.commit()
67
  return cursor.lastrowid
68
- if fetchone:
69
- return cursor.fetchone()
70
- if fetchall:
71
- return cursor.fetchall()
72
  except sqlite3.Error as e:
73
  print(f"Database error: {e}")
74
  return None
75
 
 
 
 
76
  def create_user(username, password):
77
- query = "INSERT INTO users (username, password_hash) VALUES (?, ?)"
78
- # Using a try-except block here specifically for the IntegrityError
79
  try:
80
- return _db_execute(query, (username, hash_password(password)), commit=True)
81
- except sqlite3.IntegrityError:
82
- return None
83
-
84
- def get_user_by_username(username):
85
- return _db_execute("SELECT id, password_hash FROM users WHERE username = ?", (username,), fetchone=True)
86
 
87
- def get_user_projects(user_id, limit=3):
88
- query = "SELECT * FROM projects WHERE user_id = ? ORDER BY created_at DESC LIMIT ?"
89
- return _db_execute(query, (user_id, limit), fetchall=True)
90
-
91
- def create_project(user_id, title, description):
92
- query = "INSERT INTO projects (user_id, title, description) VALUES (?, ?, ?)"
93
- return _db_execute(query, (user_id, title, description), commit=True)
94
-
95
- def update_project_status(project_id, status, logs=None, zip_path=None):
96
- query = "UPDATE projects SET status = ?, logs = COALESCE(?, logs), zip_path = COALESCE(?, zip_path) WHERE id = ?"
97
- _db_execute(query, (status, logs, zip_path, project_id), commit=True)
98
-
99
- def get_project(project_id):
100
- return _db_execute("SELECT * FROM projects WHERE id = ?", (project_id,), fetchone=True)
101
-
102
- def search_projects(user_id, query):
103
- sql_query = "SELECT id, title, description, status FROM projects WHERE user_id = ? AND (title LIKE ? OR description LIKE ?)"
104
- return _db_execute(sql_query, (user_id, f"%{query}%", f"%{query}%"), fetchall=True)
105
 
106
  # ------------------------------ MODEL LOADING & CACHING ------------------------------
107
  MODEL_REGISTRY = {
@@ -112,65 +68,32 @@ MODEL_REGISTRY = {
112
  "tester": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
113
  "publisher": "microsoft/Phi-3-mini-4k-instruct",
114
  }
115
-
116
  _MODEL_CACHE = {}
117
 
118
  def load_model(model_name):
119
- if model_name in _MODEL_CACHE:
120
- return _MODEL_CACHE[model_name]
121
-
122
  print(f"Loading model: {model_name}...")
123
  tokenizer = AutoTokenizer.from_pretrained(model_name)
124
- model = AutoModelForCausalLM.from_pretrained(
125
- model_name,
126
- torch_dtype="auto",
127
- device_map="auto",
128
- trust_remote_code=True,
129
- attn_implementation="eager"
130
- )
131
  _MODEL_CACHE[model_name] = (tokenizer, model)
132
  print(f"Model {model_name} loaded and cached.")
133
  return tokenizer, model
134
 
135
- # ------------------------------ AGENT PROMPTS ------------------------------
136
  ROLE_PROMPTS = {
137
- "planner": """You are an expert software planner. Your task is to break down a user's request into a detailed, actionable plan.
138
- - Clarify the core purpose.
139
- - List the key features.
140
- - Determine the necessary libraries and dependencies.
141
- - Define a complete file structure.
142
- Output ONLY a single JSON object with the keys: "purpose", "features", "dependencies", "files".
143
- The "files" key should be an array of strings representing file paths (e.g., ["src/main.py", "tests/test_main.py"]).""",
144
-
145
- "architect": """You are a software architect. Based on the file plan, you will create the initial content for each file.
146
- - For configuration files (like requirements.txt), list the dependencies.
147
- - For source code files, write a brief comment or placeholder (e.g., '# TODO: Implement main logic').
148
- - For documentation (README.md), write a title.
149
- Output ONLY a single JSON object where keys are file paths and values are the initial content.""",
150
-
151
- "coder": """You are a professional programmer. Your only job is to write clean, functional, and complete code for a given file based on instructions.
152
- - You must write the full code for the file path provided.
153
- - Do NOT add any explanations, introductions, or markdown formatting.
154
- - Your output must be ONLY the raw source code for the file.""",
155
-
156
- "reviewer": """You are a meticulous code reviewer. Your task is to analyze a file's code and identify potential bugs, style issues, or missing features.
157
- - Check for logical errors, security vulnerabilities, and bad practices.
158
- - Ensure the code is readable and well-commented.
159
- - Output ONLY a single JSON object with two keys: "has_issues" (boolean) and "suggestions" (a string containing a bulleted list of required changes). If no issues, "has_issues" is false.""",
160
-
161
- "tester": """You are a quality assurance engineer specializing in automated testing. Your task is to write a complete pytest test file for a given source code file.
162
- - Cover the main functionality and edge cases.
163
- - Use appropriate assertions.
164
- - Your output must be ONLY the raw source code for the test file.""",
165
-
166
- "publisher": """You are a release manager. Your job is to create the final project documentation and configuration based on the complete file structure.
167
- - Create a helpful README.md that explains the project's purpose and how to run it.
168
- - Create a standard .gitignore file.
169
- - Create a basic Dockerfile for containerization.
170
- Output ONLY a single JSON object where keys are the filenames ("README.md", ".gitignore", "Dockerfile") and values are their complete content."""
171
  }
172
-
173
- # ------------------------------ FILE SYSTEM TOOLS ------------------------------
174
  def get_project_dir(user_id, project_id):
175
  path = os.path.join(PROJECT_ROOT, str(user_id), str(project_id))
176
  os.makedirs(path, exist_ok=True)
@@ -179,147 +102,134 @@ def get_project_dir(user_id, project_id):
179
  def create_file(project_dir, path, content):
180
  full_path = os.path.join(project_dir, path)
181
  os.makedirs(os.path.dirname(full_path), exist_ok=True)
182
- with open(full_path, 'w', encoding='utf-8') as f:
183
- f.write(content)
184
 
185
  def read_file(project_dir, path):
186
  full_path = os.path.join(project_dir, path)
187
- if not os.path.exists(full_path): return None
188
- with open(full_path, 'r', encoding='utf-8') as f:
189
- return f.read()
190
 
191
  def zip_project(project_dir, project_id):
192
  zip_filename = f"project_{project_id}.zip"
193
- zip_path = os.path.join(os.path.dirname(project_dir), zip_filename) # Place zip in user folder, not project folder
194
  with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
195
  for root, _, files in os.walk(project_dir):
196
- for file in files:
197
- full_path = os.path.join(root, file)
198
- arcname = os.path.relpath(full_path, project_dir)
199
- zf.write(full_path, arcname)
200
  return zip_path
201
 
202
- # ------------------------------ CORE AI & AGENT LOGIC ------------------------------
203
  def _extract_json(text: str) -> Optional[Dict[str, Any]]:
204
- """Robustly extracts a JSON object from a string, even if wrapped in markdown."""
205
  match = re.search(r"```json\s*([\s\S]*?)\s*```|(\{[\s\S]*\})", text)
206
- if not match:
207
- return None
208
  json_str = match.group(1) or match.group(2)
209
- try:
210
- return json.loads(json_str)
211
  except json.JSONDecodeError:
212
  print(f"Failed to decode JSON: {json_str[:200]}...")
213
  return None
214
 
215
  def generate_with_model(role: str, prompt: str) -> str:
216
- """Generates a response from a cached model for a given role and prompt."""
217
  try:
218
  model_name = MODEL_REGISTRY[role]
219
  tokenizer, model = load_model(model_name)
220
-
221
  messages = [{"role": "system", "content": ROLE_PROMPTS[role]}, {"role": "user", "content": prompt}]
222
  input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
223
  inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
224
-
225
  outputs = model.generate(**inputs, max_new_tokens=2048, pad_token_id=tokenizer.eos_token_id)
226
- response = tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True)
227
- return response.strip()
228
-
229
  except Exception as e:
230
  print(f"Error during model generation for role {role}: {e}")
231
  return f'{{"error": "Failed to generate response: {str(e)}"}}'
232
 
233
- # ------------------------------ THE AGENT CHAIN EXECUTOR ------------------------------
234
  def run_agent_chain(project_id, user_id, initial_prompt):
235
  project_dir = get_project_dir(user_id, project_id)
236
  log_entries = []
237
 
238
  def log_step(agent, action, output=""):
239
  log_entry = f"**[{agent.upper()}]**: {action}\n"
240
- if output:
241
- log_entry += f"```\n{output[:1000]}{'...' if len(output) > 1000 else ''}\n```\n---\n"
242
  log_entries.append(log_entry)
243
  update_project_status(project_id, "running", logs="".join(log_entries))
244
 
245
  try:
246
- log_step("SYSTEM", f"Initializing project directory: {project_dir}")
247
  update_project_status(project_id, "running", logs="Agent team is assembling...")
248
 
249
  # 1. PLANNER
250
  log_step("PLANNER", "Analyzing user request and creating a project plan...")
251
  plan_response = generate_with_model("planner", initial_prompt)
252
  plan_data = _extract_json(plan_response)
253
- if not plan_data: raise ValueError("Planner failed to create a valid JSON plan.")
 
 
 
 
 
 
254
  log_step("PLANNER", "Plan created successfully.", json.dumps(plan_data, indent=2))
255
 
256
  # 2. ARCHITECT
257
- log_step("ARCHITECT", "Creating initial file skeletons based on the plan...")
258
- arch_prompt = f"Create initial content for the following files:\n{json.dumps(plan_data['files'])}"
259
  arch_response = generate_with_model("architect", arch_prompt)
260
  arch_data = _extract_json(arch_response)
261
  if not arch_data: raise ValueError("Architect failed to create valid JSON file structures.")
262
- for path, content in arch_data.items():
263
- create_file(project_dir, path, content)
264
  log_step("ARCHITECT", "File skeletons created.", "\n".join(arch_data.keys()))
265
 
266
  # 3. CODER
267
  source_files = [f for f in plan_data['files'] if f.startswith('src/') and f.endswith('.py')]
268
  for file_path in source_files:
269
  log_step("CODER", f"Writing complete code for `{file_path}`...")
270
- coder_prompt = f"Based on the project plan ({plan_data['purpose']}), write the full Python code for the file: `{file_path}`."
271
  code = generate_with_model("coder", coder_prompt)
272
  create_file(project_dir, file_path, code)
273
- log_step("CODER", f"Finished writing code for `{file_path}`.", code)
274
 
275
  # 4. REVIEWER
276
- log_step("REVIEWER", "Reviewing all generated source code for issues...")
277
  for file_path in source_files:
278
  code_content = read_file(project_dir, file_path)
279
- review_prompt = f"Review the following code from `{file_path}`:\n\n{code_content}"
 
280
  review_response = generate_with_model("reviewer", review_prompt)
281
  review_data = _extract_json(review_response)
282
- if review_data and review_data.get("has_issues"):
283
- log_step("REVIEWER", f"Found issues in `{file_path}`.", review_data.get("suggestions"))
284
- # In a more advanced version, this would trigger the CODER again (a "fix loop")
285
- else:
286
- log_step("REVIEWER", f"No major issues found in `{file_path}`.")
287
 
288
  # 5. TESTER
289
  log_step("TESTER", "Writing unit tests for all source code...")
290
  for file_path in source_files:
291
  code_content = read_file(project_dir, file_path)
 
292
  test_file_path = os.path.join("tests", f"test_{os.path.basename(file_path)}")
293
- tester_prompt = f"Write a complete pytest test file named `{test_file_path}` for the following code from `{file_path}`:\n\n{code_content}"
294
  test_code = generate_with_model("tester", tester_prompt)
295
  create_file(project_dir, test_file_path, test_code)
296
- log_step("TESTER", f"Generated test file `{test_file_path}`.", test_code)
297
 
298
  # 6. PUBLISHER
299
- log_step("PUBLISHER", "Generating final documentation and configuration files...")
300
- pub_prompt = f"The project has the following files: {json.dumps(list(os.walk(project_dir)))}. Generate README.md, .gitignore, and Dockerfile."
 
301
  pub_response = generate_with_model("publisher", pub_prompt)
302
  pub_data = _extract_json(pub_response)
303
  if not pub_data: raise ValueError("Publisher failed to create valid final assets.")
304
- for path, content in pub_data.items():
305
- create_file(project_dir, path, content)
306
  log_step("PUBLISHER", "Final assets created.", json.dumps(pub_data, indent=2))
307
 
308
  # 7. FINALIZATION
309
- log_step("SYSTEM", "Packaging the complete project into a ZIP file...")
310
  zip_path = zip_project(project_dir, project_id)
311
  update_project_status(project_id, "completed", logs="".join(log_entries), zip_path=zip_path)
312
- log_step("SYSTEM", "Project completed successfully!", f"Download available at: {zip_path}")
313
 
314
  except Exception as e:
315
  tb_str = traceback.format_exc()
316
  print(f"--- AGENT CHAIN FAILED for project {project_id} ---\n{tb_str}\n--------------------")
317
- error_log = "".join(log_entries) + f"\n\n❌ **CRITICAL ERROR:**\nAn unexpected error occurred and the process had to stop.\n\n**Details:**\n```{str(e)}\n\n{tb_str}```"
318
  update_project_status(project_id, "failed", logs=error_log)
319
 
320
-
321
  # ------------------------------ JOB QUEUE ------------------------------
322
- # Set to 1 worker to prevent VRAM OOM errors by running models sequentially.
323
  executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
324
 
325
  def queue_job(project_id, user_id, prompt):
 
1
+ # backend.py — FINAL HARDENED VERSION
2
  import sqlite3
3
  import os
4
  import json
 
18
  PROJECT_ROOT = "./projects"
19
  os.makedirs(PROJECT_ROOT, exist_ok=True)
20
 
21
+ # ------------------------------ DATABASE (ROBUST) ------------------------------
 
22
def init_db():
    """Create the users/projects tables and supporting index if absent.

    Restores the NOT NULL / AUTOINCREMENT constraints that the compacted
    one-line schema dropped: the app always supplies username,
    password_hash and user_id, so the constraints cost nothing and catch
    bad inserts early. CREATE TABLE IF NOT EXISTS leaves existing
    databases untouched, so this is backward-compatible.
    """
    with sqlite3.connect(DB_PATH) as conn:
        cursor = conn.cursor()
        cursor.executescript("""
            CREATE TABLE IF NOT EXISTS users (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                username TEXT UNIQUE NOT NULL,
                password_hash TEXT NOT NULL
            );
            CREATE TABLE IF NOT EXISTS projects (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                user_id INTEGER NOT NULL,
                title TEXT,
                description TEXT,
                status TEXT DEFAULT 'queued',
                zip_path TEXT,
                logs TEXT DEFAULT '',
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                FOREIGN KEY (user_id) REFERENCES users(id)
            );
            CREATE INDEX IF NOT EXISTS idx_user_status ON projects(user_id, status);
        """)
 
30
 
31
  init_db()
32
 
 
 
 
 
 
 
 
 
33
  def _db_execute(query, params=(), fetchone=False, fetchall=False, commit=False):
 
34
  try:
35
  with sqlite3.connect(DB_PATH) as conn:
36
  conn.row_factory = sqlite3.Row
 
39
  if commit:
40
  conn.commit()
41
  return cursor.lastrowid
42
+ if fetchone: return cursor.fetchone()
43
+ if fetchall: return cursor.fetchall()
 
 
44
  except sqlite3.Error as e:
45
  print(f"Database error: {e}")
46
  return None
47
 
48
def hash_password(password):
    """Return the hex SHA-256 digest of *password*.

    NOTE(review): unsalted single-round SHA-256 is weak for password
    storage (rainbow tables / GPU brute force). Switching to
    hashlib.scrypt or pbkdf2_hmac with a per-user salt would require
    migrating stored hashes, so it is only flagged here.
    """
    return hashlib.sha256(password.encode()).hexdigest()

def verify_password(password, stored_hash):
    """Check *password* against *stored_hash* in constant time.

    hmac.compare_digest avoids the timing side channel of a plain ==
    comparison; the boolean result is identical.
    """
    import hmac  # local import keeps the module's import block untouched
    return hmac.compare_digest(hash_password(password), stored_hash)
50
+
51
def create_user(username, password):
    """Insert a new user row; return its id, or None on failure.

    The IntegrityError guard is defensive: _db_execute already swallows
    sqlite3.Error (IntegrityError is a subclass) and returns None, so a
    duplicate username surfaces as None through either path.
    """
    params = (username, hash_password(password))
    try:
        return _db_execute("INSERT INTO users (username, password_hash) VALUES (?, ?)", params, commit=True)
    except sqlite3.IntegrityError:
        return None
 
 
 
 
55
 
56
def get_user_by_username(username):
    """Fetch the full user row for *username*, or None if absent."""
    return _db_execute("SELECT * FROM users WHERE username = ?", (username,), fetchone=True)


def get_user_projects(user_id, limit=20):
    """Return up to *limit* of the user's most recent projects."""
    query = "SELECT * FROM projects WHERE user_id = ? ORDER BY created_at DESC LIMIT ?"
    return _db_execute(query, (user_id, limit), fetchall=True)


def create_project(user_id, title, description):
    """Insert a project row (status defaults to 'queued'); return its id."""
    query = "INSERT INTO projects (user_id, title, description) VALUES (?, ?, ?)"
    return _db_execute(query, (user_id, title, description), commit=True)


def update_project_status(project_id, status, logs=None, zip_path=None):
    """Set status; logs/zip_path are only overwritten when non-None (COALESCE)."""
    query = "UPDATE projects SET status=?, logs=COALESCE(?, logs), zip_path=COALESCE(?, zip_path) WHERE id=?"
    _db_execute(query, (status, logs, zip_path, project_id), commit=True)


def get_project(project_id):
    """Fetch a single project row by primary key, or None."""
    return _db_execute("SELECT * FROM projects WHERE id = ?", (project_id,), fetchone=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
  # ------------------------------ MODEL LOADING & CACHING ------------------------------
63
  MODEL_REGISTRY = {
 
68
  "tester": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
69
  "publisher": "microsoft/Phi-3-mini-4k-instruct",
70
  }
 
71
  _MODEL_CACHE = {}
72
 
73
def load_model(model_name):
    """Return (tokenizer, model) for *model_name*, loading on first use.

    Results are memoized in _MODEL_CACHE so each checkpoint is pulled
    from disk/hub exactly once per process.
    """
    cached = _MODEL_CACHE.get(model_name)
    if cached is not None:
        return cached
    print(f"Loading model: {model_name}...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype="auto",
        device_map="auto",
        trust_remote_code=True,
        attn_implementation="eager",
    )
    _MODEL_CACHE[model_name] = (tokenizer, model)
    print(f"Model {model_name} loaded and cached.")
    return tokenizer, model
81
 
82
# ------------------------------ AGENT PROMPTS (SIMPLIFIED & ROBUST) ------------------------------
# System prompts for each agent role. Roles producing structured output
# (planner, architect, reviewer, publisher) are parsed with _extract_json(),
# so their prompts demand a single JSON object; coder/tester emit raw source.
ROLE_PROMPTS = {
    # Emits the project plan; run_agent_chain requires the "files" key.
    "planner": """You are an expert software planner. Break down the user's request into a detailed plan.
Output ONLY a single JSON object with the keys: "purpose", "features", "dependencies", and "files".
The "files" key MUST be an array of strings representing complete file paths (e.g., ["src/main.py", "tests/test_main.py", "requirements.txt"]).""",
    # Emits {path: placeholder content} used to scaffold the project tree.
    "architect": """You are a software architect. Create initial placeholder content for a list of files.
Output ONLY a single JSON object where keys are file paths and values are the initial content (e.g., a comment like '# Main application logic here').""",
    # Raw code only — output is written verbatim to the target file.
    "coder": "You are a professional programmer. Your ONLY job is to write the complete, clean, and functional code for the single file requested. Do NOT add any explanations, introductions, or markdown formatting. Output ONLY the raw source code.",
    # Advisory review; findings are logged by the chain, not auto-applied.
    "reviewer": """You are a meticulous code reviewer. Analyze the given code for bugs, style issues, and security vulnerabilities.
Output ONLY a single JSON object with two keys: "has_issues" (boolean) and "suggestions" (a string containing a bulleted list of required changes).""",
    # Raw pytest file — written verbatim under tests/.
    "tester": "You are a QA engineer. Write a complete pytest test file for the given source code. Cover main functionality and edge cases. Output ONLY the raw source code for the test file.",
    # Emits {filename: content} for the final README/.gitignore/Dockerfile.
    "publisher": """You are a release manager. Create final documentation and configuration files.
Output ONLY a single JSON object where keys are the filenames ("README.md", ".gitignore", "Dockerfile") and values are their complete string content."""
}
96
+ # ------------------------------ FILE SYSTEM & AI TOOLS ------------------------------
 
97
  def get_project_dir(user_id, project_id):
98
  path = os.path.join(PROJECT_ROOT, str(user_id), str(project_id))
99
  os.makedirs(path, exist_ok=True)
 
102
def create_file(project_dir, path, content):
    """Write *content* (UTF-8 text) to *path* inside *project_dir*.

    Parent directories are created as needed.

    SECURITY: *path* comes from LLM output and is untrusted — a value
    like "../../x" or an absolute path would previously escape the
    project directory. The resolved target is now required to stay
    inside *project_dir*.

    Raises:
        ValueError: if the resolved path escapes the project directory.
    """
    base = os.path.abspath(project_dir)
    full_path = os.path.abspath(os.path.join(base, path))
    # os.path.join discards `base` when `path` is absolute, and ".."
    # components can climb out of it — both cases fail this prefix check.
    if full_path != base and not full_path.startswith(base + os.sep):
        raise ValueError(f"Unsafe file path outside project directory: {path!r}")
    os.makedirs(os.path.dirname(full_path), exist_ok=True)
    with open(full_path, 'w', encoding='utf-8') as f:
        f.write(content)
 
106
 
107
def read_file(project_dir, path):
    """Return the UTF-8 text content of *path* under *project_dir*, or None.

    Returns None for ANY unreadable path — missing file, path that is a
    directory, permission problem — instead of raising. Callers in the
    agent chain treat None as "skip this file", and the previous
    FileNotFoundError-only guard let IsADirectoryError/PermissionError
    abort the whole run.
    """
    full_path = os.path.join(project_dir, path)
    try:
        with open(full_path, 'r', encoding='utf-8') as f:
            return f.read()
    except OSError:  # broader than FileNotFoundError by design
        return None
112
 
113
def zip_project(project_dir, project_id):
    """Bundle every file under *project_dir* into project_<id>.zip.

    The archive is written next to the project directory (i.e. in the
    user's folder) rather than inside it, so the zip never contains
    itself. Returns the path of the created archive.
    """
    archive_name = f"project_{project_id}.zip"
    archive_path = os.path.join(os.path.dirname(project_dir), archive_name)
    with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as archive:
        for current_root, _, filenames in os.walk(project_dir):
            for filename in filenames:
                absolute = os.path.join(current_root, filename)
                archive.write(absolute, os.path.relpath(absolute, project_dir))
    return archive_path
120
 
 
121
  def _extract_json(text: str) -> Optional[Dict[str, Any]]:
 
122
  match = re.search(r"```json\s*([\s\S]*?)\s*```|(\{[\s\S]*\})", text)
123
+ if not match: return None
 
124
  json_str = match.group(1) or match.group(2)
125
+ try: return json.loads(json_str)
 
126
  except json.JSONDecodeError:
127
  print(f"Failed to decode JSON: {json_str[:200]}...")
128
  return None
129
 
130
def generate_with_model(role: str, prompt: str) -> str:
    """Run one chat-style generation for *role* and return the raw text.

    Looks up the role's checkpoint in MODEL_REGISTRY, prepends the role's
    system prompt, and decodes only the newly generated tokens.
    On ANY failure returns a JSON string of the form {"error": "..."} so
    downstream _extract_json() can still parse the result.
    """
    try:
        model_name = MODEL_REGISTRY[role]
        tokenizer, model = load_model(model_name)
        messages = [
            {"role": "system", "content": ROLE_PROMPTS[role]},
            {"role": "user", "content": prompt},
        ]
        input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
        outputs = model.generate(**inputs, max_new_tokens=2048, pad_token_id=tokenizer.eos_token_id)
        # Slice off the prompt tokens so only the completion is returned.
        return tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True).strip()
    except Exception as e:
        print(f"Error during model generation for role {role}: {e}")
        # BUG FIX: the old f-string fallback produced INVALID JSON whenever
        # str(e) contained quotes, braces or newlines; json.dumps escapes
        # everything, so the error envelope is always parseable.
        return json.dumps({"error": f"Failed to generate response: {e}"})
142
 
143
+ # ------------------------------ THE AGENT CHAIN EXECUTOR (REWRITTEN FOR RELIABILITY) ------------------------------
144
def run_agent_chain(project_id, user_id, initial_prompt):
    """Drive the full multi-agent pipeline for one project.

    Stages: plan -> scaffold -> code -> review -> test -> publish -> zip.
    Progress is streamed to the DB via log_step(); on any failure the
    project is marked 'failed' with the error summary in its logs.
    """
    project_dir = get_project_dir(user_id, project_id)
    log_entries = []

    def log_step(agent, action, output=""):
        # Append a markdown log entry and push the whole log to the DB.
        # NOTE: this always re-marks the project as "running", so it must
        # never run after the terminal status update (see step 7).
        log_entry = f"**[{agent.upper()}]**: {action}\n"
        if output:
            log_entry += f"```\n{output[:1000]}{'...' if len(output) > 1000 else ''}\n```\n---\n"
        log_entries.append(log_entry)
        update_project_status(project_id, "running", logs="".join(log_entries))

    try:
        log_step("SYSTEM", "Initializing project directory...")
        update_project_status(project_id, "running", logs="Agent team is assembling...")

        # 1. PLANNER — produce {"purpose", "features", "dependencies", "files"}.
        log_step("PLANNER", "Analyzing user request and creating a project plan...")
        plan_response = generate_with_model("planner", initial_prompt)
        plan_data = _extract_json(plan_response)
        if not plan_data or "files" not in plan_data:
            raise ValueError("Planner failed to create a valid JSON plan with a 'files' key.")

        # ROBUSTNESS: some LLMs return [{"file": "path"}] instead of ["path"].
        if plan_data["files"] and isinstance(plan_data["files"][0], dict):
            log_step("SYSTEM", "Planner returned a list of objects. Normalizing to a list of strings.")
            plan_data["files"] = [item["file"] for item in plan_data["files"] if "file" in item]

        log_step("PLANNER", "Plan created successfully.", json.dumps(plan_data, indent=2))

        # 2. ARCHITECT — create placeholder content for every planned file.
        log_step("ARCHITECT", "Creating initial file skeletons...")
        arch_prompt = f"Create initial content for these files:\n{json.dumps(plan_data['files'])}"
        arch_response = generate_with_model("architect", arch_prompt)
        arch_data = _extract_json(arch_response)
        if not arch_data:
            raise ValueError("Architect failed to create valid JSON file structures.")
        for path, content in arch_data.items():
            create_file(project_dir, path, content)
        log_step("ARCHITECT", "File skeletons created.", "\n".join(arch_data.keys()))

        # 3. CODER — only Python sources under src/ get full implementations.
        source_files = [f for f in plan_data['files'] if f.startswith('src/') and f.endswith('.py')]
        for file_path in source_files:
            log_step("CODER", f"Writing complete code for `{file_path}`...")
            # .get(): a plan missing "purpose" should not abort the whole run.
            coder_prompt = f"Project purpose: {plan_data.get('purpose', '')}. Write the full Python code for the file: `{file_path}`."
            code = generate_with_model("coder", coder_prompt)
            create_file(project_dir, file_path, code)
            log_step("CODER", f"Finished writing `{file_path}`.", code)

        # 4. REVIEWER — advisory only; findings are logged, not acted upon.
        log_step("REVIEWER", "Reviewing all generated source code...")
        for file_path in source_files:
            code_content = read_file(project_dir, file_path)
            if not code_content:
                continue
            review_prompt = f"Review this code from `{file_path}`:\n\n{code_content}"
            review_response = generate_with_model("reviewer", review_prompt)
            review_data = _extract_json(review_response)
            log_step("REVIEWER", f"Review of `{file_path}` complete.", json.dumps(review_data, indent=2))

        # 5. TESTER — one pytest file per source file, under tests/.
        log_step("TESTER", "Writing unit tests for all source code...")
        for file_path in source_files:
            code_content = read_file(project_dir, file_path)
            if not code_content:
                continue
            test_file_path = os.path.join("tests", f"test_{os.path.basename(file_path)}")
            tester_prompt = f"Write a pytest test file (`{test_file_path}`) for this code from `{file_path}`:\n\n{code_content}"
            test_code = generate_with_model("tester", tester_prompt)
            create_file(project_dir, test_file_path, test_code)
            log_step("TESTER", f"Generated test `{test_file_path}`.", test_code)

        # 6. PUBLISHER — README / .gitignore / Dockerfile from the file tree.
        log_step("PUBLISHER", "Generating final documentation and configuration...")
        all_files = [os.path.join(r, f).replace(project_dir, '', 1) for r, d, fs in os.walk(project_dir) for f in fs]
        pub_prompt = f"Project file structure: {json.dumps(all_files)}. Generate README.md, .gitignore, and Dockerfile."
        pub_response = generate_with_model("publisher", pub_prompt)
        pub_data = _extract_json(pub_response)
        if not pub_data:
            raise ValueError("Publisher failed to create valid final assets.")
        for path, content in pub_data.items():
            create_file(project_dir, path, content)
        log_step("PUBLISHER", "Final assets created.", json.dumps(pub_data, indent=2))

        # 7. FINALIZATION.
        log_step("SYSTEM", "Packaging project into a ZIP file...")
        zip_path = zip_project(project_dir, project_id)
        # BUG FIX: the final log_step() must run *before* the "completed"
        # update — log_step() re-marks the project "running", so the old
        # order left every finished project stuck in the "running" state.
        log_step("SYSTEM", "Project completed successfully!", "Download available.")
        update_project_status(project_id, "completed", logs="".join(log_entries), zip_path=zip_path)

    except Exception as e:
        tb_str = traceback.format_exc()
        print(f"--- AGENT CHAIN FAILED for project {project_id} ---\n{tb_str}\n--------------------")
        error_log = "".join(log_entries) + f"\n\n❌ **CRITICAL ERROR:**\nAn unexpected error occurred.\n\n**Details:**\n```{str(e)}```"
        update_project_status(project_id, "failed", logs=error_log)
231
 
 
232
  # ------------------------------ JOB QUEUE ------------------------------
 
233
  executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
234
 
235
  def queue_job(project_id, user_id, prompt):