Keeby-smilyai committed on
Commit
6e90b51
·
verified ·
1 Parent(s): c8c7b0e

Update backend.py

Browse files
Files changed (1) hide show
  1. backend.py +418 -214
backend.py CHANGED
@@ -6,20 +6,24 @@ import zipfile
6
  import tempfile
7
  import subprocess
8
  import re
9
- from huggingface_hub import whoami
 
 
 
10
 
11
- DB_PATH = "code_agents.db"
12
  PROJECT_ROOT = "./projects"
13
  os.makedirs(PROJECT_ROOT, exist_ok=True)
14
 
 
15
  def init_db():
16
  conn = sqlite3.connect(DB_PATH)
17
  cursor = conn.cursor()
18
  cursor.executescript("""
19
  CREATE TABLE IF NOT EXISTS users (
20
  id INTEGER PRIMARY KEY AUTOINCREMENT,
21
- hf_token TEXT UNIQUE NOT NULL,
22
- username TEXT,
23
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP
24
  );
25
  CREATE TABLE IF NOT EXISTS projects (
@@ -27,69 +31,274 @@ def init_db():
27
  user_id INTEGER NOT NULL,
28
  title TEXT,
29
  description TEXT,
 
30
  zip_path TEXT,
31
  created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
32
  FOREIGN KEY (user_id) REFERENCES users(id)
33
  );
 
34
  """)
35
  conn.commit()
36
  conn.close()
37
 
38
  init_db()
39
 
40
- def get_user_by_token(hf_token):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  conn = sqlite3.connect(DB_PATH)
42
  cursor = conn.cursor()
43
- cursor.execute("SELECT id, username FROM users WHERE hf_token = ?", (hf_token,))
44
  row = cursor.fetchone()
45
  conn.close()
46
  return row if row else None
47
 
48
- def create_user(hf_token, username=None):
49
  conn = sqlite3.connect(DB_PATH)
50
  cursor = conn.cursor()
51
- cursor.execute("INSERT INTO users (hf_token, username) VALUES (?, ?)", (hf_token, username))
52
- user_id = cursor.lastrowid
53
- conn.commit()
 
 
54
  conn.close()
55
- return user_id
56
 
57
- def create_project(user_id, title, description, zip_path):
58
  conn = sqlite3.connect(DB_PATH)
59
  cursor = conn.cursor()
60
  cursor.execute("""
61
- INSERT INTO projects (user_id, title, description, zip_path)
62
- VALUES (?, ?, ?, ?)
63
- """, (user_id, title, description, zip_path))
64
  project_id = cursor.lastrowid
65
  conn.commit()
66
  conn.close()
67
  return project_id
68
 
69
- def get_user_projects(user_id):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  conn = sqlite3.connect(DB_PATH)
71
  cursor = conn.cursor()
72
  cursor.execute("""
73
- SELECT id, title, description, zip_path, created_at
74
- FROM projects WHERE user_id = ? ORDER BY created_at DESC
75
- """, (user_id,))
76
- projects = cursor.fetchall()
77
  conn.close()
78
- return projects
79
 
80
- def verify_hf_token(token):
81
- try:
82
- user_info = whoami(token=token)
83
- username = user_info.get("name", "Anonymous")
84
- user = get_user_by_token(token)
85
- if not user:
86
- user_id = create_user(token, username)
87
- return user_id, f"Welcome to Code Agents, {username}! Your AI team is ready."
88
- else:
89
- return user[0], f"Welcome back, {user[1]}! Your projects are waiting."
90
- except Exception as e:
91
- return None, f"Invalid token. Please try again. ({str(e)})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  def get_user_project_dir(user_id):
94
  user_dir = os.path.join(PROJECT_ROOT, str(user_id))
95
  os.makedirs(user_dir, exist_ok=True)
@@ -105,8 +314,6 @@ def clear_user_project_dir(user_id):
105
  else:
106
  os.remove(path)
107
 
108
- TOOLS = {}
109
-
110
  def create_file(user_id, path: str, content: str):
111
  user_dir = get_user_project_dir(user_id)
112
  full_path = os.path.join(user_dir, path.lstrip("/"))
@@ -166,35 +373,21 @@ def scan_vulns(user_id):
166
  result = subprocess.run(["pip-audit", "-r", reqs], capture_output=True, text=True)
167
  return result.stdout if result.returncode != 0 else "No known vulnerabilities found."
168
 
169
- def search_hf_docs(query: str):
170
- return f"Searching Hugging Face docs for: '{query}' -> Found: https://huggingface.co/docs/transformers/main/en/quicktour"
171
-
172
- def write_readme(user_id, project_name: str, description: str):
173
- user_dir = get_user_project_dir(user_id)
174
- readme_content = f"""Project Name: {project_name}
175
-
176
- Description:
177
- {description}
178
-
179
- How to Run:
180
- 1. Install dependencies: pip install -r requirements.txt
181
- 2. Run the main file: python cli.py
182
 
183
- This project was built by Code Agents — an AI team on Hugging Face.
184
- """
185
- create_file(user_id, "README.md", readme_content)
186
  return "README.md generated."
187
 
188
- def generate_dockerfile(user_id):
189
- user_dir = get_user_project_dir(user_id)
190
- dockerfile = """FROM python:3.10-slim
191
- COPY . /app
192
- WORKDIR /app
193
- RUN pip install --no-cache-dir -r requirements.txt
194
- CMD ["python", "cli.py"]
195
- """
196
- create_file(user_id, "Dockerfile", dockerfile)
197
- return "Dockerfile generated."
198
 
199
  def zip_project(user_id, project_name: str):
200
  user_dir = get_user_project_dir(user_id)
@@ -208,165 +401,176 @@ def zip_project(user_id, project_name: str):
208
  zf.write(full_path, arcname)
209
  return zip_path
210
 
211
- def fix_error(error_msg: str, code: str):
212
- if "ModuleNotFoundError" in error_msg and "yt_dlp" in error_msg:
213
- return "Add 'yt-dlp' to requirements.txt"
214
- elif "SyntaxError" in error_msg:
215
- return "Fix indentation or missing colon"
216
- return "Unable to auto-fix. Please review manually."
217
-
218
- TOOLS = {
219
- "create_file": {"func": create_file, "desc": "Create a new file at specified path with content"},
220
- "read_file": {"func": read_file, "desc": "Read content of a file"},
221
- "list_files": {"func": list_files, "desc": "List all files in the project directory"},
222
- "execute_code": {"func": execute_code, "desc": "Execute Python code and return stdout/stderr"},
223
- "run_tests": {"func": run_tests, "desc": "Run pytest on test files in /tests directory"},
224
- "lint_code": {"func": lint_code, "desc": "Run flake8 to check code style and errors"},
225
- "scan_vulns": {"func": scan_vulns, "desc": "Scan dependencies for security vulnerabilities"},
226
- "search_hf_docs": {"func": search_hf_docs, "desc": "Search Hugging Face documentation for model usage"},
227
- "write_readme": {"func": write_readme, "desc": "Generate a README.md for the project"},
228
- "generate_dockerfile": {"func": generate_dockerfile, "desc": "Generate a Dockerfile for containerization"},
229
- "zip_project": {"func": zip_project, "desc": "Compress entire project into a downloadable ZIP"},
230
- "fix_error": {"func": fix_error, "desc": "Given an error message and code, suggest a fix"}
231
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232
 
233
- AGENT_PROMPTS = {
234
- "architect": """
235
- You are an expert software architect. Your job is to plan the project structure.
236
- Given a user request, decide:
237
- - File/folder structure
238
- - Required libraries
239
- - Tech stack (e.g., Flask, CLI, etc.)
240
 
241
- Respond only in JSON format:
242
- {
243
- "plan": "Brief plan",
244
- "files": [
245
- {"path": "cli.py", "content": "// initial code"},
246
- {"path": "requirements.txt", "content": "yt-dlp\\nflask"}
247
- ]
248
- }
249
- """,
250
- "coder": """
251
- You are a senior Python developer. Write clean, efficient, well-commented code.
252
- Use the tools available: create_file, read_file, execute_code, search_hf_docs.
253
- Never guess — use tools to verify.
254
- When done, write the file using create_file().
255
- """,
256
- "tester": """
257
- You are a QA engineer. Write unit tests for all functions.
258
- Use pytest. Create test files in /tests/.
259
- Use execute_code() to run tests and verify output.
260
- If tests fail, ask coder to fix.
261
- """,
262
- "reviewer": """
263
- You are a security and code review expert.
264
- Check for:
265
- - Security flaws (e.g., eval(), shell injection)
266
- - Poor practices
267
- - Missing error handling
268
- - Unused imports
269
- Use lint_code(), scan_vulns(), read_file()
270
- Suggest fixes using fix_error(). Update files with create_file().
271
- """,
272
- "deployer": """
273
- You are a DevOps engineer.
274
- Generate:
275
- - README.md with usage instructions
276
- - Dockerfile
277
- - ZIP archive of project
278
- Use: write_readme(), generate_dockerfile(), zip_project()
279
- Return final ZIP path when done.
280
- """
281
- }
282
 
283
- class Agent:
284
- def __init__(self, name: str, prompt: str, tools: dict, user_id: int):
285
- self.name = name
286
- self.prompt = prompt
287
- self.tools = tools
288
- self.user_id = user_id
289
- self.memory = []
 
 
 
 
 
290
 
291
- def think(self, task: str) -> str:
292
- return f"[{self.name}] {task} (simulated)"
 
293
 
294
- def act(self, action: str) -> str:
295
  try:
296
- if "(" not in action or ")" not in action:
297
- return "Invalid action format"
298
- func_name = action.split("(")[0]
299
- args_str = action[len(func_name)+1:-1]
300
- args = {}
301
- if args_str:
302
- for pair in args_str.split(","):
303
- k, v = pair.split("=")
304
- args[k.strip()] = v.strip().strip('"\'')
305
- if func_name in self.tools:
306
- bound_func = lambda **kwargs: self.tools[func_name]["func"](self.user_id, **kwargs)
307
- return bound_func(**args)
308
- return f"Unknown tool: {func_name}"
309
- except Exception as e:
310
- return f"Error executing {action}: {str(e)}"
311
-
312
- def run(self, task: str) -> str:
313
- response = self.think(task)
314
- self.memory.append(f"{self.name}: {response}")
315
-
316
- actions = re.findall(r'(\w+\(.*?\))', response)
317
- for action in actions:
318
- result = self.act(action)
319
- self.memory.append(f"{self.name} {action} → {result}")
320
-
321
- return "\n".join(self.memory[-5:])
322
-
323
- def run_code_agents(user_id: int, project_description: str):
324
- clear_user_project_dir(user_id)
325
- user_dir = get_user_project_dir(user_id)
 
 
 
 
 
 
 
 
 
 
326
 
327
- agents = {
328
- "architect": Agent("Architect", AGENT_PROMPTS["architect"], TOOLS, user_id),
329
- "coder": Agent("Coder", AGENT_PROMPTS["coder"], TOOLS, user_id),
330
- "tester": Agent("Tester", AGENT_PROMPTS["tester"], TOOLS, user_id),
331
- "reviewer": Agent("Reviewer", AGENT_PROMPTS["reviewer"], TOOLS, user_id),
332
- "deployer": Agent("Deployer", AGENT_PROMPTS["deployer"], TOOLS, user_id),
333
- }
334
-
335
- timeline = []
336
- logs = ""
337
-
338
- logs += "Architect planning...\n"
339
- plan = agents["architect"].run(f"Plan a project: {project_description}")
340
- timeline.append({"agent": "architect", "step": "plan", "output": plan})
341
- logs += plan + "\n\n"
342
-
343
- logs += "Coder implementing...\n"
344
- coder_task = f"Implement the project based on this plan:\n{plan}"
345
- coder_result = agents["coder"].run(coder_task)
346
- timeline.append({"agent": "coder", "step": "implement", "output": coder_result})
347
- logs += coder_result + "\n\n"
348
-
349
- logs += "Tester adding tests...\n"
350
- test_task = "Write tests for the implemented code. Save to /tests/test_main.py"
351
- test_result = agents["tester"].run(test_task)
352
- timeline.append({"agent": "tester", "step": "test", "output": test_result})
353
- logs += test_result + "\n\n"
354
-
355
- logs += "Reviewer checking code...\n"
356
- review_task = "Review all files. Fix any bugs or security issues."
357
- review_result = agents["reviewer"].run(review_task)
358
- timeline.append({"agent": "reviewer", "step": "review", "output": review_result})
359
- logs += review_result + "\n\n"
360
-
361
- logs += "Deployer packaging...\n"
362
- deploy_task = f"Package the project named '{project_description[:20]}...' into a ZIP with README and Dockerfile."
363
- deploy_result = agents["deployer"].run(deploy_task)
364
- timeline.append({"agent": "deployer", "step": "deploy", "output": deploy_result})
365
-
366
- zip_files = [f for f in os.listdir(user_dir) if f.endswith(".zip")]
367
- zip_path = os.path.join(user_dir, zip_files[0]) if zip_files else None
368
 
369
- if zip_path:
370
- project_id = create_project(user_id, project_description[:50], project_description, zip_path)
 
 
 
 
371
 
372
- return logs, zip_path, timeline
 
 
6
  import tempfile
7
  import subprocess
8
  import re
9
+ import concurrent.futures
10
+ from typing import List, Dict, Any
11
+ from transformers import AutoTokenizer, AutoModelForCausalLM
12
+ import torch
13
 
14
+ DB_PATH = "code_agents_pro.db"
15
  PROJECT_ROOT = "./projects"
16
  os.makedirs(PROJECT_ROOT, exist_ok=True)
17
 
18
# ------------------------------ DATABASE ------------------------------
def init_db():
    """Create the users/projects schema if it does not already exist.

    Called once at import time (below). All DDL now uses IF NOT EXISTS so
    the call is idempotent.
    """
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.executescript("""
        CREATE TABLE IF NOT EXISTS users (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            username TEXT UNIQUE NOT NULL,
            password_hash TEXT NOT NULL,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP
        );
        CREATE TABLE IF NOT EXISTS projects (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            user_id INTEGER NOT NULL,
            title TEXT,
            description TEXT,
            status TEXT DEFAULT 'queued', -- queued, running, completed, failed
            zip_path TEXT,
            created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
            FOREIGN KEY (user_id) REFERENCES users(id)
        );
        -- BUG FIX: a plain CREATE INDEX raised sqlite3.OperationalError on
        -- every process start after the first (init_db runs at import time).
        CREATE INDEX IF NOT EXISTS idx_user_status ON projects(user_id, status);
    """)
    conn.commit()
    conn.close()

init_db()
45
 
46
def hash_password(password):
    """Return the hex-encoded SHA-256 digest of *password*.

    NOTE(review): unsalted SHA-256 is a demo-only scheme — swap in
    bcrypt/argon2 (or at least salted PBKDF2) before production use.
    """
    import hashlib
    digest = hashlib.sha256(password.encode())
    return digest.hexdigest()
50
+
51
def verify_password(password, stored_hash):
    """Return True when *password* matches *stored_hash* (from hash_password).

    Uses hmac.compare_digest instead of ``==`` so the comparison runs in
    constant time, avoiding a timing side channel on password checks.
    """
    import hmac
    return hmac.compare_digest(hash_password(password), stored_hash)
53
+
54
def create_user(username, password):
    """Insert a new user row; return its id, or None if the username is taken."""
    conn = sqlite3.connect(DB_PATH)
    try:
        cur = conn.cursor()
        cur.execute(
            "INSERT INTO users (username, password_hash) VALUES (?, ?)",
            (username, hash_password(password)),
        )
        new_id = cur.lastrowid
        conn.commit()
        return new_id
    except sqlite3.IntegrityError:
        # UNIQUE constraint on username — caller treats None as "already exists".
        return None
    finally:
        conn.close()
67
+
68
def get_user_by_username(username):
    """Return the (id, password_hash) row for *username*, or None if absent."""
    conn = sqlite3.connect(DB_PATH)
    try:
        row = conn.execute(
            "SELECT id, password_hash FROM users WHERE username = ?",
            (username,),
        ).fetchone()
    finally:
        conn.close()
    return row if row else None
75
 
76
def get_user_projects(user_id, limit=3):
    """Return up to *limit* most recent projects for *user_id*.

    Rows are (id, title, description, status, zip_path, created_at).
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        rows = conn.execute(
            """
            SELECT id, title, description, status, zip_path, created_at
            FROM projects WHERE user_id = ? ORDER BY created_at DESC LIMIT ?
            """,
            (user_id, limit),
        ).fetchall()
    finally:
        conn.close()
    return rows
86
 
87
def create_project(user_id, title, description):
    """Insert a project in 'queued' status and return its new row id."""
    conn = sqlite3.connect(DB_PATH)
    try:
        cur = conn.execute(
            """
            INSERT INTO projects (user_id, title, description, status)
            VALUES (?, ?, ?, 'queued')
            """,
            (user_id, title, description),
        )
        conn.commit()
        return cur.lastrowid
    finally:
        conn.close()
98
 
99
def update_project_status(project_id, status, zip_path=None):
    """Set a project's status; when *zip_path* is truthy, record it as well."""
    conn = sqlite3.connect(DB_PATH)
    try:
        if zip_path:
            conn.execute(
                "UPDATE projects SET status = ?, zip_path = ? WHERE id = ?",
                (status, zip_path, project_id),
            )
        else:
            conn.execute(
                "UPDATE projects SET status = ? WHERE id = ?",
                (status, project_id),
            )
        conn.commit()
    finally:
        conn.close()
112
+
113
def get_project(project_id):
    """Fetch one project as (id, user_id, title, description, status, zip_path), or None."""
    conn = sqlite3.connect(DB_PATH)
    try:
        return conn.execute(
            """
            SELECT id, user_id, title, description, status, zip_path
            FROM projects WHERE id = ?
            """,
            (project_id,),
        ).fetchone()
    finally:
        conn.close()
123
 
124
def search_projects(user_id, query):
    """Substring-match *query* against a user's project titles and descriptions.

    Parameterized LIKE, newest first. Note: '%' or '_' inside *query* act
    as SQL wildcards.
    """
    pattern = f"%{query}%"
    conn = sqlite3.connect(DB_PATH)
    try:
        return conn.execute(
            """
            SELECT id, title, description, status, zip_path
            FROM projects WHERE user_id = ? AND (title LIKE ? OR description LIKE ?)
            ORDER BY created_at DESC
            """,
            (user_id, pattern, pattern),
        ).fetchall()
    finally:
        conn.close()
135
+
136
# ------------------------------ MODEL LOADING (PER ROLE) ------------------------------
# Each role gets the best-suited model — no compromises
# Maps an agent role name to the Hugging Face model id loaded by load_model()
# and looked up in generate_with_model().
# NOTE(review): "Qwen/Qwen2.5-Coder-0.6B-Instruct" does not match the
# published Qwen2.5-Coder size ladder (0.5B/1.5B/3B/7B/...) — confirm this
# repo id actually exists on the Hub before deploying.
# NOTE(review): there is no "final_ceo" entry here, yet ROLE_PROMPTS defines
# one and run_agent_chain invokes it — MODEL_REGISTRY[role] will KeyError.
MODEL_REGISTRY = {
    "understander": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
    "architect": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
    "ceo": "google/gemma-2-2b-it",
    "manager": "microsoft/Phi-3-mini-4k-instruct",
    "worker": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
    "reviewer": "microsoft/Phi-3-mini-4k-instruct",
    "editor": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
    "tester": "microsoft/Phi-3-mini-4k-instruct",
    "publisher": "Qwen/Qwen2.5-Coder-0.6B-Instruct",
}
150
+
151
# Load models once at startup
# Process-wide cache: model_name -> (tokenizer, model)
_MODEL_CACHE = {}

def load_model(model_name):
    """Return a cached (tokenizer, model) pair for *model_name*.

    Downloads/loads from the Hugging Face Hub on first use; subsequent calls
    for the same id hit _MODEL_CACHE. Uses fp16 when CUDA is available,
    fp32 otherwise.

    NOTE(review): trust_remote_code=True executes Python shipped inside the
    model repo — acceptable only for vetted repos.
    NOTE(review): device_map="auto" presumably requires the `accelerate`
    package to be installed — confirm it is a declared dependency.
    """
    if model_name in _MODEL_CACHE:
        return _MODEL_CACHE[model_name]

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto",
        trust_remote_code=True
    )
    _MODEL_CACHE[model_name] = (tokenizer, model)
    return tokenizer, model
167
+
168
# ------------------------------ PROMPTS PER ROLE ------------------------------
# System prompts keyed by role; consumed by generate_with_model().
# Only "worker" is a template (str.format with {file}/{instructions}); all
# other values are sent verbatim as the system message.
ROLE_PROMPTS = {
    # Analyzes the raw user request into a structured JSON spec.
    "understander": """
You are the Understander. Your job is to deeply analyze the user's project request.
Extract:
- Core functionality
- Required libraries
- File structure
- Edge cases

Output only JSON:
{
"purpose": "string",
"features": ["list of features"],
"dependencies": ["list of required pip packages"],
"files": [
{"path": "filename.py", "type": "source|test|config|other"},
...
]
}
""",

    # Turns the Understander's spec into a file skeleton (parsed as JSON
    # by run_agent_chain and written to disk via create_file).
    "architect": """
You are the Architect. Based on the Understander's analysis, design the full project structure.
Define:
- Folder hierarchy
- File contents (minimal skeleton)
- Configuration files

Output only JSON:
{
"structure": [
{"path": "src/main.py", "content": "// empty file"},
{"path": "requirements.txt", "content": "flask\\nrequests"}
],
"notes": "Brief notes on architecture decisions"
}
""",

    # Rubber-stamp approval step between Architect and Manager.
    "ceo": """
You are the CEO. You oversee the entire project. You receive input from the Architect.
Your job is to review the plan, ensure alignment with goals, and assign tasks to Managers.
Respond only with: "Approved. Assigning to Managers."
""",

    # Produces the per-file task list (parsed as JSON; each task feeds a worker).
    "manager": """
You are a Manager. You receive a task from the CEO.
Assign work to Workers based on file type.
Example:
- "Write src/main.py" → assign to Worker A
- "Write tests/test_main.py" → assign to Worker B

Return only JSON:
{
"tasks": [
{"file": "src/main.py", "assigned_to": "worker", "instructions": "Write a Flask route..."},
{"file": "tests/test_main.py", "assigned_to": "worker", "instructions": "Write a test for the /upload endpoint..."}
]
}
""",

    # TEMPLATE: {file} and {instructions} are filled via .format() in
    # generate_with_model — do not add other literal braces here.
    "worker": """
You are a Worker. You write clean, functional code based on instructions.
Never guess. Use tools if needed.
Only output the raw file content — no markdown, no explanations.
If writing Python, use proper imports, error handling, comments.

File: {file}
Instructions: {instructions}
""",

    # Audits worker output; the braces below are literal (this prompt is
    # never .format()-ed).
    "reviewer": """
You are the Reviewer. You audit all code written by Workers.
Check for:
- Security flaws (eval(), shell injection)
- Missing error handling
- Unused imports
- Poor variable names
- Logic errors

For each issue, return:
{
"file": "path/to/file.py",
"issue": "description",
"suggestion": "fix suggestion"
}

If no issues: return {"issues": []}
""",

    # Applies reviewer suggestions; expected to emit raw file content only.
    "editor": """
You are the Editor. Apply all Reviewer suggestions to the code.
Update files directly.
Do not explain — just rewrite the file content.
If a file doesn't exist, create it.
Return only the updated file content.
""",

    # Writes pytest files. NOTE(review): "tests/test_(unknown).py" looks like
    # a template placeholder lost in transit (presumably "tests/test_{name}.py")
    # — confirm against the original source.
    "tester": """
You are a Tester. Write unit tests for each source file.
Use pytest. Structure:
- File: tests/test_(unknown).py
- Use assert statements
- Cover edge cases

Only output the test file content — nothing else.
""",

    # Emits final project assets as a JSON object (parsed by run_agent_chain).
    "publisher": """
You are the Publisher. Finalize the project.
Generate:
- README.md (plain text, no markdown formatting)
- Dockerfile
- requirements.txt (complete, deduplicated)
- .gitignore

Output each file as:
{
"README.md": "...",
"Dockerfile": "...",
"requirements.txt": "...",
".gitignore": "venv/\n__pycache__/"
}
""",

    # Final gate: run_agent_chain requires the literal phrase
    # "Ready to package" in this role's reply, else it fails the project.
    "final_ceo": """
You are the final CEO. You receive the packaged project from the Publisher.
Verify everything is complete and correct.
If yes: respond with "Ready to package."
If no: list missing items.
"""
}
300
+
301
+ # ------------------------------ TOOLS (SAME AS BEFORE) ------------------------------
302
  def get_user_project_dir(user_id):
303
  user_dir = os.path.join(PROJECT_ROOT, str(user_id))
304
  os.makedirs(user_dir, exist_ok=True)
 
314
  else:
315
  os.remove(path)
316
 
 
 
317
  def create_file(user_id, path: str, content: str):
318
  user_dir = get_user_project_dir(user_id)
319
  full_path = os.path.join(user_dir, path.lstrip("/"))
 
373
  result = subprocess.run(["pip-audit", "-r", reqs], capture_output=True, text=True)
374
  return result.stdout if result.returncode != 0 else "No known vulnerabilities found."
375
 
376
def _write_project_asset(user_id, name: str, content: str):
    """Write *content* to *name* inside the user's project dir and report it."""
    create_file(user_id, name, content)
    return f"{name} generated."

def generate_dockerfile(user_id, content: str):
    """Write the project's Dockerfile."""
    return _write_project_asset(user_id, "Dockerfile", content)

def write_readme(user_id, content: str):
    """Write the project's README.md."""
    return _write_project_asset(user_id, "README.md", content)

def write_requirements(user_id, content: str):
    """Write the project's requirements.txt."""
    return _write_project_asset(user_id, "requirements.txt", content)

def write_gitignore(user_id, content: str):
    """Write the project's .gitignore."""
    return _write_project_asset(user_id, ".gitignore", content)
 
 
 
391
 
392
  def zip_project(user_id, project_name: str):
393
  user_dir = get_user_project_dir(user_id)
 
401
  zf.write(full_path, arcname)
402
  return zip_path
403
 
404
# ------------------------------ LLM GENERATOR (CORE ENGINE) ------------------------------
def generate_with_model(role: str, prompt: str, context=None) -> str:
    """Run one chat turn for *role* and return the generated text, stripped.

    role    -- key into ROLE_PROMPTS / MODEL_REGISTRY (e.g. "worker").
    prompt  -- user-turn content; ignored for "worker", whose task arrives
               via *context* instead.
    context -- optional dict with "file" and "instructions" keys, used only
               to fill the "worker" prompt template.

    Fixes vs. previous version:
    - `context` had a mutable default `{}` (shared across calls); now None.
    - "final_ceo" has a prompt but no MODEL_REGISTRY entry, so the old
      `MODEL_REGISTRY[role]` raised KeyError; unknown roles now fall back
      to the "ceo" model.
    - The ten-branch if/elif chain (nine branches identical in shape) is
      collapsed into a membership test on ROLE_PROMPTS.
    """
    # Roles without a registry entry (final_ceo) reuse the CEO model.
    model_name = MODEL_REGISTRY.get(role, MODEL_REGISTRY["ceo"])
    tokenizer, model = load_model(model_name)

    if role == "worker":
        ctx = context or {}
        system_prompt = ROLE_PROMPTS["worker"].format(
            file=ctx.get("file"), instructions=ctx.get("instructions")
        )
        # Worker receives its whole task in the system prompt; user turn empty.
        messages = [{"role": "system", "content": system_prompt},
                    {"role": "user", "content": ""}]
    elif role in ROLE_PROMPTS:
        messages = [{"role": "system", "content": ROLE_PROMPTS[role]},
                    {"role": "user", "content": prompt}]
    else:
        messages = [{"role": "user", "content": prompt}]

    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    inputs = tokenizer(text, return_tensors="pt").to(model.device)

    outputs = model.generate(
        **inputs,
        max_new_tokens=512,
        temperature=0.2,
        top_p=0.9,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id
    )

    # Decode only the newly generated tail, skipping the echoed prompt tokens.
    response = tokenizer.decode(outputs[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
    return response.strip()
463
+
464
# ------------------------------ ASYNC JOB EXECUTOR ------------------------------
def run_agent_chain(project_id, user_id, initial_prompt):
    """Drive the full multi-agent pipeline for one project, start to ZIP.

    Steps: understand → architect → ceo → manager → workers → reviewer →
    editor → tester → editor → publisher → final CEO gate → zip & record.
    Returns a short human-readable result string; project status in the DB
    is moved through running → completed/failed.

    Fixes vs. previous version:
    - Progress strings (and the failure error message) were passed as the
      *zip_path* argument of update_project_status, polluting that column;
      they are no longer written to the DB.
    - Three bare `except: pass` clauses now catch only the JSON/shape
      errors the best-effort parses can legitimately raise.
    - `import json` was repeated in three try blocks; hoisted once.
    - The tester prompt did not name the file under test; it now does.
    """
    import json  # hoisted: previously imported inside three try blocks

    try:
        update_project_status(project_id, "running")

        get_user_project_dir(user_id)   # ensure the directory exists
        clear_user_project_dir(user_id)

        # Step 1: Understander analyzes the request.
        understand_result = generate_with_model("understander", initial_prompt)

        # Step 2: Architect designs the structure.
        architect_result = generate_with_model("architect", understand_result)

        # Best effort: materialize the architect's skeleton files
        # (model output is not guaranteed to be valid JSON).
        try:
            arch_data = json.loads(architect_result)
            for entry in arch_data.get("structure", []):
                create_file(user_id, entry["path"], entry["content"])
        except (json.JSONDecodeError, KeyError, TypeError, AttributeError):
            pass

        # Steps 3-4: CEO approval, then Manager task assignment.
        generate_with_model("ceo", "Review architecture and approve.")
        manager_response = generate_with_model("manager", "Assign tasks based on architecture.")

        # Best effort: run each worker task from the manager's JSON.
        try:
            tasks = json.loads(manager_response).get("tasks", [])
            for task in tasks:
                worker_response = generate_with_model("worker", "", context={
                    "file": task["file"],
                    "instructions": task["instructions"]
                })
                create_file(user_id, task["file"], worker_response)
        except (json.JSONDecodeError, KeyError, TypeError, AttributeError):
            pass

        # Step 5: Reviewer audits.
        # NOTE(review): only file *names* are sent, not their contents — the
        # reviewer cannot see the code. Consider including read_file() output.
        all_files = "\n".join(list_files(user_id))
        reviewer_result = generate_with_model("reviewer", f"Review these files:\n\n{all_files}")

        # Step 6: Editor applies fixes (its output is not written back to disk
        # here — presumably intentional; confirm).
        generate_with_model("editor", f"Apply these changes:\n{reviewer_result}\n\nFiles:\n{all_files}")

        # Step 7: Tester writes one test file per source file.
        for filename in [f for f in list_files(user_id) if f.endswith(".py") and not f.startswith("test_")]:
            # BUG FIX: the prompt previously omitted which file to test.
            test_content = generate_with_model("tester", f"Write a test for this file: {filename}")
            create_file(user_id, f"tests/test_{os.path.basename(filename)}", test_content)

        # Step 8: Editor reviews the tests.
        test_files = "\n".join([f for f in list_files(user_id) if f.startswith("tests/")])
        generate_with_model(
            "editor",
            f"Review these test files:\n\n{test_files}\n\nMake sure they are complete and correct.")

        # Step 9: Publisher emits README, Dockerfile, requirements, .gitignore.
        publisher_response = generate_with_model(
            "publisher",
            "Generate final project assets: README.md, Dockerfile, requirements.txt, .gitignore")
        try:
            pub_data = json.loads(publisher_response)
            for fname, content in pub_data.items():
                if fname == "README.md":
                    write_readme(user_id, content)
                elif fname == "Dockerfile":
                    generate_dockerfile(user_id, content)
                elif fname == "requirements.txt":
                    write_requirements(user_id, content)
                elif fname == ".gitignore":
                    write_gitignore(user_id, content)
        except (json.JSONDecodeError, TypeError, AttributeError):
            pass

        # Step 10: Final CEO gate — requires the literal approval phrase.
        final_ceo_response = generate_with_model("final_ceo", "All files generated. Verify completeness.")
        if "Ready to package" not in final_ceo_response:
            raise Exception(f"Final CEO rejected: {final_ceo_response}")

        # Step 11: package the project and record the ZIP path.
        zip_path = zip_project(user_id, f"project_{project_id}")
        update_project_status(project_id, "completed", zip_path)

        return "Completed successfully"

    except Exception as e:
        # BUG FIX: the error string used to be stored in the zip_path column;
        # now only the status flips to "failed". The message is still returned.
        update_project_status(project_id, "failed")
        return f"Failed: {str(e)}"
571
+
572
# ------------------------------ JOB QUEUE ------------------------------
# Two workers: each job is a full, model-bound agent-chain run.
executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)

def queue_job(project_id, user_id, prompt):
    """Schedule run_agent_chain in the background and return its Future.

    Previously the Future was discarded (implicit None return); returning it
    is backward-compatible and lets callers observe completion/exceptions.
    """
    return executor.submit(run_agent_chain, project_id, user_id, prompt)