Keeby-smilyai committed on
Commit
40482fd
·
verified ·
1 Parent(s): ced9b63

Update backend.py

Browse files
Files changed (1) hide show
  1. backend.py +10 -13
backend.py CHANGED
@@ -1,4 +1,4 @@
1
- # backend.py — FINAL HARDENED VERSION v1.1
2
  import sqlite3
3
  import os
4
  import json
@@ -74,7 +74,6 @@ def load_model(model_name):
74
  if model_name in _MODEL_CACHE: return _MODEL_CACHE[model_name]
75
  print(f"Loading model: {model_name}...")
76
  tokenizer = AutoTokenizer.from_pretrained(model_name)
77
- # --- THIS IS THE FIX FOR THE WARNING ---
78
  model = AutoModelForCausalLM.from_pretrained(model_name, dtype="auto", device_map="auto", trust_remote_code=True, attn_implementation="eager")
79
  _MODEL_CACHE[model_name] = (tokenizer, model)
80
  print(f"Model {model_name} loaded and cached.")
@@ -82,8 +81,8 @@ def load_model(model_name):
82
 
83
  # ------------------------------ AGENT PROMPTS (SIMPLIFIED & ROBUST) ------------------------------
84
  ROLE_PROMPTS = {
85
- "planner": """You are an expert software planner. Break down the user's request into a detailed plan.
86
- Output ONLY a single JSON object with the keys: "purpose", "features", "dependencies", and "files".
87
  The "files" key MUST be an array of strings representing complete file paths (e.g., ["src/main.py", "tests/test_main.py", "requirements.txt"]).""",
88
  "architect": """You are a software architect. Create initial placeholder content for a list of files.
89
  Output ONLY a single JSON object where keys are file paths and values are the initial content (e.g., a comment like '# Main application logic here').""",
@@ -135,13 +134,15 @@ def generate_with_model(role: str, prompt: str) -> str:
135
  messages = [{"role": "system", "content": ROLE_PROMPTS[role]}, {"role": "user", "content": prompt}]
136
  input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
137
  inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
138
- outputs = model.generate(**inputs, max_new_tokens=2048, pad_token_id=tokenizer.eos_token_id)
 
 
139
  return tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True).strip()
140
  except Exception as e:
141
  print(f"Error during model generation for role {role}: {e}")
142
  return f'{{"error": "Failed to generate response: {str(e)}"}}'
143
 
144
- # ------------------------------ THE AGENT CHAIN EXECUTOR (REWRITTEN FOR RELIABILITY) ------------------------------
145
  def run_agent_chain(project_id, user_id, initial_prompt):
146
  project_dir = get_project_dir(user_id, project_id)
147
  log_entries = []
@@ -156,16 +157,12 @@ def run_agent_chain(project_id, user_id, initial_prompt):
156
  log_step("SYSTEM", "Initializing project...")
157
 
158
  # 1. PLANNER
159
- log_step("PLANNER", "Analyzing user request and creating a project plan...")
160
  plan_response = generate_with_model("planner", initial_prompt)
161
  plan_data = _extract_json(plan_response)
162
  if not plan_data or "files" not in plan_data: raise ValueError("Planner failed to create a valid JSON plan with a 'files' key.")
163
 
164
- if plan_data["files"] and isinstance(plan_data["files"][0], dict):
165
- log_step("SYSTEM", "Correcting malformed file list from Planner...")
166
- plan_data["files"] = [item["file"] for item in plan_data["files"] if "file" in item]
167
-
168
- log_step("PLANNER", "Plan created.", json.dumps(plan_data, indent=2))
169
 
170
  # 2. ARCHITECT
171
  log_step("ARCHITECT", "Creating initial file skeletons...")
@@ -180,7 +177,7 @@ def run_agent_chain(project_id, user_id, initial_prompt):
180
  source_files = [f for f in plan_data['files'] if f.startswith('src/') and f.endswith('.py')]
181
  for file_path in source_files:
182
  log_step("CODER", f"Writing complete code for `{file_path}`...")
183
- coder_prompt = f"Project purpose: {plan_data['purpose']}. Write the full Python code for the file: `{file_path}`."
184
  code = generate_with_model("coder", coder_prompt)
185
  create_file(project_dir, file_path, code)
186
  log_step("CODER", f"Finished writing `{file_path}`.", code)
 
1
+ # backend.py — FINAL HARDENED VERSION v1.2
2
  import sqlite3
3
  import os
4
  import json
 
74
  if model_name in _MODEL_CACHE: return _MODEL_CACHE[model_name]
75
  print(f"Loading model: {model_name}...")
76
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
77
  model = AutoModelForCausalLM.from_pretrained(model_name, dtype="auto", device_map="auto", trust_remote_code=True, attn_implementation="eager")
78
  _MODEL_CACHE[model_name] = (tokenizer, model)
79
  print(f"Model {model_name} loaded and cached.")
 
81
 
82
  # ------------------------------ AGENT PROMPTS (SIMPLIFIED & ROBUST) ------------------------------
83
  ROLE_PROMPTS = {
84
+ "planner": """You are an expert file planner. Based on the user's request, determine the necessary file structure.
85
+ Output ONLY a single JSON object with a single key: "files".
86
  The "files" key MUST be an array of strings representing complete file paths (e.g., ["src/main.py", "tests/test_main.py", "requirements.txt"]).""",
87
  "architect": """You are a software architect. Create initial placeholder content for a list of files.
88
  Output ONLY a single JSON object where keys are file paths and values are the initial content (e.g., a comment like '# Main application logic here').""",
 
134
  messages = [{"role": "system", "content": ROLE_PROMPTS[role]}, {"role": "user", "content": prompt}]
135
  input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
136
  inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
137
+
138
+ outputs = model.generate(**inputs, max_new_tokens=2048, pad_token_id=tokenizer.eos_token_id, use_cache=False)
139
+
140
  return tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True).strip()
141
  except Exception as e:
142
  print(f"Error during model generation for role {role}: {e}")
143
  return f'{{"error": "Failed to generate response: {str(e)}"}}'
144
 
145
+ # ------------------------------ THE AGENT CHAIN EXECUTOR ------------------------------
146
  def run_agent_chain(project_id, user_id, initial_prompt):
147
  project_dir = get_project_dir(user_id, project_id)
148
  log_entries = []
 
157
  log_step("SYSTEM", "Initializing project...")
158
 
159
  # 1. PLANNER
160
+ log_step("PLANNER", "Determining file structure from user request...")
161
  plan_response = generate_with_model("planner", initial_prompt)
162
  plan_data = _extract_json(plan_response)
163
  if not plan_data or "files" not in plan_data: raise ValueError("Planner failed to create a valid JSON plan with a 'files' key.")
164
 
165
+ log_step("PLANNER", "File structure plan created.", json.dumps(plan_data, indent=2))
 
 
 
 
166
 
167
  # 2. ARCHITECT
168
  log_step("ARCHITECT", "Creating initial file skeletons...")
 
177
  source_files = [f for f in plan_data['files'] if f.startswith('src/') and f.endswith('.py')]
178
  for file_path in source_files:
179
  log_step("CODER", f"Writing complete code for `{file_path}`...")
180
+ coder_prompt = f"Based on the user's overall request: '{initial_prompt}'.\n\nWrite the full Python code for the file: `{file_path}`."
181
  code = generate_with_model("coder", coder_prompt)
182
  create_file(project_dir, file_path, code)
183
  log_step("CODER", f"Finished writing `{file_path}`.", code)