harshadh01 commited on
Commit
67ebad0
·
verified ·
1 Parent(s): cf4c7fd

Fix ChatPromptTemplate import: use `langchain_core.prompts` instead of the deprecated `langchain.prompts` path

Browse files
Files changed (1) hide show
  1. generator.py +420 -420
generator.py CHANGED
@@ -1,420 +1,420 @@
1
- import os
2
- import json
3
- import subprocess
4
- import ast
5
-
6
- # Import strict prompts
7
- from prompt import *
8
-
9
- from langchain_openai import ChatOpenAI
10
- from langchain.prompts import ChatPromptTemplate
11
-
12
-
13
- def add_app_to_installed_apps(settings_path: str, app_name: str):
14
- with open(settings_path, "r", encoding="utf-8") as f:
15
- lines = f.readlines()
16
-
17
- in_installed_apps = False
18
- already_added = False
19
- new_lines = []
20
-
21
- for line in lines:
22
- if line.strip().startswith("INSTALLED_APPS"):
23
- in_installed_apps = True
24
-
25
- if in_installed_apps and f'"{app_name}"' in line:
26
- already_added = True
27
-
28
- if in_installed_apps and line.strip() == "]":
29
- if not already_added:
30
- new_lines.append(f' "{app_name}",\n')
31
- in_installed_apps = False
32
-
33
- new_lines.append(line)
34
-
35
- if not already_added:
36
- with open(settings_path, "w", encoding="utf-8") as f:
37
- f.writelines(new_lines)
38
-
39
-
40
- # ============================================================
41
- # HELPER FUNCTIONS
42
- # ============================================================
43
-
44
- def run_cmd(command, cwd=None):
45
- """Runs terminal commands safely."""
46
- result = subprocess.run(
47
- command.split(),
48
- cwd=cwd,
49
- stdout=subprocess.PIPE,
50
- stderr=subprocess.PIPE,
51
- text=True
52
- )
53
-
54
-
55
- if result.returncode != 0:
56
- print("❌ Command failed:", command)
57
- print(result.stderr)
58
- raise Exception(result.stderr)
59
- return result.stdout
60
-
61
-
62
- def write_file(path, content):
63
- os.makedirs(os.path.dirname(path), exist_ok=True)
64
- with open(path, "w", encoding="utf8") as f:
65
- f.write(content)
66
-
67
-
68
- def is_valid_python(code: str) -> bool:
69
- try:
70
- ast.parse(code)
71
- return True
72
- except:
73
- return False
74
-
75
- # ===================README FILE GENERATOR ===================
76
-
77
- def generate_readme_with_llm(spec_json, project_path, llm):
78
- chain = ChatPromptTemplate.from_messages([
79
- ("system", README_PROMPT),
80
- ("user", "{json_input}")
81
- ])
82
-
83
- runnable = chain | llm
84
- result = runnable.invoke({
85
- "json_input": json.dumps(spec_json, indent=2)
86
- })
87
-
88
- readme_content = result.content.strip()
89
-
90
- write_file(os.path.join(project_path, "README.md"), readme_content)
91
- print("📘 README.md generated successfully.")
92
-
93
-
94
- # ============================================================
95
- # CUSTOM FILE VALIDATORS
96
- # ============================================================
97
-
98
- def is_valid_serializer(code):
99
- """Rejects dynamic serializer patterns."""
100
- forbidden = ["globals(", "type(", "for ", "_create", "json_input"]
101
- if any(f in code for f in forbidden):
102
- return False
103
- if "class " not in code:
104
- return False
105
- if "Serializer" not in code:
106
- return False
107
- return is_valid_python(code)
108
-
109
-
110
- def is_valid_views(code):
111
- """Reject dynamic views or helper functions."""
112
- forbidden = ["globals(", "type(", "_create", "for ", "json_input"]
113
- if any(f in code for f in forbidden):
114
- return False
115
- if "APIView" not in code:
116
- return False
117
- return is_valid_python(code)
118
-
119
-
120
- def is_valid_urls(code):
121
- forbidden = ["globals(", "type(", "_create", "json_input"]
122
- if any(f in code for f in forbidden):
123
- return False
124
-
125
- if "urlpatterns" not in code:
126
- return False
127
- if "path(" not in code:
128
- return False
129
- if "<int:pk>" not in code:
130
- return False
131
-
132
- # No leading slash allowed
133
- if 'path("/' in code or "path('/" in code:
134
- return False
135
-
136
- return is_valid_python(code)
137
-
138
-
139
- # ============================================================
140
- # GENERATION LOGIC (AUTO-FIX)
141
- # ============================================================
142
-
143
- def generate_code_with_fix(
144
- prompt,
145
- json_slice,
146
- *,
147
- llm,
148
- validator=None,
149
- retries=5,
150
- ):
151
-
152
- for attempt in range(retries):
153
- print(f"🧠 Generating (attempt {attempt+1})...")
154
-
155
- chain = ChatPromptTemplate.from_messages([
156
- ("system", prompt),
157
- ("user", "{json_input}")
158
- ])
159
-
160
- runnable = chain | llm
161
- result = runnable.invoke({
162
- "json_input": json.dumps(json_slice, indent=2)
163
- })
164
-
165
- code = result.content.strip()
166
-
167
-
168
- # Use custom validator if provided
169
- if validator:
170
- if validator(code):
171
- print("✅ File valid.")
172
- return code
173
- else:
174
- # Syntax-only validation
175
- if is_valid_python(code):
176
- print("✅ File valid.")
177
- return code
178
-
179
- print("❌ Invalid file. Regenerating...")
180
-
181
- raise Exception("❌ Could not generate a valid file after retries.")
182
-
183
-
184
- def generate_urls_with_fix(prompt, json_slice, *, llm, retries=5):
185
- return generate_code_with_fix(prompt, json_slice, validator=is_valid_urls,llm=llm, retries=retries)
186
-
187
-
188
- # ============================================================
189
- # MAIN PROJECT GENERATOR
190
- # ============================================================
191
-
192
- import os
193
- import sys
194
-
195
- def generate_full_project(spec_json, output_dir, llm, project_name):
196
- print("generating project")
197
-
198
- print("creating path")
199
-
200
- project_path = os.path.join(output_dir, project_name)
201
- os.makedirs(output_dir, exist_ok=True)
202
-
203
- PYTHON = sys.executable # ✅ always correct python
204
-
205
- # -----------------------------
206
- # 1) CREATE DJANGO PROJECT
207
- # -----------------------------
208
- try:
209
- print("🚀 Creating Django project...")
210
- run_cmd(f"{PYTHON} -m django startproject {project_name}", cwd=output_dir)
211
- except Exception as e:
212
- raise RuntimeError(f"Failed to create Django project '{project_name}'") from e
213
-
214
- # -----------------------------
215
- # 2) CREATE APPS
216
- # -----------------------------
217
- try:
218
- for app in spec_json.get("apps", {}):
219
- print(f"📦 Creating app: {app}")
220
- run_cmd(f"{PYTHON} manage.py startapp {app}", cwd=project_path)
221
-
222
- settings_path = os.path.join(
223
- project_path,
224
- project_name,
225
- "settings.py"
226
- )
227
-
228
- add_app_to_installed_apps(settings_path, app)
229
-
230
- except Exception as e:
231
- raise RuntimeError("Failed while creating Django apps") from e
232
-
233
- # -----------------------------
234
- # 3) GENERATE FILES FOR EACH APP
235
- # -----------------------------
236
- for app_name, app_spec in spec_json.get("apps", {}).items():
237
- try:
238
- print(f"📝 Generating code for app: {app_name}")
239
- app_dir = os.path.join(project_path, app_name)
240
-
241
- if not os.path.exists(app_dir):
242
- raise FileNotFoundError(f"App directory not found: {app_dir}")
243
-
244
- models_slice = {"models": app_spec.get("models", {})}
245
-
246
- serializer_slice = {
247
- "model_names": sorted(app_spec.get("models", {}).keys())
248
- }
249
- admin_slice = {"model_names": list(app_spec.get("models", {}).keys())}
250
-
251
- views_slice = {
252
- "model_names": list(app_spec.get("models", {}).keys()),
253
- "apis": app_spec.get("apis", {})
254
- }
255
-
256
- urls_slice = {
257
- "model_names": list(app_spec.get("models", {}).keys()),
258
- "apis": app_spec.get("apis", {}),
259
- "base_url": spec_json.get("api_config", {}).get("base_url", "/api/")
260
- }
261
-
262
- models_code = generate_code_with_fix(
263
- MODELS_PROMPT, models_slice, validator=is_valid_python, llm=llm
264
- )
265
- write_file(os.path.join(app_dir, "models.py"), models_code)
266
-
267
- serializers_code = generate_code_with_fix(
268
- SERIALIZERS_PROMPT, serializer_slice, validator=is_valid_serializer, llm=llm
269
- )
270
- write_file(os.path.join(app_dir, "serializers.py"), serializers_code)
271
-
272
- views_code = generate_code_with_fix(
273
- VIEWS_PROMPT, views_slice, validator=is_valid_views, llm=llm
274
- )
275
- write_file(os.path.join(app_dir, "views.py"), views_code)
276
-
277
- urls_code = generate_urls_with_fix(
278
- URLS_PROMPT, urls_slice, llm=llm
279
- )
280
- write_file(os.path.join(app_dir, "urls.py"), urls_code)
281
-
282
- admin_code = generate_code_with_fix(
283
- ADMIN_PROMPT, admin_slice, validator=is_valid_python, llm=llm
284
- )
285
- write_file(os.path.join(app_dir, "admin.py"), admin_code)
286
-
287
- except Exception as e:
288
- raise RuntimeError(f"Code generation failed for app '{app_name}'") from e
289
-
290
- # -----------------------------
291
- # 4) GENERATE REQUIREMENTS
292
- # -----------------------------
293
- try:
294
- requirements_slice = {
295
- "auth": spec_json.get("auth", {}),
296
- "database": spec_json.get("database", {}),
297
- "deployment": spec_json.get("deployment", {})
298
- }
299
-
300
- requirements_code = generate_code_with_fix(
301
- REQUIREMENTS_PROMPT, requirements_slice, llm=llm
302
- )
303
- write_file(os.path.join(project_path, "requirements.txt"), requirements_code)
304
-
305
- except Exception as e:
306
- raise RuntimeError("Failed to generate requirements.txt") from e
307
-
308
- # -----------------------------
309
- # 5) PROJECT URLS
310
- # -----------------------------
311
- try:
312
- project_urls_slice = {
313
- "project_name": project_name,
314
- "apps": list(spec_json.get("apps", {}).keys()),
315
- "base_url": spec_json.get("api_config", {}).get("base_url", "/api/")
316
- }
317
-
318
- project_urls_code = generate_code_with_fix(
319
- PROJECT_URLS_PROMPT,
320
- project_urls_slice,
321
- validator=is_valid_python, llm=llm
322
- )
323
-
324
- write_file(
325
- os.path.join(project_path, project_name, "urls.py"),
326
- project_urls_code
327
- )
328
-
329
- except Exception as e:
330
- raise RuntimeError("Failed to generate project urls.py") from e
331
-
332
- # -----------------------------
333
- # 6) README (OPTIONAL)
334
- # -----------------------------
335
- try:
336
- generate_readme_with_llm(spec_json, project_path, llm=llm)
337
- except Exception as e:
338
- print("⚠ README generation failed (non-blocking):", e)
339
-
340
- print("\n🎉 DONE! Project created at:", project_path)
341
- return project_path
342
-
343
- def load_json_spec(path="json_output/spec.json"):
344
- with open(path, "r", encoding="utf-8") as f:
345
- return json.load(f)
346
-
347
-
348
- #=============================Prompt_With_Model_Specification================
349
- from langchain.prompts import ChatPromptTemplate
350
-
351
- def generate_django_model_prompt(project_name: str, description: str, llm):
352
- """
353
- Uses LLM to generate a high-quality, structured prompt
354
- for Django model development with clear specifications.
355
- """
356
-
357
- prompt = ChatPromptTemplate.from_messages([
358
- ("system", """
359
- You are a senior Django backend architect with production experience.
360
-
361
- Your task is to generate a SINGLE, CLEAN, PLAIN-TEXT PROMPT
362
- that will later be used to generate Django models.py.
363
-
364
- STRICT RULES:
365
- - Output ONLY plain text.
366
- - Do NOT use markdown, bullet points, headings, or code blocks.
367
- - Do NOT include explanations or commentary.
368
- - Do NOT generate Django code.
369
- - Generate ONLY the final prompt text.
370
-
371
- MODEL DESIGN RULES:
372
- - Follow Django ORM best practices.
373
- - Use deterministic, production-ready model structures.
374
- - Infer missing models or fields if required to complete the system.
375
- - Never invent unnecessary models.
376
- - Every model MUST have a clear real-world purpose.
377
-
378
- FIELD RULES:
379
- - Infer sensible fields when the user does not specify all required fields.
380
- - Use correct Django field types.
381
- - Add timestamps (created_at, updated_at) when appropriate.
382
- - Use UUID primary keys where suitable.
383
- - Do NOT use dynamic patterns or metaprogramming.
384
- - Ensure field names are snake_case.
385
- - Ensure model names are PascalCase.
386
-
387
- RELATIONSHIP RULES:
388
- - Infer relationships only when logically required.
389
- - Use ForeignKey for one-to-many relationships.
390
- - Use OneToOneField only when explicitly required.
391
- - Avoid ManyToMany unless clearly necessary.
392
-
393
- META RULES:
394
- - Include Meta options such as db_table and ordering.
395
- - Include __str__ methods for all models.
396
- - Ensure compatibility with Django REST Framework serializers and views.
397
-
398
- OUTPUT QUALITY RULE:
399
- The generated prompt must be precise, minimal, and implementation-ready,
400
- so that another LLM can generate models.py without making assumptions.
401
- """),
402
- ("user", """
403
- Project Name: {project_name}
404
-
405
- User Requirements:
406
- {description}
407
-
408
- Generate a single, concise, implementation-ready PROMPT
409
- that instructs an LLM to generate Django models.py.
410
- """)
411
- ])
412
-
413
-
414
- chain = prompt | llm
415
- result = chain.invoke({
416
- "project_name": project_name,
417
- "description": description
418
- })
419
-
420
- return result.content.strip()
 
1
+ import os
2
+ import json
3
+ import subprocess
4
+ import ast
5
+
6
+ # Import strict prompts
7
+ from prompt import *
8
+
9
+ from langchain_openai import ChatOpenAI
10
+ from langchain_core.prompts import ChatPromptTemplate
11
+
12
+
13
def add_app_to_installed_apps(settings_path: str, app_name: str):
    """Insert ``app_name`` into the INSTALLED_APPS list of a settings.py file.

    The file is rewritten in place only when the app is not already listed.
    Both quote styles are recognised when checking for an existing entry,
    because ``django-admin startproject`` emits single-quoted entries while
    this generator appends double-quoted ones — matching only double quotes
    would re-append the app on every run.
    """
    with open(settings_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    in_installed_apps = False
    already_added = False
    new_lines = []

    for line in lines:
        if line.strip().startswith("INSTALLED_APPS"):
            in_installed_apps = True

        # Detect an existing entry regardless of quote style.
        if in_installed_apps and (
            f'"{app_name}"' in line or f"'{app_name}'" in line
        ):
            already_added = True

        # The first bare closing bracket ends the INSTALLED_APPS list;
        # inject the new entry just before it.
        if in_installed_apps and line.strip() == "]":
            if not already_added:
                new_lines.append(f'    "{app_name}",\n')
            in_installed_apps = False

        new_lines.append(line)

    if not already_added:
        with open(settings_path, "w", encoding="utf-8") as f:
            f.writelines(new_lines)
38
+
39
+
40
+ # ============================================================
41
+ # HELPER FUNCTIONS
42
+ # ============================================================
43
+
44
def run_cmd(command, cwd=None):
    """Run a terminal command and return its captured stdout.

    The command string is split on whitespace and executed without a shell,
    so arguments containing spaces are not supported — callers pass simple
    ``python manage.py ...`` style commands.

    Args:
        command: Whitespace-separated command string.
        cwd: Optional working directory for the child process.

    Raises:
        RuntimeError: If the command exits non-zero (stderr is printed and
            carried in the exception; RuntimeError is an Exception subclass,
            so existing broad ``except Exception`` handlers still catch it).
    """
    result = subprocess.run(
        command.split(),
        cwd=cwd,
        capture_output=True,  # same as stdout=PIPE, stderr=PIPE
        text=True,
    )

    if result.returncode != 0:
        print("❌ Command failed:", command)
        print(result.stderr)
        raise RuntimeError(result.stderr)
    return result.stdout
60
+
61
+
62
def write_file(path, content):
    """Write ``content`` to ``path`` as UTF-8, creating parent dirs as needed.

    Guards against a bare filename: ``os.path.dirname`` returns ``""`` then,
    and ``os.makedirs("")`` would raise FileNotFoundError.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)
66
+
67
+
68
def is_valid_python(code: str) -> bool:
    """Return True when ``code`` parses as Python source.

    Narrowed from a bare ``except:`` so that unrelated failures
    (KeyboardInterrupt, MemoryError) are not silently swallowed.
    """
    try:
        ast.parse(code)
        return True
    except (SyntaxError, ValueError):
        # SyntaxError: malformed code; ValueError: e.g. source with null bytes.
        return False
74
+
75
+ # ===================README FILE GENERATOR ===================
76
+
77
def generate_readme_with_llm(spec_json, project_path, llm):
    """Render README.md for the project by piping the JSON spec through the LLM."""
    prompt = ChatPromptTemplate.from_messages(
        [("system", README_PROMPT), ("user", "{json_input}")]
    )
    payload = {"json_input": json.dumps(spec_json, indent=2)}

    response = (prompt | llm).invoke(payload)
    readme_text = response.content.strip()

    write_file(os.path.join(project_path, "README.md"), readme_text)
    print("📘 README.md generated successfully.")
92
+
93
+
94
+ # ============================================================
95
+ # CUSTOM FILE VALIDATORS
96
+ # ============================================================
97
+
98
def is_valid_serializer(code):
    """Rejects dynamic serializer patterns."""
    banned = ("globals(", "type(", "for ", "_create", "json_input")
    if any(token in code for token in banned):
        return False
    # A proper serializers.py must declare at least one Serializer class.
    if "class " not in code or "Serializer" not in code:
        return False
    return is_valid_python(code)
108
+
109
+
110
def is_valid_views(code):
    """Reject dynamic views or helper functions."""
    for token in ("globals(", "type(", "_create", "for ", "json_input"):
        if token in code:
            return False
    # Views must be class-based APIView implementations and parse cleanly.
    return "APIView" in code and is_valid_python(code)
118
+
119
+
120
def is_valid_urls(code):
    """Validate generated urls.py: static patterns, a detail route, no leading slash."""
    for token in ("globals(", "type(", "_create", "json_input"):
        if token in code:
            return False

    required = ("urlpatterns", "path(", "<int:pk>")
    if not all(marker in code for marker in required):
        return False

    # Route strings must not begin with a slash.
    if 'path("/' in code or "path('/" in code:
        return False

    return is_valid_python(code)
137
+
138
+
139
+ # ============================================================
140
+ # GENERATION LOGIC (AUTO-FIX)
141
+ # ============================================================
142
+
143
def generate_code_with_fix(
    prompt,
    json_slice,
    *,
    llm,
    validator=None,
    retries=5,
):
    """Generate a source file from ``prompt`` + ``json_slice``, retrying until valid.

    Args:
        prompt: System prompt text for the LLM.
        json_slice: JSON-serialisable spec slice sent as the user message.
        llm: LangChain chat model (piped after the prompt template).
        validator: Optional predicate applied to the generated code; when
            omitted, a pure syntax check (``is_valid_python``) is used.
        retries: Maximum number of generation attempts.

    Returns:
        The first generated code string that passes validation.

    Raises:
        RuntimeError: When no valid file is produced within ``retries``
            attempts (subclass of Exception, so existing handlers still work).
    """
    # The chain and payload are invariant across attempts; build them once.
    chain = ChatPromptTemplate.from_messages([
        ("system", prompt),
        ("user", "{json_input}")
    ]) | llm
    payload = {"json_input": json.dumps(json_slice, indent=2)}

    # Single validation path: custom validator if given, else syntax-only.
    check = validator if validator is not None else is_valid_python

    for attempt in range(retries):
        print(f"🧠 Generating (attempt {attempt+1})...")

        code = chain.invoke(payload).content.strip()

        if check(code):
            print("✅ File valid.")
            return code

        print("❌ Invalid file. Regenerating...")

    raise RuntimeError("❌ Could not generate a valid file after retries.")
182
+
183
+
184
def generate_urls_with_fix(prompt, json_slice, *, llm, retries=5):
    """Generate urls.py content, enforcing the urls-specific validator."""
    return generate_code_with_fix(
        prompt,
        json_slice,
        validator=is_valid_urls,
        llm=llm,
        retries=retries,
    )
186
+
187
+
188
+ # ============================================================
189
+ # MAIN PROJECT GENERATOR
190
+ # ============================================================
191
+
192
+ import os
193
+ import sys
194
+
195
def generate_full_project(spec_json, output_dir, llm, project_name):
    """End-to-end Django project generator driven by a JSON spec.

    Scaffolds the project via ``python -m django startproject``, creates each
    app listed under ``spec_json["apps"]``, LLM-generates the per-app files
    (models/serializers/views/urls/admin), then requirements.txt, the
    project-level urls.py, and finally a README (best-effort, non-fatal).

    Args:
        spec_json: Parsed project spec; expected keys include ``apps``,
            ``api_config``, ``auth``, ``database``, ``deployment``.
        output_dir: Directory in which the project folder is created.
        llm: LangChain chat model passed through to all generation helpers.
        project_name: Name of the Django project (and its folder).

    Returns:
        The path of the created project directory.

    Raises:
        RuntimeError: On any failure in steps 1-5 (cause chained via
            ``from e``); README failure in step 6 is only printed.
    """
    print("generating project")

    print("creating path")

    # Only output_dir is created here; startproject creates project_path itself.
    project_path = os.path.join(output_dir, project_name)
    os.makedirs(output_dir, exist_ok=True)

    PYTHON = sys.executable  # ✅ always correct python

    # -----------------------------
    # 1) CREATE DJANGO PROJECT
    # -----------------------------
    try:
        print("🚀 Creating Django project...")
        run_cmd(f"{PYTHON} -m django startproject {project_name}", cwd=output_dir)
    except Exception as e:
        raise RuntimeError(f"Failed to create Django project '{project_name}'") from e

    # -----------------------------
    # 2) CREATE APPS
    # -----------------------------
    try:
        for app in spec_json.get("apps", {}):
            print(f"📦 Creating app: {app}")
            run_cmd(f"{PYTHON} manage.py startapp {app}", cwd=project_path)

            # settings.py lives in the inner <project_name> package folder.
            settings_path = os.path.join(
                project_path,
                project_name,
                "settings.py"
            )

            add_app_to_installed_apps(settings_path, app)

    except Exception as e:
        raise RuntimeError("Failed while creating Django apps") from e

    # -----------------------------
    # 3) GENERATE FILES FOR EACH APP
    # -----------------------------
    for app_name, app_spec in spec_json.get("apps", {}).items():
        try:
            print(f"📝 Generating code for app: {app_name}")
            app_dir = os.path.join(project_path, app_name)

            if not os.path.exists(app_dir):
                raise FileNotFoundError(f"App directory not found: {app_dir}")

            # Per-file spec slices keep each LLM call small and focused on
            # exactly the data that file needs.
            models_slice = {"models": app_spec.get("models", {})}

            serializer_slice = {
                "model_names": sorted(app_spec.get("models", {}).keys())
            }
            admin_slice = {"model_names": list(app_spec.get("models", {}).keys())}

            views_slice = {
                "model_names": list(app_spec.get("models", {}).keys()),
                "apis": app_spec.get("apis", {})
            }

            urls_slice = {
                "model_names": list(app_spec.get("models", {}).keys()),
                "apis": app_spec.get("apis", {}),
                "base_url": spec_json.get("api_config", {}).get("base_url", "/api/")
            }

            # Each file gets its matching validator; urls.py uses the
            # dedicated wrapper with the strictest checks.
            models_code = generate_code_with_fix(
                MODELS_PROMPT, models_slice, validator=is_valid_python, llm=llm
            )
            write_file(os.path.join(app_dir, "models.py"), models_code)

            serializers_code = generate_code_with_fix(
                SERIALIZERS_PROMPT, serializer_slice, validator=is_valid_serializer, llm=llm
            )
            write_file(os.path.join(app_dir, "serializers.py"), serializers_code)

            views_code = generate_code_with_fix(
                VIEWS_PROMPT, views_slice, validator=is_valid_views, llm=llm
            )
            write_file(os.path.join(app_dir, "views.py"), views_code)

            urls_code = generate_urls_with_fix(
                URLS_PROMPT, urls_slice, llm=llm
            )
            write_file(os.path.join(app_dir, "urls.py"), urls_code)

            admin_code = generate_code_with_fix(
                ADMIN_PROMPT, admin_slice, validator=is_valid_python, llm=llm
            )
            write_file(os.path.join(app_dir, "admin.py"), admin_code)

        except Exception as e:
            raise RuntimeError(f"Code generation failed for app '{app_name}'") from e

    # -----------------------------
    # 4) GENERATE REQUIREMENTS
    # -----------------------------
    try:
        requirements_slice = {
            "auth": spec_json.get("auth", {}),
            "database": spec_json.get("database", {}),
            "deployment": spec_json.get("deployment", {})
        }

        # No validator: requirements.txt is not Python source.
        requirements_code = generate_code_with_fix(
            REQUIREMENTS_PROMPT, requirements_slice, llm=llm
        )
        write_file(os.path.join(project_path, "requirements.txt"), requirements_code)

    except Exception as e:
        raise RuntimeError("Failed to generate requirements.txt") from e

    # -----------------------------
    # 5) PROJECT URLS
    # -----------------------------
    try:
        project_urls_slice = {
            "project_name": project_name,
            "apps": list(spec_json.get("apps", {}).keys()),
            "base_url": spec_json.get("api_config", {}).get("base_url", "/api/")
        }

        project_urls_code = generate_code_with_fix(
            PROJECT_URLS_PROMPT,
            project_urls_slice,
            validator=is_valid_python, llm=llm
        )

        write_file(
            os.path.join(project_path, project_name, "urls.py"),
            project_urls_code
        )

    except Exception as e:
        raise RuntimeError("Failed to generate project urls.py") from e

    # -----------------------------
    # 6) README (OPTIONAL)
    # -----------------------------
    try:
        generate_readme_with_llm(spec_json, project_path, llm=llm)
    except Exception as e:
        # Deliberately non-fatal: a missing README should not fail the build.
        print("⚠ README generation failed (non-blocking):", e)

    print("\n🎉 DONE! Project created at:", project_path)
    return project_path
342
+
343
def load_json_spec(path="json_output/spec.json"):
    """Read and return the JSON project spec stored at ``path``."""
    with open(path, encoding="utf-8") as spec_file:
        return json.load(spec_file)
346
+
347
+
348
#=============================Prompt_With_Model_Specification================
# Use the langchain_core path for consistency with the top-of-file import:
# re-importing from the legacy ``langchain.prompts`` module would rebind
# ChatPromptTemplate to the deprecated alias and undo the import fix above.
from langchain_core.prompts import ChatPromptTemplate
350
+
351
def generate_django_model_prompt(project_name: str, description: str, llm):
    """
    Uses LLM to generate a high-quality, structured prompt
    for Django model development with clear specifications.
    """

    # Meta-prompt: the LLM's plain-text output here is itself a prompt,
    # intended to be fed to a later LLM call that emits the actual models.py.
    prompt = ChatPromptTemplate.from_messages([
        ("system", """
You are a senior Django backend architect with production experience.

Your task is to generate a SINGLE, CLEAN, PLAIN-TEXT PROMPT
that will later be used to generate Django models.py.

STRICT RULES:
- Output ONLY plain text.
- Do NOT use markdown, bullet points, headings, or code blocks.
- Do NOT include explanations or commentary.
- Do NOT generate Django code.
- Generate ONLY the final prompt text.

MODEL DESIGN RULES:
- Follow Django ORM best practices.
- Use deterministic, production-ready model structures.
- Infer missing models or fields if required to complete the system.
- Never invent unnecessary models.
- Every model MUST have a clear real-world purpose.

FIELD RULES:
- Infer sensible fields when the user does not specify all required fields.
- Use correct Django field types.
- Add timestamps (created_at, updated_at) when appropriate.
- Use UUID primary keys where suitable.
- Do NOT use dynamic patterns or metaprogramming.
- Ensure field names are snake_case.
- Ensure model names are PascalCase.

RELATIONSHIP RULES:
- Infer relationships only when logically required.
- Use ForeignKey for one-to-many relationships.
- Use OneToOneField only when explicitly required.
- Avoid ManyToMany unless clearly necessary.

META RULES:
- Include Meta options such as db_table and ordering.
- Include __str__ methods for all models.
- Ensure compatibility with Django REST Framework serializers and views.

OUTPUT QUALITY RULE:
The generated prompt must be precise, minimal, and implementation-ready,
so that another LLM can generate models.py without making assumptions.
"""),
        ("user", """
Project Name: {project_name}

User Requirements:
{description}

Generate a single, concise, implementation-ready PROMPT
that instructs an LLM to generate Django models.py.
""")
    ])


    chain = prompt | llm
    # {project_name} and {description} are template variables filled at invoke
    # time; any literal braces in user input would need escaping upstream.
    result = chain.invoke({
        "project_name": project_name,
        "description": description
    })

    # Strip surrounding whitespace so downstream consumers get clean text.
    return result.content.strip()