# ai-rest-api-generator / generator.py
# Source history: commit dc0c6d1 ("Update generator.py") by harshadh01 (verified).
import ast
import json
import os
import shlex
import subprocess
import sys

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Import strict prompts
from prompt import *
def add_app_to_installed_apps(settings_path: str, app_name: str) -> None:
    """Register *app_name* in the INSTALLED_APPS list of a Django settings file.

    Idempotent: if the app is already listed — with either single or double
    quotes — the file is left untouched.

    Args:
        settings_path: Path to the project's ``settings.py``.
        app_name: Dotted app label to insert (e.g. ``"blog"``).
    """
    with open(settings_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    in_installed_apps = False
    already_added = False
    new_lines = []
    for line in lines:
        if line.strip().startswith("INSTALLED_APPS"):
            in_installed_apps = True
        # Match either quoting style: Django's default settings use single
        # quotes, so checking only '"{app_name}"' would duplicate entries.
        if in_installed_apps and (
            f'"{app_name}"' in line or f"'{app_name}'" in line
        ):
            already_added = True
        # The list closer marks the insertion point; accept ")" too in case
        # INSTALLED_APPS was declared as a tuple.
        if in_installed_apps and line.strip() in ("]", ")"):
            if not already_added:
                new_lines.append(f'    "{app_name}",\n')
            in_installed_apps = False
        new_lines.append(line)

    # Only rewrite the file when something actually changed.
    if not already_added:
        with open(settings_path, "w", encoding="utf-8") as f:
            f.writelines(new_lines)
# ============================================================
# HELPER FUNCTIONS
# ============================================================
def run_cmd(command, cwd=None):
    """Run a shell-style command and return its captured stdout.

    Args:
        command: The command line as a single string. Split with
            ``shlex.split`` (not ``str.split``) so quoted arguments and
            paths containing spaces survive tokenisation.
        cwd: Optional working directory for the child process.

    Returns:
        The command's standard output as text.

    Raises:
        RuntimeError: If the command exits non-zero; the message carries
            the captured stderr. (Subclass of ``Exception``, so existing
            ``except Exception`` callers keep working.)
    """
    result = subprocess.run(
        shlex.split(command),
        cwd=cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    if result.returncode != 0:
        print("❌ Command failed:", command)
        print(result.stderr)
        raise RuntimeError(result.stderr)
    return result.stdout
def write_file(path, content):
    """Write *content* to *path*, creating parent directories as needed.

    Args:
        path: Destination file path; may be a bare filename.
        content: Text to write (UTF-8).
    """
    parent = os.path.dirname(path)
    # A bare filename has an empty dirname; os.makedirs("") would raise
    # FileNotFoundError, so only create directories when one is present.
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf8") as f:
        f.write(content)
def is_valid_python(code: str) -> bool:
    """Return True iff *code* parses as Python source.

    Only parse failures are treated as "invalid": SyntaxError for
    malformed source, ValueError for e.g. NUL bytes. A bare ``except``
    here would also swallow KeyboardInterrupt/MemoryError.
    """
    try:
        ast.parse(code)
        return True
    except (SyntaxError, ValueError):
        return False
# ===================README FILE GENERATOR ===================
def generate_readme_with_llm(spec_json, project_path, llm):
    """Generate the project's README.md from the JSON spec via the LLM."""
    pipeline = ChatPromptTemplate.from_messages(
        [("system", README_PROMPT), ("user", "{json_input}")]
    ) | llm
    response = pipeline.invoke({"json_input": json.dumps(spec_json, indent=2)})
    write_file(os.path.join(project_path, "README.md"), response.content.strip())
    print("📘 README.md generated successfully.")
# ============================================================
# CUSTOM FILE VALIDATORS
# ============================================================
def is_valid_serializer(code):
    """Rejects dynamic serializer patterns."""
    banned = ("globals(", "type(", "for ", "_create", "json_input")
    for token in banned:
        if token in code:
            return False
    # Must actually declare at least one DRF-style serializer class.
    if not all(marker in code for marker in ("class ", "Serializer")):
        return False
    return is_valid_python(code)
def is_valid_views(code):
    """Reject dynamic views or helper functions."""
    banned = ("globals(", "type(", "_create", "for ", "json_input")
    if any(token in code for token in banned):
        return False
    # Views must be written as explicit APIView subclasses.
    if "APIView" not in code:
        return False
    return is_valid_python(code)
def is_valid_urls(code):
    """Validate a generated urls.py: static urlpatterns with pk detail routes."""
    for token in ("globals(", "type(", "_create", "json_input"):
        if token in code:
            return False
    required = ("urlpatterns", "path(", "<int:pk>")
    if any(marker not in code for marker in required):
        return False
    # No leading slash allowed
    if 'path("/' in code or "path('/" in code:
        return False
    return is_valid_python(code)
# ============================================================
# GENERATION LOGIC (AUTO-FIX)
# ============================================================
def generate_code_with_fix(
    prompt,
    json_slice,
    *,
    llm,
    validator=None,
    retries=5,
):
    """Ask the LLM for one file, retrying until a validator accepts it.

    Falls back to a plain syntax check when no validator is supplied;
    raises after exhausting all attempts.
    """
    payload = {"json_input": json.dumps(json_slice, indent=2)}
    check = validator if validator else is_valid_python
    for attempt in range(1, retries + 1):
        print(f"🧠 Generating (attempt {attempt})...")
        pipeline = ChatPromptTemplate.from_messages(
            [("system", prompt), ("user", "{json_input}")]
        ) | llm
        code = pipeline.invoke(payload).content.strip()
        if check(code):
            print("✅ File valid.")
            return code
        print("❌ Invalid file. Regenerating...")
    raise Exception("❌ Could not generate a valid file after retries.")
def generate_urls_with_fix(prompt, json_slice, *, llm, retries=5):
    """Wrapper around generate_code_with_fix that enforces the urls.py validator."""
    return generate_code_with_fix(
        prompt,
        json_slice,
        validator=is_valid_urls,
        llm=llm,
        retries=retries,
    )
# ============================================================
# MAIN PROJECT GENERATOR
# ============================================================
# NOTE(review): redundant mid-file imports — ``os`` is already imported at
# the top of the module; kept in place to preserve import side-effect order.
import os
import sys
def generate_full_project(spec_json, output_dir, llm, project_name):
    """Generate a complete Django REST project from a JSON specification.

    Pipeline: ``startproject`` → ``startapp`` for each entry in
    ``spec_json["apps"]`` → LLM-generated models/serializers/views/urls/admin
    per app → requirements.txt → project-level urls.py → README.md
    (best-effort, non-blocking).

    Args:
        spec_json: Parsed spec dict; reads keys ``apps``, ``api_config``,
            ``auth``, ``database``, ``deployment``.
        output_dir: Directory in which the project folder is created.
        llm: LangChain chat model used for every generation call.
        project_name: Name passed to ``django startproject``; also the
            settings package name.

    Returns:
        The path of the generated project directory.

    Raises:
        RuntimeError: If any blocking step fails (chained from the
            original exception); README failures only print a warning.
    """
    print("generating project")
    print("creating path")
    project_path = os.path.join(output_dir, project_name)
    os.makedirs(output_dir, exist_ok=True)
    PYTHON = sys.executable  # ✅ always correct python
    # -----------------------------
    # 1) CREATE DJANGO PROJECT
    # -----------------------------
    try:
        print("🚀 Creating Django project...")
        # -m django avoids depending on a django-admin script on PATH.
        run_cmd(f"{PYTHON} -m django startproject {project_name}", cwd=output_dir)
    except Exception as e:
        raise RuntimeError(f"Failed to create Django project '{project_name}'") from e
    # -----------------------------
    # 2) CREATE APPS
    # -----------------------------
    try:
        for app in spec_json.get("apps", {}):
            print(f"📦 Creating app: {app}")
            run_cmd(f"{PYTHON} manage.py startapp {app}", cwd=project_path)
            # settings.py lives in the inner <project_name> package.
            settings_path = os.path.join(
                project_path,
                project_name,
                "settings.py"
            )
            add_app_to_installed_apps(settings_path, app)
    except Exception as e:
        raise RuntimeError("Failed while creating Django apps") from e
    # -----------------------------
    # 3) GENERATE FILES FOR EACH APP
    # -----------------------------
    for app_name, app_spec in spec_json.get("apps", {}).items():
        try:
            print(f"📝 Generating code for app: {app_name}")
            app_dir = os.path.join(project_path, app_name)
            if not os.path.exists(app_dir):
                raise FileNotFoundError(f"App directory not found: {app_dir}")
            # Small per-file slices keep each LLM prompt focused on exactly
            # the data that file needs.
            models_slice = {"models": app_spec.get("models", {})}
            serializer_slice = {
                "model_names": sorted(app_spec.get("models", {}).keys())
            }
            admin_slice = {"model_names": list(app_spec.get("models", {}).keys())}
            views_slice = {
                "model_names": list(app_spec.get("models", {}).keys()),
                "apis": app_spec.get("apis", {})
            }
            urls_slice = {
                "model_names": list(app_spec.get("models", {}).keys()),
                "apis": app_spec.get("apis", {}),
                "base_url": spec_json.get("api_config", {}).get("base_url", "/api/")
            }
            # Each file type gets its own validator where one exists;
            # models/admin only need to be syntactically valid Python.
            models_code = generate_code_with_fix(
                MODELS_PROMPT, models_slice, validator=is_valid_python, llm=llm
            )
            write_file(os.path.join(app_dir, "models.py"), models_code)
            serializers_code = generate_code_with_fix(
                SERIALIZERS_PROMPT, serializer_slice, validator=is_valid_serializer, llm=llm
            )
            write_file(os.path.join(app_dir, "serializers.py"), serializers_code)
            views_code = generate_code_with_fix(
                VIEWS_PROMPT, views_slice, validator=is_valid_views, llm=llm
            )
            write_file(os.path.join(app_dir, "views.py"), views_code)
            urls_code = generate_urls_with_fix(
                URLS_PROMPT, urls_slice, llm=llm
            )
            write_file(os.path.join(app_dir, "urls.py"), urls_code)
            admin_code = generate_code_with_fix(
                ADMIN_PROMPT, admin_slice, validator=is_valid_python, llm=llm
            )
            write_file(os.path.join(app_dir, "admin.py"), admin_code)
        except Exception as e:
            raise RuntimeError(f"Code generation failed for app '{app_name}'") from e
    # -----------------------------
    # 4) GENERATE REQUIREMENTS
    # -----------------------------
    try:
        requirements_slice = {
            "auth": spec_json.get("auth", {}),
            "database": spec_json.get("database", {}),
            "deployment": spec_json.get("deployment", {})
        }
        # No validator: requirements.txt is not Python source.
        requirements_code = generate_code_with_fix(
            REQUIREMENTS_PROMPT, requirements_slice, llm=llm
        )
        write_file(os.path.join(project_path, "requirements.txt"), requirements_code)
    except Exception as e:
        raise RuntimeError("Failed to generate requirements.txt") from e
    # -----------------------------
    # 5) PROJECT URLS
    # -----------------------------
    try:
        project_urls_slice = {
            "project_name": project_name,
            "apps": list(spec_json.get("apps", {}).keys()),
            "base_url": spec_json.get("api_config", {}).get("base_url", "/api/")
        }
        project_urls_code = generate_code_with_fix(
            PROJECT_URLS_PROMPT,
            project_urls_slice,
            validator=is_valid_python, llm=llm
        )
        write_file(
            os.path.join(project_path, project_name, "urls.py"),
            project_urls_code
        )
    except Exception as e:
        raise RuntimeError("Failed to generate project urls.py") from e
    # -----------------------------
    # 6) README (OPTIONAL)
    # -----------------------------
    try:
        generate_readme_with_llm(spec_json, project_path, llm=llm)
    except Exception as e:
        # Non-blocking by design: a missing README should not abort the build.
        print("⚠ README generation failed (non-blocking):", e)
    print("\n🎉 DONE! Project created at:", project_path)
    return project_path
def load_json_spec(path="json_output/spec.json"):
    """Load and return the project specification JSON from *path*."""
    with open(path, "r", encoding="utf-8") as spec_file:
        return json.load(spec_file)
#=============================Prompt_With_Model_Specification================
def generate_django_model_prompt(project_name: str, description: str, llm):
    """
    Uses LLM to generate a high-quality, structured prompt
    for Django model development with clear specifications.

    Args:
        project_name: Human-readable name of the target project.
        description: Free-form user requirements for the system.
        llm: LangChain chat model used to draft the prompt.

    Returns:
        str: A single plain-text prompt, suitable as the system/user input
        of a later models.py-generating LLM call.
    """
    # The system message constrains the LLM's meta-task: it must emit ONLY
    # a plain-text prompt describing the desired models — never markdown
    # and never actual Django code.
    prompt = ChatPromptTemplate.from_messages([
        ("system", """
You are a senior Django backend architect with production experience.
Your task is to generate a SINGLE, CLEAN, PLAIN-TEXT PROMPT
that will later be used to generate Django models.py.
STRICT RULES:
- Output ONLY plain text.
- Do NOT use markdown, bullet points, headings, or code blocks.
- Do NOT include explanations or commentary.
- Do NOT generate Django code.
- Generate ONLY the final prompt text.
MODEL DESIGN RULES:
- Follow Django ORM best practices.
- Use deterministic, production-ready model structures.
- Infer missing models or fields if required to complete the system.
- Never invent unnecessary models.
- Every model MUST have a clear real-world purpose.
FIELD RULES:
- Infer sensible fields when the user does not specify all required fields.
- Use correct Django field types.
- Add timestamps (created_at, updated_at) when appropriate.
- Use UUID primary keys where suitable.
- Do NOT use dynamic patterns or metaprogramming.
- Ensure field names are snake_case.
- Ensure model names are PascalCase.
RELATIONSHIP RULES:
- Infer relationships only when logically required.
- Use ForeignKey for one-to-many relationships.
- Use OneToOneField only when explicitly required.
- Avoid ManyToMany unless clearly necessary.
META RULES:
- Include Meta options such as db_table and ordering.
- Include __str__ methods for all models.
- Ensure compatibility with Django REST Framework serializers and views.
OUTPUT QUALITY RULE:
The generated prompt must be precise, minimal, and implementation-ready,
so that another LLM can generate models.py without making assumptions.
"""),
        ("user", """
Project Name: {project_name}
User Requirements:
{description}
Generate a single, concise, implementation-ready PROMPT
that instructs an LLM to generate Django models.py.
""")
    ])
    chain = prompt | llm
    # ChatPromptTemplate substitutes {project_name}/{description} in the
    # user message at invoke time.
    result = chain.invoke({
        "project_name": project_name,
        "description": description
    })
    return result.content.strip()