42hgyn26hz-cpu committed on
Commit
4f4aa9b
·
1 Parent(s): 35c564d

Initial commit

Browse files
.vscode/settings.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "python.analysis.extraPaths": [
3
+ "./api"
4
+ ]
5
+ }
api/Dockerfile ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.10
2
+
3
+ WORKDIR /app
4
+
5
+ COPY requirements.txt .
6
+ RUN pip install -r requirements.txt
7
+
8
+ COPY app ./app
9
+
10
+ CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
api/app/__init__.py ADDED
File without changes
api/app/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (163 Bytes). View file
 
api/app/__pycache__/config.cpython-312.pyc ADDED
Binary file (353 Bytes). View file
 
api/app/__pycache__/orchestrator.cpython-312.pyc ADDED
Binary file (1.09 kB). View file
 
api/app/agents/__init__.py ADDED
File without changes
api/app/agents/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (170 Bytes). View file
 
api/app/agents/__pycache__/backend.cpython-312.pyc ADDED
Binary file (767 Bytes). View file
 
api/app/agents/__pycache__/devops.cpython-312.pyc ADDED
Binary file (1.2 kB). View file
 
api/app/agents/__pycache__/frontend.cpython-312.pyc ADDED
Binary file (1.2 kB). View file
 
api/app/agents/__pycache__/planner.cpython-312.pyc ADDED
Binary file (773 Bytes). View file
 
api/app/agents/__pycache__/reviewer.cpython-312.pyc ADDED
Binary file (1.19 kB). View file
 
api/app/agents/backend.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ from app.config import CODE_MODEL_URL
3
+
4
+ def backend_agent(plan):
5
+ payload = {
6
+ "model": "deepseek-ai/DeepSeek-Coder-V2-Instruct",
7
+ "messages": [{"role": "user", "content": f"Generate FastAPI backend:\n{plan}"}],
8
+ "temperature": 0.2
9
+ }
10
+
11
+ r = requests.post(CODE_MODEL_URL, json=payload)
12
+ return r.json()["choices"][0]["message"]["content"]
api/app/agents/devops.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ from app.config import REASON_MODEL_URL
3
+
4
+ MODEL_NAME = "Qwen/Qwen2.5-7B-Instruct"
5
+
6
+
7
+ def devops_agent(plan: str) -> str:
8
+ prompt = f"""
9
+ You are a DevOps engineer.
10
+
11
+ Based on this project plan:
12
+ {plan}
13
+
14
+ Generate:
15
+ - Dockerfile for backend
16
+ - docker-compose.yml
17
+ - Production deployment notes
18
+ - Environment variables structure
19
+
20
+ Return clean production-ready configuration files.
21
+ """
22
+
23
+ payload = {
24
+ "model": MODEL_NAME,
25
+ "messages": [{"role": "user", "content": prompt}],
26
+ "temperature": 0.2,
27
+ }
28
+
29
+ response = requests.post(REASON_MODEL_URL, json=payload, timeout=300)
30
+ response.raise_for_status()
31
+
32
+ return response.json()["choices"][0]["message"]["content"]
api/app/agents/frontend.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ from app.config import CODE_MODEL_URL
3
+
4
+ MODEL_NAME = "deepseek-ai/DeepSeek-Coder-V2-Instruct"
5
+
6
+
7
+ def frontend_agent(plan: str) -> str:
8
+ prompt = f"""
9
+ You are a senior frontend engineer.
10
+
11
+ Based on this backend plan:
12
+ {plan}
13
+
14
+ Generate:
15
+ - NextJS app structure
16
+ - API integration example
17
+ - Auth integration
18
+ - Clean folder structure
19
+
20
+ Return full production-ready frontend code.
21
+ """
22
+
23
+ payload = {
24
+ "model": MODEL_NAME,
25
+ "messages": [{"role": "user", "content": prompt}],
26
+ "temperature": 0.3,
27
+ }
28
+
29
+ response = requests.post(CODE_MODEL_URL, json=payload, timeout=300)
30
+ response.raise_for_status()
31
+
32
+ return response.json()["choices"][0]["message"]["content"]
api/app/agents/planner.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ from app.config import REASON_MODEL_URL
3
+
4
+ def planner_agent(prompt):
5
+ payload = {
6
+ "model": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
7
+ "messages": [{"role": "user", "content": f"Break this task into steps:\n{prompt}"}],
8
+ "temperature": 0.2
9
+ }
10
+
11
+ r = requests.post(REASON_MODEL_URL, json=payload)
12
+ return r.json()["choices"][0]["message"]["content"]
api/app/agents/reviewer.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ from app.config import REASON_MODEL_URL
3
+
4
+ MODEL_NAME = "Qwen/Qwen3-Coder-480B-A35B-Instruct"
5
+
6
+
7
+ def reviewer_agent(code: str) -> str:
8
+ prompt = f"""
9
+ You are a senior code reviewer.
10
+
11
+ Review the following code:
12
+ - Fix bugs
13
+ - Improve performance
14
+ - Improve structure
15
+ - Remove duplication
16
+ - Add missing error handling
17
+
18
+ Return improved version only.
19
+
20
+ CODE:
21
+ {code}
22
+ """
23
+
24
+ payload = {
25
+ "model": MODEL_NAME,
26
+ "messages": [{"role": "user", "content": prompt}],
27
+ "temperature": 0.1,
28
+ }
29
+
30
+ response = requests.post(REASON_MODEL_URL, json=payload, timeout=300)
31
+ response.raise_for_status()
32
+
33
+ return response.json()["choices"][0]["message"]["content"]
api/app/config.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ CODE_MODEL_URL = "http://vllm-code:8000/v1/chat/completions"
2
+ REASON_MODEL_URL = "http://vllm-reason:8000/v1/chat/completions"
3
+
4
+ REDIS_HOST = "redis"
5
+ REDIS_PORT = 6379
api/app/main.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ from pathlib import Path
3
+
4
+ sys.path.insert(0, str(Path(__file__).parent.parent))
5
+
6
+ from fastapi import FastAPI
7
+ from orchestrator import autodev
8
+
9
+ app = FastAPI()
10
+
11
+ @app.post("/autodev")
12
+ async def run_autodev(data: dict):
13
+ result = await autodev(data["prompt"])
14
+ return result
api/app/orchestrator.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ from agents.planner import planner_agent
3
+ from agents.backend import backend_agent
4
+ from tools.test_runner import run_tests
5
+
6
+ MAX_ITER = 3
7
+
8
+ async def autodev(prompt):
9
+
10
+ plan = planner_agent(prompt)
11
+
12
+ code = backend_agent(plan)
13
+
14
+ for _ in range(MAX_ITER):
15
+
16
+ with open("generated/main.py", "w") as f:
17
+ f.write(code)
18
+
19
+ status, logs = run_tests()
20
+
21
+ if status == 0:
22
+ return {"status": "success", "code": code}
23
+
24
+ fix_prompt = f"Fix this code:\n\n{logs}"
25
+ code = backend_agent(fix_prompt)
26
+
27
+ return {"status": "failed"}
api/app/router.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sentence_transformers import SentenceTransformer
2
+ import numpy as np
3
+ from sklearn.metrics.pairwise import cosine_similarity
4
+
5
+ embed_model = SentenceTransformer("BAAI/bge-large-en")
6
+
7
+ task_map ={
8
+ "CODE": {
9
+ "agent": "backend",
10
+ "prompt": "You are the Backend Architect."
11
+ },
12
+ "REASON": {
13
+ "agent": "planner",
14
+ "prompt": "You are the Logic Planner."
15
+ },
16
+ "CHAT": {
17
+ "agent": "frontend",
18
+ "prompt": "You are the User Interface."
19
+ }
20
+ }
21
+
22
+ # Cách sử dụng trong Orchestrator:
23
+ # selected_task = task_map["CODE"]
24
+ # system_prompt = selected_task["system_role"]
25
+
26
+
27
+ task_names = list(task_map.keys())
28
+ task_vectors = embed_model.encode(list(task_map.values()))
29
+
30
+ def route(prompt):
31
+ prompt_vec = embed_model.encode([prompt])
32
+ sims = cosine_similarity(prompt_vec, task_vectors)[0]
33
+ return task_names[np.argmax(sims)]
api/app/tools/__init__.py ADDED
File without changes
api/app/tools/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (169 Bytes). View file
 
api/app/tools/__pycache__/docker_sandbox.cpython-312.pyc ADDED
Binary file (2.03 kB). View file
 
api/app/tools/__pycache__/file_writer.cpython-312.pyc ADDED
Binary file (2.06 kB). View file
 
api/app/tools/__pycache__/test_runner.cpython-312.pyc ADDED
Binary file (551 Bytes). View file
 
api/app/tools/docker_sandbox.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+ import uuid
3
+ from pathlib import Path
4
+
5
+
6
+ class DockerSandbox:
7
+
8
+ def __init__(self, project_dir="generated"):
9
+ self.project_dir = Path(project_dir)
10
+
11
+ def build_image(self):
12
+ image_tag = f"ai_dev_{uuid.uuid4().hex[:8]}"
13
+
14
+ cmd = [
15
+ "docker",
16
+ "build",
17
+ "-t",
18
+ image_tag,
19
+ str(self.project_dir)
20
+ ]
21
+
22
+ result = subprocess.run(
23
+ cmd,
24
+ capture_output=True,
25
+ text=True
26
+ )
27
+
28
+ if result.returncode != 0:
29
+ return False, result.stderr
30
+
31
+ return True, image_tag
32
+
33
+ def run_container(self, image_tag, timeout=20):
34
+ cmd = [
35
+ "docker",
36
+ "run",
37
+ "--rm",
38
+ "--network", "none", # Disable internet
39
+ "--memory", "512m",
40
+ "--cpus", "1",
41
+ image_tag
42
+ ]
43
+
44
+ result = subprocess.run(
45
+ cmd,
46
+ capture_output=True,
47
+ text=True,
48
+ timeout=timeout
49
+ )
50
+
51
+ return result.returncode, result.stdout, result.stderr
52
+
53
+ def cleanup_image(self, image_tag):
54
+ subprocess.run(
55
+ ["docker", "rmi", "-f", image_tag],
56
+ capture_output=True
57
+ )
api/app/tools/file_writer.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+
4
+
5
+ class FileWriter:
6
+
7
+ BASE_DIR = Path("generated")
8
+
9
+ def __init__(self):
10
+ self.BASE_DIR.mkdir(exist_ok=True)
11
+
12
+ def write(self, relative_path: str, content: str, overwrite=True):
13
+ target_path = self.BASE_DIR / relative_path
14
+
15
+ # Prevent directory traversal attack
16
+ if not str(target_path.resolve()).startswith(str(self.BASE_DIR.resolve())):
17
+ raise ValueError("Invalid path detected")
18
+
19
+ target_path.parent.mkdir(parents=True, exist_ok=True)
20
+
21
+ if target_path.exists() and not overwrite:
22
+ return f"{relative_path} already exists"
23
+
24
+ with open(target_path, "w", encoding="utf-8") as f:
25
+ f.write(content)
26
+
27
+ return f"Written: {relative_path}"
28
+
29
+ def read(self, relative_path: str):
30
+ target_path = self.BASE_DIR / relative_path
31
+
32
+ if not target_path.exists():
33
+ return None
34
+
35
+ with open(target_path, "r", encoding="utf-8") as f:
36
+ return f.read()
api/app/tools/test_runner.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+
3
+ def run_tests():
4
+ result = subprocess.run(
5
+ ["pytest"],
6
+ capture_output=True,
7
+ text=True
8
+ )
9
+ return result.returncode, result.stdout + result.stderr
api/requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ redis
4
+ requests
5
+ sentence-transformers
6
+ pytest
docker-compose.yml ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ version: "3.9"
2
+
3
+ services:
4
+
5
+ redis:
6
+ image: redis:7
7
+ ports:
8
+ - "6379:6379"
9
+
10
+ vllm-code:
11
+ image: vllm/vllm-openai:latest
12
+ command: >
13
+ --model deepseek-ai/DeepSeek-Coder-V2-Instruct
14
+ --dtype float16
15
+ --port 8001
16
+ ports:
17
+ - "8001:8000"
18
+ deploy:
19
+ resources:
20
+ reservations:
21
+ devices:
22
+ - capabilities: [gpu]
23
+
24
+ vllm-reason:
25
+ image: vllm/vllm-openai:latest
26
+ command: >
27
+ --model Qwen/Qwen3-Coder-480B-A35B-Instruct
28
+ --dtype float16
29
+ --port 8002
30
+ ports:
31
+ - "8002:8000"
32
+
33
+ api:
34
+ build: ./api
35
+ ports:
36
+ - "8000:8000"
37
+ depends_on:
38
+ - redis
39
+ - vllm-code
40
+ - vllm-reason
worker/Dockerfile ADDED
File without changes
worker/requirements.txt ADDED
File without changes
worker/worker.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import json
3
+ import os
4
+ import signal
5
+ import sys
6
+ import traceback
7
+ from typing import Dict
8
+ from pathlib import Path
9
+
10
+ import redis
11
+
12
+ sys.path.insert(0, str(Path(__file__).parent.parent / 'api'))
13
+
14
+ from app.agents.planner import planner_agent
15
+ from app.agents.backend import backend_agent
16
+ from app.agents.frontend import frontend_agent
17
+ from app.agents.devops import devops_agent
18
+ from app.agents.reviewer import reviewer_agent
19
+ from app.tools.file_writer import FileWriter
20
+ from app.tools.docker_sandbox import DockerSandbox
21
+
22
+
23
+ # ==============================
24
+ # CONFIG
25
+ # ==============================
26
+
27
+ REDIS_HOST = os.getenv("REDIS_HOST", "redis")
28
+ REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))
29
+ QUEUE_NAME = os.getenv("QUEUE_NAME", "autodev_queue")
30
+
31
+ MAX_DEBUG_ITER = 3
32
+
33
+
34
+ # ==============================
35
+ # REDIS CONNECTION
36
+ # ==============================
37
+
38
+ redis_client = redis.Redis(
39
+ host=REDIS_HOST,
40
+ port=REDIS_PORT,
41
+ decode_responses=True,
42
+ )
43
+
44
+
45
+ # ==============================
46
+ # WORKER CORE
47
+ # ==============================
48
+
49
+ class AutoDevWorker:
50
+
51
+ def __init__(self):
52
+ self.file_writer = FileWriter()
53
+ self.sandbox = DockerSandbox()
54
+ self.running = True
55
+
56
+ async def process_job(self, job_data: Dict):
57
+
58
+ prompt = job_data.get("prompt")
59
+ job_id = job_data.get("job_id")
60
+
61
+ print(f"[Worker] Processing job {job_id}")
62
+
63
+ # 1️⃣ Planning
64
+ plan = planner_agent(prompt)
65
+
66
+ # 2️⃣ Async parallel generation
67
+ backend_task = asyncio.to_thread(backend_agent, plan)
68
+ frontend_task = asyncio.to_thread(frontend_agent, plan)
69
+ devops_task = asyncio.to_thread(devops_agent, plan)
70
+
71
+ backend_code, frontend_code, devops_code = await asyncio.gather(
72
+ backend_task,
73
+ frontend_task,
74
+ devops_task
75
+ )
76
+
77
+ # 3️⃣ Write files
78
+ self.file_writer.write("backend/main.py", backend_code)
79
+ self.file_writer.write("frontend/app.js", frontend_code)
80
+ self.file_writer.write("Dockerfile", devops_code)
81
+
82
+ # 4️⃣ Self-debug loop
83
+ final_backend_code = await self.self_debug("backend/main.py")
84
+
85
+ # 5️⃣ Docker sandbox build & run
86
+ success, image_tag = self.sandbox.build_image()
87
+
88
+ result = {
89
+ "job_id": job_id,
90
+ "status": "completed",
91
+ "docker_success": success,
92
+ }
93
+
94
+ if success:
95
+ status, stdout, stderr = self.sandbox.run_container(image_tag)
96
+ self.sandbox.cleanup_image(image_tag)
97
+
98
+ result["run_status"] = status
99
+ result["stdout"] = stdout
100
+ result["stderr"] = stderr
101
+
102
+ # 6️⃣ Push result
103
+ redis_client.set(f"result:{job_id}", json.dumps(result))
104
+
105
+ print(f"[Worker] Job {job_id} completed")
106
+
107
+ async def self_debug(self, file_path: str):
108
+
109
+ for i in range(MAX_DEBUG_ITER):
110
+ print(f"[Worker] Debug iteration {i+1}")
111
+
112
+ code = self.file_writer.read(file_path)
113
+ improved_code = reviewer_agent(code)
114
+
115
+ self.file_writer.write(file_path, improved_code)
116
+
117
+ return improved_code
118
+
119
+ async def run(self):
120
+
121
+ print("[Worker] Started...")
122
+
123
+ while self.running:
124
+ try:
125
+ job = redis_client.blpop(QUEUE_NAME, timeout=5)
126
+
127
+ if job:
128
+ _, job_data = job
129
+ job_data = json.loads(job_data)
130
+
131
+ await self.process_job(job_data)
132
+
133
+ except Exception as e:
134
+ print("[Worker] ERROR:", str(e))
135
+ traceback.print_exc()
136
+
137
+ def stop(self):
138
+ print("[Worker] Shutting down...")
139
+ self.running = False
140
+
141
+
142
+ # ==============================
143
+ # ENTRY POINT
144
+ # ==============================
145
+
146
+ worker = AutoDevWorker()
147
+
148
+
149
+ def shutdown_handler(signum, frame):
150
+ worker.stop()
151
+ sys.exit(0)
152
+
153
+
154
+ signal.signal(signal.SIGINT, shutdown_handler)
155
+ signal.signal(signal.SIGTERM, shutdown_handler)
156
+
157
+
158
+ if __name__ == "__main__":
159
+ asyncio.run(worker.run())