Upload wrapper.py
Browse files — wrapper.py (+18 −18)
wrapper.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
-
"""Agent Zero
|
| 3 |
|
| 4 |
import os, sys, json, time, threading
|
| 5 |
from pathlib import Path
|
|
@@ -8,11 +8,11 @@ import gradio as gr
|
|
| 8 |
import requests as req
|
| 9 |
|
| 10 |
HF_TOKEN = os.environ.get("HF_TOKEN", "")
|
| 11 |
-
AGENT_NAME = "
|
| 12 |
-
AGENT_ROLE = "
|
| 13 |
-
MODEL_NAME = os.environ.get("MODEL_NAME", "DavidAU/
|
| 14 |
|
| 15 |
-
WORKSPACE_DIR = Path("/app/workspace/projects/
|
| 16 |
SHARED_DIR = Path("/app/workspace/shared")
|
| 17 |
TASK_QUEUE_DIR = SHARED_DIR / "task_queue"
|
| 18 |
for d in [WORKSPACE_DIR, TASK_QUEUE_DIR]: d.mkdir(parents=True, exist_ok=True)
|
|
@@ -22,7 +22,7 @@ def query_model(prompt: str) -> str:
|
|
| 22 |
for attempt in range(3):
|
| 23 |
try:
|
| 24 |
resp = req.post(api_url, headers={"Authorization": f"Bearer {HF_TOKEN}"},
|
| 25 |
-
json={"inputs": prompt, "parameters": {"max_new_tokens": 4096, "temperature": 0.
|
| 26 |
timeout=180)
|
| 27 |
if resp.status_code == 200:
|
| 28 |
r = resp.json()
|
|
@@ -35,17 +35,17 @@ def check_tasks():
|
|
| 35 |
tasks = []
|
| 36 |
for f in TASK_QUEUE_DIR.glob("*.json"):
|
| 37 |
t = json.loads(f.read_text())
|
| 38 |
-
if t.get("assigned_to") == "
|
| 39 |
tasks.append(t)
|
| 40 |
return tasks
|
| 41 |
|
| 42 |
def execute_task(task):
|
| 43 |
task["status"] = "in_progress"
|
| 44 |
(TASK_QUEUE_DIR / f"{task['task_id']}.json").write_text(json.dumps(task, indent=2))
|
| 45 |
-
prompt = f"You are a
|
| 46 |
result = query_model(prompt)
|
| 47 |
-
out_file = WORKSPACE_DIR / f"{task['task_id']}.
|
| 48 |
-
out_file.write_text(
|
| 49 |
task["status"] = "completed"
|
| 50 |
task["result"] = result[:2000]
|
| 51 |
task["output_file"] = str(out_file)
|
|
@@ -61,22 +61,22 @@ threading.Thread(target=autonomous_loop, daemon=True).start()
|
|
| 61 |
|
| 62 |
demo = gr.Blocks(title=f"Agent Zero - {AGENT_NAME}", theme=gr.themes.Soft())
|
| 63 |
with demo:
|
| 64 |
-
gr.Markdown(f"#
|
| 65 |
with gr.Tabs():
|
| 66 |
with gr.TabItem("๐ฌ Chat"):
|
| 67 |
chatbot = gr.Chatbot(height=400)
|
| 68 |
-
msg = gr.Textbox(label="
|
| 69 |
send = gr.Button("Send")
|
| 70 |
def respond(m, h):
|
| 71 |
-
r = query_model(f"
|
| 72 |
h = h or []; h.append((m, r[:2000])); return "", h
|
| 73 |
send.click(respond, [msg, chatbot], [msg, chatbot])
|
| 74 |
-
with gr.TabItem("
|
| 75 |
-
files = gr.Dropdown(label="
|
| 76 |
-
|
| 77 |
-
def
|
| 78 |
if fname: return (WORKSPACE_DIR / fname).read_text()
|
| 79 |
return ""
|
| 80 |
-
files.change(
|
| 81 |
|
| 82 |
demo.queue().launch(server_name="0.0.0.0", server_port=7860)
|
|
|
|
| 1 |
#!/usr/bin/env python3
|
| 2 |
+
"""Agent Zero Code Architect - Software design agent with workspace isolation."""
|
| 3 |
|
| 4 |
import os, sys, json, time, threading
|
| 5 |
from pathlib import Path
|
|
|
|
| 8 |
import requests as req
|
| 9 |
|
| 10 |
# --- Agent identity & model configuration (overridable via environment) ---
HF_TOKEN = os.environ.get("HF_TOKEN", "")  # Hugging Face API token; empty string when unset
AGENT_NAME = "Code Architect"
AGENT_ROLE = "code_architect"
MODEL_NAME = os.environ.get("MODEL_NAME", "DavidAU/OpenAi-GPT-oss-20b-abliterated-uncensored-NEO-Imatrix-gguf")

# --- Workspace layout: per-agent project dir plus a shared task queue ---
WORKSPACE_DIR = Path("/app/workspace/projects/code-architect")
SHARED_DIR = Path("/app/workspace/shared")
TASK_QUEUE_DIR = SHARED_DIR / "task_queue"
# Create required directories up front; idempotent across restarts.
# (PEP 8: expanded from a one-line compound for-statement.)
for d in (WORKSPACE_DIR, TASK_QUEUE_DIR):
    d.mkdir(parents=True, exist_ok=True)
|
|
|
|
| 22 |
for attempt in range(3):
|
| 23 |
try:
|
| 24 |
resp = req.post(api_url, headers={"Authorization": f"Bearer {HF_TOKEN}"},
|
| 25 |
+
json={"inputs": prompt, "parameters": {"max_new_tokens": 4096, "temperature": 0.5}},
|
| 26 |
timeout=180)
|
| 27 |
if resp.status_code == 200:
|
| 28 |
r = resp.json()
|
|
|
|
| 35 |
tasks = []
|
| 36 |
for f in TASK_QUEUE_DIR.glob("*.json"):
|
| 37 |
t = json.loads(f.read_text())
|
| 38 |
+
if t.get("assigned_to") == "code-architect" and t.get("status") == "pending":
|
| 39 |
tasks.append(t)
|
| 40 |
return tasks
|
| 41 |
|
| 42 |
def execute_task(task):
|
| 43 |
task["status"] = "in_progress"
|
| 44 |
(TASK_QUEUE_DIR / f"{task['task_id']}.json").write_text(json.dumps(task, indent=2))
|
| 45 |
+
prompt = f"You are a software architect. Design and implement: {task['description']}. Provide complete, production-ready code with tests."
|
| 46 |
result = query_model(prompt)
|
| 47 |
+
out_file = WORKSPACE_DIR / f"{task['task_id']}.py"
|
| 48 |
+
out_file.write_text(result)
|
| 49 |
task["status"] = "completed"
|
| 50 |
task["result"] = result[:2000]
|
| 51 |
task["output_file"] = str(out_file)
|
|
|
|
| 61 |
|
| 62 |
demo = gr.Blocks(title=f"Agent Zero - {AGENT_NAME}", theme=gr.themes.Soft())
|
| 63 |
with demo:
|
| 64 |
+
gr.Markdown(f"# ๐๏ธ Agent Zero: {AGENT_NAME}\n**Role:** {AGENT_ROLE} | **Model:** {MODEL_NAME}")
|
| 65 |
with gr.Tabs():
|
| 66 |
with gr.TabItem("๐ฌ Chat"):
|
| 67 |
chatbot = gr.Chatbot(height=400)
|
| 68 |
+
msg = gr.Textbox(label="Architecture task")
|
| 69 |
send = gr.Button("Send")
|
| 70 |
def respond(m, h):
|
| 71 |
+
r = query_model(f"Software architecture task. Design complete implementation for: {m}")
|
| 72 |
h = h or []; h.append((m, r[:2000])); return "", h
|
| 73 |
send.click(respond, [msg, chatbot], [msg, chatbot])
|
| 74 |
+
with gr.TabItem("๐ Generated Code"):
|
| 75 |
+
files = gr.Dropdown(label="Generated files", choices=[f.name for f in WORKSPACE_DIR.glob("*.py")])
|
| 76 |
+
code = gr.Code(label="Source", language="python")
|
| 77 |
+
def load_code(fname):
|
| 78 |
if fname: return (WORKSPACE_DIR / fname).read_text()
|
| 79 |
return ""
|
| 80 |
+
files.change(load_code, files, code)
|
| 81 |
|
| 82 |
demo.queue().launch(server_name="0.0.0.0", server_port=7860)
|