File size: 3,544 Bytes
59bbaea
af84b73
59bbaea
 
 
 
 
 
 
 
af84b73
 
5ed073d
59bbaea
af84b73
59bbaea
 
 
 
 
 
 
 
 
af84b73
59bbaea
 
 
 
 
 
 
 
 
 
 
 
af84b73
59bbaea
 
 
 
 
 
af84b73
59bbaea
af84b73
 
59bbaea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
af84b73
59bbaea
 
 
af84b73
59bbaea
 
af84b73
59bbaea
 
af84b73
 
 
 
59bbaea
 
af84b73
59bbaea
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
#!/usr/bin/env python3
"""Agent Zero Code Architect - Software design agent with workspace isolation."""

import os, sys, json, time, threading
from pathlib import Path
from datetime import datetime
import gradio as gr
import requests as req

# HF Inference API token; empty string means unauthenticated requests.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
AGENT_NAME = "Code Architect"  # display name shown in the UI header
AGENT_ROLE = "code_architect"  # role identifier advertised in the header
# Model repo queried via the HF Inference API; overridable per deployment.
MODEL_NAME = os.environ.get("MODEL_NAME", "ScottzillaSystems/Huihui-Qwen3-Coder-Next-Opus-4.6-Reasoning-Distilled-abliterated")

# Per-agent isolated workspace plus a shared directory whose task_queue
# subfolder acts as a simple file-based queue between agents.
WORKSPACE_DIR = Path("/app/workspace/projects/code-architect")
SHARED_DIR = Path("/app/workspace/shared")
TASK_QUEUE_DIR = SHARED_DIR / "task_queue"
for d in [WORKSPACE_DIR, TASK_QUEUE_DIR]: d.mkdir(parents=True, exist_ok=True)

def query_model(prompt: str) -> str:
    """Send *prompt* to the HF Inference API and return the generated text.

    Retries up to 3 times, backing off longer after each non-200 response
    (model cold-start / rate limiting). Never raises: all attempts failing
    yields the "[ERROR] Model unavailable" sentinel string so callers can
    proceed without exception handling.
    """
    api_url = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
    for attempt in range(3):
        try:
            resp = req.post(
                api_url,
                headers={"Authorization": f"Bearer {HF_TOKEN}"},
                json={"inputs": prompt,
                      "parameters": {"max_new_tokens": 4096, "temperature": 0.5}},
                timeout=180,
            )
            if resp.status_code == 200:
                r = resp.json()
                # API returns a list of generations on success; fall back to
                # str() for any unexpected payload shape.
                return r[0].get("generated_text", "") if isinstance(r, list) else str(r)
            # Non-200 (model loading, throttled): back off before retrying.
            time.sleep(10 * (attempt + 1))
        except (req.RequestException, ValueError, KeyError, IndexError):
            # Narrow catch: network failure or malformed JSON payload.
            # The original bare `except:` also swallowed KeyboardInterrupt.
            time.sleep(5)
    return "[ERROR] Model unavailable"

def check_tasks():
    """Return pending tasks assigned to this agent from the shared queue.

    Scans every ``*.json`` file in the shared task-queue directory.
    Corrupt or partially-written files are skipped rather than crashing
    the polling loop — another agent may be mid-write; the file will be
    picked up on the next poll.
    """
    tasks = []
    for f in TASK_QUEUE_DIR.glob("*.json"):
        try:
            t = json.loads(f.read_text())
        except (OSError, json.JSONDecodeError):
            continue  # unreadable or half-written task file
        if t.get("assigned_to") == "code-architect" and t.get("status") == "pending":
            tasks.append(t)
    return tasks

def execute_task(task):
    """Run one queued task end-to-end and persist its state transitions.

    Marks the task ``in_progress`` in the shared queue file, queries the
    model, writes the generated code into this agent's workspace, then
    records ``completed`` — or ``failed`` when the model was unreachable —
    back into the same queue file along with a truncated result preview.
    """
    task_file = TASK_QUEUE_DIR / f"{task['task_id']}.json"
    task["status"] = "in_progress"
    task_file.write_text(json.dumps(task, indent=2))
    prompt = f"You are a software architect. Design and implement: {task['description']}. Provide complete, production-ready code with tests."
    result = query_model(prompt)
    out_file = WORKSPACE_DIR / f"{task['task_id']}.py"
    out_file.write_text(result)
    # Don't report success when the model never answered (query_model
    # returns an "[ERROR] ..." sentinel after exhausting its retries).
    task["status"] = "failed" if result.startswith("[ERROR]") else "completed"
    task["result"] = result[:2000]  # preview only; full output is in the file
    task["output_file"] = str(out_file)
    task["completed_at"] = datetime.now().isoformat()
    task_file.write_text(json.dumps(task, indent=2))

def autonomous_loop():
    """Background worker: poll the shared queue every 60s and run tasks.

    Each task runs in its own try/except so that one failing task cannot
    kill the daemon thread for the remaining lifetime of the process
    (the original loop died permanently on the first uncaught exception).
    """
    while True:
        for task in check_tasks():
            try:
                execute_task(task)
            except Exception as exc:
                # Log and keep the worker alive; the task remains
                # in_progress in the queue and can be re-queued.
                print(f"[{AGENT_NAME}] task {task.get('task_id')} failed: {exc}",
                      file=sys.stderr)
        time.sleep(60)

# Start the queue-polling worker; daemon=True so it never blocks shutdown.
threading.Thread(target=autonomous_loop, daemon=True).start()

# Gradio UI: a chat tab that forwards prompts to the model, and a browser
# for code files this agent has generated into its workspace.
demo = gr.Blocks(title=f"Agent Zero - {AGENT_NAME}", theme=gr.themes.Soft())
with demo:
    gr.Markdown(f"# 🏗️ Agent Zero: {AGENT_NAME}\n**Role:** {AGENT_ROLE} | **Model:** {MODEL_NAME}")
    with gr.Tabs():
        with gr.TabItem("💬 Chat"):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Architecture task")
            send = gr.Button("Send")

            def respond(m, h):
                """Query the model with the user message and append the
                (message, reply) pair to the chat history."""
                r = query_model(f"Software architecture task. Design complete implementation for: {m}")
                h = h or []
                h.append((m, r[:2000]))  # truncate: keep the chat widget responsive
                return "", h

            send.click(respond, [msg, chatbot], [msg, chatbot])
        with gr.TabItem("📁 Generated Code"):
            # NOTE(review): choices are computed once at startup; files
            # generated later won't appear until the app restarts.
            files = gr.Dropdown(label="Generated files", choices=[f.name for f in WORKSPACE_DIR.glob("*.py")])
            code = gr.Code(label="Source", language="python")

            def load_code(fname):
                """Return the selected file's contents, or "" when nothing
                is selected or the file has since been removed."""
                if not fname:
                    return ""
                try:
                    return (WORKSPACE_DIR / fname).read_text()
                except OSError:
                    # File deleted after the dropdown was populated —
                    # previously this crashed the event handler.
                    return ""

            files.change(load_code, files, code)

# Bind on all interfaces at the HF Spaces default port; queue() routes
# concurrent requests through Gradio's request queue.
demo.queue().launch(server_name="0.0.0.0", server_port=7860)