#!/usr/bin/env python3
"""Agent Zero Research - Literature analysis agent with workspace isolation."""
import os, sys, json, time, threading
from pathlib import Path
from datetime import datetime
import gradio as gr
import requests as req
# Hugging Face Inference API token; empty string means unauthenticated requests.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
# Identity of this agent inside the multi-agent workspace.
AGENT_NAME = "Research"
AGENT_ROLE = "research_analyst"
# Model served through the HF Inference API; overridable via the environment.
MODEL_NAME = os.environ.get("MODEL_NAME", "ScottzillaSystems/Huihui-Qwen3.5-9B-Claude-4.6-Opus-abliterated")
# Private workspace for this agent's reports, plus the shared task-queue
# directory through which agents exchange work items as JSON files.
WORKSPACE_DIR = Path("/app/workspace/projects/research")
SHARED_DIR = Path("/app/workspace/shared")
TASK_QUEUE_DIR = SHARED_DIR / "task_queue"
# Create both directories at import time so later reads/writes cannot fail.
for d in [WORKSPACE_DIR, TASK_QUEUE_DIR]: d.mkdir(parents=True, exist_ok=True)
def query_model(prompt: str) -> str:
    """Send *prompt* to the HF Inference API and return the generated text.

    Retries up to 3 times, backing off longer on non-200 responses
    (model loading / rate limiting) than on transport errors.  Never
    raises: callers receive the sentinel string "[ERROR] Model
    unavailable" when all attempts fail.
    """
    api_url = f"https://api-inference.huggingface.co/models/{MODEL_NAME}"
    for attempt in range(3):
        try:
            resp = req.post(
                api_url,
                headers={"Authorization": f"Bearer {HF_TOKEN}"},
                json={
                    "inputs": prompt,
                    "parameters": {"max_new_tokens": 4096, "temperature": 0.6},
                },
                timeout=180,
            )
            if resp.status_code == 200:
                r = resp.json()
                # On success the API returns a list of generations; anything
                # else (e.g. an error dict) is stringified so it stays visible.
                return r[0].get("generated_text", "") if isinstance(r, list) else str(r)
            # Non-200: model may still be loading — back off progressively.
            time.sleep(10 * (attempt + 1))
        except (req.exceptions.RequestException, ValueError):
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # propagate; ValueError covers a non-JSON 200 body.
            time.sleep(5)
    return "[ERROR] Model unavailable"
def check_tasks() -> list:
    """Return pending queue tasks assigned to the "research" agent.

    Scans every ``*.json`` file in the shared task queue.  Files that
    cannot be read or parsed (e.g. another agent is mid-write, or the
    file is corrupt) are skipped instead of crashing the polling loop.
    """
    tasks = []
    for f in TASK_QUEUE_DIR.glob("*.json"):
        try:
            t = json.loads(f.read_text())
        except (json.JSONDecodeError, OSError):
            # Partial write or unreadable file — leave it for the next poll.
            continue
        if t.get("assigned_to") == "research" and t.get("status") == "pending":
            tasks.append(t)
    return tasks
def execute_task(task: dict) -> None:
    """Run one research task end-to-end and persist its state.

    Marks the task ``in_progress`` in the shared queue, queries the
    model, writes the full report into this agent's workspace, then
    records the final status back to the queue file.  A model failure
    (the "[ERROR]" sentinel from ``query_model``) is recorded as
    ``failed`` rather than ``completed``.  Mutates *task* in place.
    """
    queue_file = TASK_QUEUE_DIR / f"{task['task_id']}.json"
    task["status"] = "in_progress"
    queue_file.write_text(json.dumps(task, indent=2))
    prompt = f"You are a research analyst. Conduct thorough research on: {task['description']}. Provide citations, analysis, and recommendations."
    result = query_model(prompt)
    out_file = WORKSPACE_DIR / f"{task['task_id']}.md"
    out_file.write_text(f"# Research: {task['description']}\n\n{result}")
    # query_model never raises — it signals failure with an "[ERROR]" sentinel.
    # Previously such tasks were marked completed and silently lost.
    task["status"] = "failed" if result.startswith("[ERROR]") else "completed"
    task["result"] = result[:2000]  # truncated copy keeps queue files small
    task["output_file"] = str(out_file)
    task["completed_at"] = datetime.now().isoformat()
    queue_file.write_text(json.dumps(task, indent=2))
def autonomous_loop() -> None:
    """Poll the shared queue every 60 s and execute tasks assigned to us.

    Runs forever on a daemon thread.  A failure in any single task is
    reported to stderr instead of killing the polling thread — the
    original code let one bad task silently stop all future processing.
    """
    while True:
        for task in check_tasks():
            try:
                execute_task(task)
            except Exception as exc:  # keep the agent alive on bad tasks
                print(f"[{AGENT_NAME}] task failed: {exc}", file=sys.stderr)
        time.sleep(60)
threading.Thread(target=autonomous_loop, daemon=True).start()
# Gradio front-end: a chat tab for ad-hoc research queries and a reports
# tab that renders markdown files from this agent's workspace.
demo = gr.Blocks(title=f"Agent Zero - {AGENT_NAME}", theme=gr.themes.Soft())
with demo:
    gr.Markdown(f"# 🔬 Agent Zero: {AGENT_NAME}\n**Role:** {AGENT_ROLE} | **Model:** {MODEL_NAME}")
    with gr.Tabs():
        with gr.TabItem("💬 Chat"):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Research topic")
            send = gr.Button("Send")

            def respond(message, history):
                # Query the model directly; store a bounded copy of the reply.
                answer = query_model(f"Research topic: {message}. Provide comprehensive analysis with citations.")
                history = history or []
                history.append((message, answer[:2000]))
                # Clear the textbox and push the updated transcript.
                return "", history

            send.click(respond, [msg, chatbot], [msg, chatbot])
        with gr.TabItem("📄 Research Reports"):
            files = gr.Dropdown(label="Reports", choices=[f.name for f in WORKSPACE_DIR.glob("*.md")])
            report = gr.Markdown()

            def load_report(fname):
                # No selection renders an empty pane.
                return (WORKSPACE_DIR / fname).read_text() if fname else ""

            files.change(load_report, files, report)
# Bind on all interfaces so the container port mapping works.
demo.queue().launch(server_name="0.0.0.0", server_port=7860)