easytt1178 committed on
Commit
bb6e1c1
·
1 Parent(s): fc2ef27

feat: scaffold 5-agent orchestrator with Gradio UI

Browse files
README.md CHANGED
@@ -29,6 +29,18 @@ Model card
29
  # model = AutoModel.from_pretrained("easytt1178/example-model")
30
  ```
31
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  ## Training Data and Evaluation
33
  - Describe datasets, preprocessing, metrics, and evaluation results.
34
 
 
29
  # model = AutoModel.from_pretrained("easytt1178/example-model")
30
  ```
31
 
32
+ ## Multi-Agent Orchestrator (Local)
33
+ - Requirements: `pip install -r requirements.txt`
34
+ - Set token (for HF Inference API): PowerShell `$env:HF_TOKEN = "hf_..."` or bash `export HF_TOKEN="hf_..."`
35
+ - Run UI: `python app.py` then open the shown URL.
36
+
37
+ ### Agents
38
+ - Code: generate code via HF Inference API
39
+ - Vision: text-to-image via HF Inference API
40
+ - Bot: scaffolds Telegram/Discord bots
41
+ - Reasoning: simple planning steps
42
+
43
+
44
  ## Training Data and Evaluation
45
  - Describe datasets, preprocessing, metrics, and evaluation results.
46
 
app.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ from PIL import Image
4
+ from src.orchestrator import run_task
5
+
6
+
7
def ui_generate_code(prompt, language):
    """Gradio callback for the Code tab: route the request to the code agent."""
    payload = {"prompt": prompt, "language": language}
    return run_task("code", payload)
9
+
10
+
11
def ui_generate_image(prompt):
    """Gradio callback for the Image tab: route the prompt to the vision agent."""
    return run_task("image", {"prompt": prompt})
14
+
15
+
16
def ui_create_bot(bot_type):
    """Gradio callback for the Bot tab: scaffold a bot and report its location."""
    template_path = run_task("bot", {"bot_type": bot_type})
    return f"Created bot template at: {template_path}"
19
+
20
+
21
def ui_plan(goal):
    """Gradio callback for the Reasoning tab: render plan steps as a bullet list."""
    bullet_lines = [f"- {step}" for step in run_task("plan", {"goal": goal})]
    return "\n".join(bullet_lines)
24
+
25
+
26
# Build the Gradio UI. The top-level `demo` name is the app's entry point
# (referenced by the __main__ guard and by Hugging Face Spaces).
with gr.Blocks(title="Multi-Agent Orchestrator") as demo:
    gr.Markdown("# Multi-Agent: Code, Vision, Bot, Reasoning")

    # --- Code generation tab ---
    with gr.Tab("Code"):
        code_prompt = gr.Textbox(label="What to build?", lines=6)
        code_lang = gr.Dropdown(
            choices=["python", "javascript", "typescript", "go", "rust"],
            value="python",
            label="Language",
        )
        code_output = gr.Code(language="python", label="Generated Code")
        code_btn = gr.Button("Generate")
        code_btn.click(ui_generate_code, inputs=[code_prompt, code_lang], outputs=code_output)

    # --- Text-to-image tab ---
    with gr.Tab("Image"):
        image_prompt = gr.Textbox(label="Image prompt", lines=3)
        image_output = gr.Image(type="pil", label="Output")
        image_btn = gr.Button("Generate Image")
        image_btn.click(ui_generate_image, inputs=[image_prompt], outputs=image_output)

    # --- Bot scaffolding tab ---
    with gr.Tab("Bot"):
        bot_type = gr.Dropdown(
            choices=["telegram", "discord"],
            value="telegram",
            label="Bot type",
        )
        bot_result = gr.Textbox(label="Result")
        bot_btn = gr.Button("Create Bot Template")
        bot_btn.click(ui_create_bot, inputs=[bot_type], outputs=bot_result)

    # --- Planning tab ---
    with gr.Tab("Reasoning / Plan"):
        plan_goal = gr.Textbox(label="Goal description", lines=4)
        plan_output = gr.Textbox(label="Plan")
        plan_btn = gr.Button("Make Plan")
        plan_btn.click(ui_plan, inputs=[plan_goal], outputs=plan_output)
49
+
50
+
51
if __name__ == "__main__":
    # HF_TOKEN is expected in the environment for InferenceClient usage.
    # Bind on all interfaces; PORT (if set) overrides the Gradio default.
    port = int(os.getenv("PORT", "7860"))
    demo.launch(server_name="0.0.0.0", server_port=port)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ huggingface_hub>=0.24.6
2
+ gradio>=4.40.0
3
+ Pillow>=10.4.0
4
+ transformers>=4.44.2
src/agents/bot_agent.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+
4
# NOTE: these templates were previously assigned `""""""` (two adjacent empty
# triple-quoted strings), which made both constants EMPTY and caused the
# template code to execute at module import time (`import discord` would even
# crash the import when discord isn't installed). They are now real literals.
TELEGRAM_TEMPLATE = '''\
import os
import logging
from telegram import Update
from telegram.ext import Application, CommandHandler, MessageHandler, filters, ContextTypes

TOKEN = os.getenv("TELEGRAM_TOKEN")

async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
    await update.message.reply_text("Hello! I'm your assistant bot.")

async def echo(update: Update, context: ContextTypes.DEFAULT_TYPE):
    await update.message.reply_text(update.message.text)

def main():
    if not TOKEN:
        raise RuntimeError("Set TELEGRAM_TOKEN env var")
    app = Application.builder().token(TOKEN).build()
    app.add_handler(CommandHandler("start", start))
    app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, echo))
    app.run_polling()

if __name__ == "__main__":
    main()
'''


DISCORD_TEMPLATE = '''\
import os
import discord

TOKEN = os.getenv("DISCORD_TOKEN")
intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

@client.event
async def on_ready():
    print(f"Logged in as {client.user}")

@client.event
async def on_message(message: discord.Message):
    if message.author == client.user:
        return
    if message.content.startswith("!ping"):
        await message.channel.send("pong")

def main():
    if not TOKEN:
        raise RuntimeError("Set DISCORD_TOKEN env var")
    client.run(TOKEN)

if __name__ == "__main__":
    main()
'''


# Maps each supported bot type to its (output filename, template) pair.
_BOT_TEMPLATES = {
    "telegram": ("telegram_bot.py", TELEGRAM_TEMPLATE),
    "discord": ("discord_bot.py", DISCORD_TEMPLATE),
}


def create_bot(bot_type: str, output_dir: str = "bots") -> str:
    """Write a ready-to-run bot scaffold to *output_dir* and return its path.

    Args:
        bot_type: Either "telegram" or "discord".
        output_dir: Directory for the generated file (created if missing).

    Returns:
        Path of the written template file.

    Raises:
        ValueError: If *bot_type* is not supported (raised before any
            filesystem side effect).
    """
    try:
        filename, template = _BOT_TEMPLATES[bot_type]
    except KeyError:
        raise ValueError("Unsupported bot_type. Use 'telegram' or 'discord'.") from None
    os.makedirs(output_dir, exist_ok=True)
    path = os.path.join(output_dir, filename)
    with open(path, "w", encoding="utf-8") as f:
        f.write(template)
    return path
src/agents/code_agent.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ from huggingface_hub import InferenceClient
3
+ import os
4
+
5
+
6
# Model used for code generation; overridable via HF_CODE_MODEL.
DEFAULT_MODEL = os.environ.get("HF_CODE_MODEL", "HuggingFaceH4/zephyr-7b-beta")


def generate_code(prompt: str, language: str = "python", max_new_tokens: int = 512) -> str:
    """Generate code or technical text via the HF Inference API.

    Args:
        prompt: Natural-language description of the code to produce.
        language: Target programming language (only steers the model prompt).
        max_new_tokens: Generation budget passed to the API.

    Returns:
        The generated text (stripped), or a commented fallback snippet if the
        API call fails for any reason.
    """
    system_hint = (
        f"You are an expert {language} code assistant. "
        "Output only code unless explanation is requested."
    )
    full_prompt = f"{system_hint}\nTask: {prompt}\nLanguage: {language}"
    try:
        # Client construction is inside the try so that any failure here
        # (bad token, offline environment) also triggers the fallback,
        # matching the documented best-effort behavior.
        client = InferenceClient(token=os.environ.get("HF_TOKEN"))
        text = client.text_generation(
            model=DEFAULT_MODEL,
            prompt=full_prompt,
            max_new_tokens=max_new_tokens,
            temperature=0.2,
        )
        return text.strip()
    except Exception as e:
        return f"# Fallback template due to API error: {e}\nprint('Hello from {language} generator')\n"
src/agents/reasoning_agent.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+
4
def plan_tasks(goal: str) -> List[str]:
    """Decompose *goal* into a fixed sequence of actionable planning steps.

    A deliberately simple, deterministic planner: the first step restates
    the goal verbatim and the remaining steps are a generic workflow.
    """
    return [
        f"Clarify goal: {goal}",
        "Identify inputs, outputs, constraints",
        "Choose tool/model per subtask",
        "Draft solution and validate against constraints",
        "Iterate fixes and produce final output",
    ]
src/agents/vision_agent.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ from huggingface_hub import InferenceClient
3
+ from PIL import Image
4
+ import io
5
+ import os
6
+
7
+
8
# Text-to-image model; overridable via HF_IMAGE_MODEL.
TEXT_TO_IMAGE_MODEL = os.environ.get("HF_IMAGE_MODEL", "stabilityai/stable-diffusion-2-1")


def generate_image(prompt: str, guidance_scale: float = 7.5, num_inference_steps: int = 30) -> Image.Image:
    """Generate an image from text using the HF Inference API.

    Args:
        prompt: Text description of the desired image.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Diffusion steps.

    Returns:
        The generated image, or a flat light-gray 512x512 placeholder if the
        API call fails.
    """
    client = InferenceClient(token=os.environ.get("HF_TOKEN"))
    try:
        result = client.text_to_image(
            model=TEXT_TO_IMAGE_MODEL,
            prompt=prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
        )
        # huggingface_hub >= 0.20 returns a PIL.Image directly; earlier
        # versions returned raw bytes. The old code assumed bytes, so with
        # the pinned hub version Image.open() always raised and the
        # placeholder was returned on every call. Handle both shapes.
        if isinstance(result, Image.Image):
            return result
        return Image.open(io.BytesIO(result))
    except Exception:
        # Best-effort fallback: placeholder so the UI never crashes.
        return Image.new("RGB", (512, 512), color=(240, 240, 240))
src/orchestrator.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Literal, Dict, Any
2
+ from src.agents.code_agent import generate_code
3
+ from src.agents.vision_agent import generate_image
4
+ from src.agents.bot_agent import create_bot
5
+ from src.agents.reasoning_agent import plan_tasks
6
+
7
+
8
# The task identifiers the orchestrator understands.
TaskType = Literal["code", "image", "bot", "plan"]


def run_task(task_type: TaskType, payload: Dict[str, Any]):
    """Dispatch *payload* to the agent responsible for *task_type*.

    Args:
        task_type: One of "code", "image", "bot", "plan".
        payload: Task-specific keys; missing keys fall back to defaults.

    Raises:
        ValueError: If *task_type* is not a supported task.
    """
    handlers = {
        "code": lambda p: generate_code(p.get("prompt", ""), p.get("language", "python")),
        "image": lambda p: generate_image(p.get("prompt", "")),
        "bot": lambda p: create_bot(p.get("bot_type", "telegram")),
        "plan": lambda p: plan_tasks(p.get("goal", "")),
    }
    if task_type not in handlers:
        raise ValueError("Unsupported task type")
    return handlers[task_type](payload)