Yoon-gu Hwang and Claude committed
Commit 820a28c · Parent(s): 5dc0c74
Remove Basic Chatbot, keep only the ML Pipeline

- Delete basic_chatbot_workflow.py
- Simplify app.py to a single ML Pipeline interface
- Replace the TabbedInterface with a single ChatInterface (sketched below)
- Update README (remove the Basic Chatbot feature description)

A more focused, ML Pipeline-only application.
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
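For context, the resulting app.py reduces to roughly the following shape. This is a minimal sketch, not the actual file: the `generate_response` and `handle_like` bodies here are placeholders for the real ML Pipeline streaming generator and like handler shown in the diff below.

```python
import gradio as gr

def generate_response(message, history):
    # Placeholder for the real ML Pipeline streaming generator in app.py.
    yield f"Echo: {message}"

def handle_like(data: gr.LikeData):
    # Placeholder like/dislike handler.
    print("👍" if data.liked else "👎", data.value)

# A single Blocks app with one ChatInterface replaces the previous
# gr.TabbedInterface over [ml_pipeline_demo, basic_chatbot_demo].
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", show_copy_button=True)
    chatbot.like(handle_like, None, None)
    gr.ChatInterface(generate_response, type="messages", chatbot=chatbot)

if __name__ == "__main__":
    demo.launch()
```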
- README.md +1 -11
- app.py +9 -216
- basic_chatbot_workflow.py +0 -57
README.md
CHANGED
@@ -14,15 +14,6 @@ pinned: false
 
 A Gradio-based chat interface for LangGraph supervisor workflow with nested agent visualization and ML pipeline automation.
 
 ## Features
-
-### Basic Chatbot
-- 🤖 Multi-agent system with supervisor workflow
-- 🔍 Research agent with web search capabilities
-- 🧮 Math agent for calculations
-- 💬 Interactive chat interface with nested thoughts visualization
-- 🎨 Real-time streaming of agent execution steps
-
-### ML Pipeline Automation
 - 📊 Data extraction from RDB tables using SQL
 - 🔤 Language model pretraining with tokenization
 - 🎯 Classification model finetuning
@@ -79,8 +70,7 @@ To deploy on HuggingFace Spaces, set these secrets:
 
 ## Project Structure
 
-- `app.py` - Main
-- `basic_chatbot_workflow.py` - Basic chatbot supervisor workflow with research and math agents
+- `app.py` - Main Gradio application
 - `ml_pipeline_workflow.py` - ML pipeline supervisor workflow with 4 specialized agents
 - `pyproject.toml` - Project dependencies managed by uv
 - `.env` - Environment variables (not tracked in git)
app.py
CHANGED
@@ -2,8 +2,7 @@ import gradio as gr
 from gradio import ChatMessage
 from langchain_core.messages import BaseMessage, HumanMessage
 import time
-from basic_chatbot_workflow import app as basic_chatbot_workflow
-from ml_pipeline_workflow import ml_app as ml_pipeline_workflow
+from ml_pipeline_workflow import ml_app as workflow
 
 
 def format_namespace(namespace):
@@ -11,182 +10,6 @@ def format_namespace(namespace):
     return namespace[-1].split(":")[0] if len(namespace) > 0 else "root graph"
 
 
-def generate_response_basic_chatbot(message, history):
-    """Generate nested-thought responses for the Basic Chatbot workflow"""
-    inputs = {
-        "messages": [HumanMessage(content=message)],
-    }
-
-    response = []
-    message_id_counter = [0]  # Use list for mutable counter
-    agent_header_ids = {}  # Map agent names to their header message IDs
-    current_namespace = None  # Track current namespace to detect changes
-
-    for namespace, chunk in basic_chatbot_workflow.stream(
-        inputs,
-        stream_mode="updates",
-        subgraphs=True
-    ):
-        start_time = time.time()
-
-        for node_name, node_chunk in chunk.items():
-            formatted_namespace = format_namespace(namespace)
-
-            # Complete previous non-header message FIRST (before namespace change check)
-            if len(response) > 0 and response[-1].metadata.get("status") == "pending":
-                # Only complete if it's not an agent header
-                if response[-1].metadata.get("id") not in agent_header_ids.values():
-                    prev_msg = response[-1]
-                    prev_msg.metadata["status"] = "done"
-                    if "start_time" in prev_msg.metadata:
-                        prev_msg.metadata["duration"] = time.time() - prev_msg.metadata["start_time"]
-
-                    # Update parent agent header to show completion
-                    prev_parent_id = prev_msg.metadata.get("parent_id")
-                    if prev_parent_id:
-                        prev_title = prev_msg.metadata.get("title", "unknown")
-                        for msg in response:
-                            if msg.metadata.get("id") == prev_parent_id and msg.metadata.get("is_agent_header"):
-                                # Add completion log
-                                if not msg.content or "🔄" in msg.content:
-                                    msg.content = f"✓ {prev_title}"
-                                else:
-                                    msg.content += f"\n✓ {prev_title}"
-                                break
-
-                    yield response  # Yield to show completion
-
-            # If namespace changed, complete the previous agent header
-            if current_namespace and current_namespace != formatted_namespace:
-                if current_namespace in agent_header_ids:
-                    header_id = agent_header_ids[current_namespace]
-                    # Find and complete the agent header
-                    for msg in response:
-                        if msg.metadata.get("id") == header_id and msg.metadata.get("status") == "pending":
-                            msg.metadata["status"] = "done"
-                            if "start_time" in msg.metadata:
-                                msg.metadata["duration"] = time.time() - msg.metadata["start_time"]
-                            yield response
-                            break
-
-            current_namespace = formatted_namespace
-
-            # If this is a subgraph node, ensure parent header exists
-            parent_id = None
-            if formatted_namespace != "root graph":
-                # This node is inside an agent subgraph
-                if formatted_namespace not in agent_header_ids:
-                    # Create agent header message first
-                    message_id_counter[0] += 1
-                    header_id = message_id_counter[0]
-                    agent_header_ids[formatted_namespace] = header_id
-
-                    agent_emojis = {
-                        "supervisor": "👔",
-                        "research_expert": "🔍",
-                        "math_expert": "🔢"
-                    }
-                    emoji = agent_emojis.get(formatted_namespace, "🧠")
-
-                    header_message = ChatMessage(
-                        content="",
-                        metadata={
-                            "title": f"{emoji} {formatted_namespace}",
-                            "id": header_id,
-                            "status": "pending",
-                            "start_time": time.time(),
-                            "is_agent_header": True  # Mark as agent header
-                        }
-                    )
-                    response.append(header_message)
-                    yield response
-
-                parent_id = agent_header_ids[formatted_namespace]
-
-            # Create current node message
-            message_id_counter[0] += 1
-            current_id = message_id_counter[0]
-
-            # Create title
-            if formatted_namespace == "root graph":
-                if node_name == "supervisor":
-                    title = f"👔 {node_name}"
-                elif node_name in ["research_expert", "math_expert"]:
-                    # Skip - we'll handle these when we see their subgraph
-                    continue
-                else:
-                    title = f"🧠 {node_name}"
-            else:
-                # This is inside an agent
-                title = f"⚙️ {node_name}"
-
-            # Create node message
-            node_message = ChatMessage(
-                content="",
-                metadata={
-                    "title": title,
-                    "id": current_id,
-                    "status": "pending",
-                    "start_time": time.time()
-                }
-            )
-
-            if parent_id:
-                node_message.metadata["parent_id"] = parent_id
-
-            response.append(node_message)
-            yield response
-
-            # Process node content
-            out_str = []
-            if isinstance(node_chunk, dict):
-                for k, v in node_chunk.items():
-                    if isinstance(v, BaseMessage):
-                        out_str.append(v.pretty_repr())
-                    elif isinstance(v, list):
-                        for list_item in v:
-                            if isinstance(list_item, BaseMessage):
-                                out_str.append(list_item.pretty_repr())
-                            else:
-                                out_str.append(str(list_item))
-                    else:
-                        out_str.append(f"{k}:\n{v}")
-
-            response[-1].content = "\n".join(out_str)
-
-            # Update parent agent header with current activity
-            if parent_id:
-                for msg in response:
-                    if msg.metadata.get("id") == parent_id and msg.metadata.get("is_agent_header"):
-                        # Update agent header content with summary of current activity
-                        activity_summary = f"🔄 Working on: {node_name}"
-                        if len(out_str) > 0:
-                            # Add a brief preview of the content
-                            preview = out_str[0][:100].replace('\n', ' ')
-                            activity_summary += f"\n📝 {preview}..."
-                        msg.content = activity_summary
-                        break
-
-            yield response
-
-        # Keep the node as pending - it will be completed when the next node starts
-        # Just yield to show current state
-        yield response
-
-    # Complete any remaining pending messages (both agent headers and sub nodes)
-    for msg in response:
-        if msg.metadata.get("status") == "pending":
-            msg.metadata["status"] = "done"
-            if "start_time" in msg.metadata:
-                msg.metadata["duration"] = time.time() - msg.metadata["start_time"]
-
-    # Add final response (without metadata so it displays as regular message)
-    if node_chunk and isinstance(node_chunk, dict) and 'messages' in node_chunk:
-        final_message = node_chunk['messages'][-1].content
-        response.append(ChatMessage(content=final_message))
-    yield response
-
-
 def handle_like(data: gr.LikeData):
     """Like/dislike event handler"""
     if data.liked:
@@ -195,7 +18,7 @@ def handle_like(data: gr.LikeData):
         print(f"👎 Downvoted: {data.value}")
 
 
-def generate_response_ml_pipeline(message, history):
+def generate_response(message, history):
     """Generate nested-thought responses for the ML Pipeline workflow"""
     inputs = {
         "messages": [HumanMessage(content=message)],
@@ -206,7 +29,7 @@ def generate_response_ml_pipeline(message, history):
     agent_header_ids = {}  # Map agent names to their header message IDs
     current_namespace = None  # Track current namespace to detect changes
 
-    for namespace, chunk in ml_pipeline_workflow.stream(
+    for namespace, chunk in workflow.stream(
         inputs,
         stream_mode="updates",
         subgraphs=True
@@ -375,43 +198,20 @@ def generate_response_ml_pipeline(message, history):
     yield response
 
 
-# Create basic chatbot interface
-with gr.Blocks() as basic_chatbot_demo:
-    basic_chatbot = gr.Chatbot(
+# Create interface with Blocks for like functionality
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot(
         type="messages",
         show_copy_button=True,
         show_copy_all_button=True,
         show_share_button=True
     )
-    basic_chatbot.like(handle_like, None, None)
+    chatbot.like(handle_like, None, None)
 
     gr.ChatInterface(
-        generate_response_basic_chatbot,
+        generate_response,
         type="messages",
-        chatbot=basic_chatbot,
-        title="🤖 Multi-Agent Chatbot",
-        description="The Research and Math agents cooperate to carry out your task. You can watch each agent's thought process in real time.",
-        examples=[
-            "Please analyze the total workforce size of the FAANG companies in 2024, in Korean!",
-            "Multiply 123 by 456, then add 789 to the result",
-            "Find the number of Google employees and compare it with Apple's"
-        ],
-        cache_examples=False
-    )
-
-with gr.Blocks() as ml_pipeline_demo:
-    ml_pipeline_chatbot = gr.Chatbot(
-        type="messages",
-        show_copy_button=True,
-        show_copy_all_button=True,
-        show_share_button=True
-    )
-    ml_pipeline_chatbot.like(handle_like, None, None)
-
-    gr.ChatInterface(
-        generate_response_ml_pipeline,
-        type="messages",
-        chatbot=ml_pipeline_chatbot,
+        chatbot=chatbot,
         title="🔬 ML Pipeline Automation",
         description="A complete ML pipeline that runs through data extraction, pretraining, finetuning, and evaluation stages. You can follow each expert agent's work step by step.",
         examples=[

@@ -433,12 +233,5 @@ with gr.Blocks() as ml_pipeline_demo:
         cache_examples=False
     )
 
-# Create tabbed interface
-demo = gr.TabbedInterface(
-    [ml_pipeline_demo, basic_chatbot_demo],
-    ["ML Pipeline", "Basic Chatbot"],
-    title="Multi-Agent Systems"
-)
-
 if __name__ == "__main__":
     demo.launch(ssr_mode=False)
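The pattern that survives in app.py — streaming a LangGraph graph with `subgraphs=True` and mirroring each update into `gr.ChatMessage` entries with pending/done metadata — can be sketched in miniature as follows. This is an illustrative sketch, not code from the repo: `stream_as_chat` and its node handling are made up, and only the stream-consumption shape matches app.py. `graph` is assumed to be any compiled LangGraph graph, such as the `ml_app` imported above as `workflow`.

```python
from gradio import ChatMessage

def format_namespace(namespace):
    # Namespaces arrive as tuples like ("research_expert:<task-id>",);
    # keep the part before ":" or fall back to "root graph".
    return namespace[-1].split(":")[0] if len(namespace) > 0 else "root graph"

def stream_as_chat(graph, inputs):
    """Mirror LangGraph updates into ChatMessage 'thought' entries."""
    response = []
    # stream_mode="updates" with subgraphs=True yields (namespace, chunk)
    # pairs, where chunk maps node names to their state updates.
    for namespace, chunk in graph.stream(inputs, stream_mode="updates", subgraphs=True):
        agent = format_namespace(namespace)
        for node_name, node_chunk in chunk.items():
            # Mark the previous step done before opening a new one.
            if response and response[-1].metadata.get("status") == "pending":
                response[-1].metadata["status"] = "done"
            response.append(ChatMessage(
                content=str(node_chunk),
                metadata={"title": f"⚙️ {agent} / {node_name}", "status": "pending"},
            ))
            yield response
    if response:
        response[-1].metadata["status"] = "done"
        yield response
```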
basic_chatbot_workflow.py
DELETED
@@ -1,57 +0,0 @@
-from dotenv import load_dotenv
-from langchain_openai import ChatOpenAI
-from langgraph_supervisor import create_supervisor
-from langgraph.prebuilt import create_react_agent
-
-# Load environment variables from .env file
-load_dotenv()
-
-model = ChatOpenAI(model="gpt-4o")
-
-# Create specialized agents
-
-def add(a: float, b: float) -> float:
-    """Add two numbers."""
-    return a + b
-
-def multiply(a: float, b: float) -> float:
-    """Multiply two numbers."""
-    return a * b
-
-def web_search(query: str) -> str:
-    """Search the web for information."""
-    return (
-        "Here are the headcounts for each of the FAANG companies in 2024:\n"
-        "1. **Facebook (Meta)**: 67,317 employees.\n"
-        "2. **Apple**: 164,000 employees.\n"
-        "3. **Amazon**: 1,551,000 employees.\n"
-        "4. **Netflix**: 14,000 employees.\n"
-        "5. **Google (Alphabet)**: 181,269 employees."
-    )
-
-math_agent = create_react_agent(
-    model=model,
-    tools=[add, multiply],
-    name="math_expert",
-    prompt="You are a math expert. Always use only one tool at a time."
-)
-
-research_agent = create_react_agent(
-    model=model,
-    tools=[web_search],
-    name="research_expert",
-    prompt="You are a world-class researcher with web search access. Do not do any math."
-)
-
-workflow = create_supervisor(
-    [research_agent, math_agent],
-    model=model,
-    prompt=(
-        "You are a team supervisor managing a research expert and a math expert. "
-        "Use research_agent when current events or information lookups are needed. "
-        "Use math_agent for math problems."
-    )
-)
-
-# Compile and run
-app = workflow.compile()
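Before this commit, the compiled supervisor graph exported above could be exercised standalone. A minimal usage sketch (the query string is illustrative; `invoke` is the standard entry point of a compiled LangGraph graph):

```python
from langchain_core.messages import HumanMessage

from basic_chatbot_workflow import app  # removed in this commit

# Run the supervisor graph once and print the final answer.
result = app.invoke({"messages": [HumanMessage(content="Multiply 123 by 456, then add 789")]})
print(result["messages"][-1].content)
```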