Wayne0102 committed on
Commit
b77c490
·
verified ·
1 Parent(s): 75921f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -49
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import datetime
4
  import pytz
5
  import asyncio
6
- from typing import List, Dict
7
 
8
  # Framework 1: LlamaIndex
9
  from llama_index.core.agent.workflow import AgentWorkflow
@@ -18,11 +18,11 @@ HF_TOKEN = os.getenv("HF_TOKEN")
18
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
19
 
20
  # ==========================================
21
- # AGENT SETUP
22
  # ==========================================
23
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
24
  li_agent = AgentWorkflow.from_tools_or_functions(
25
- [FunctionTool.from_defaults(fn=lambda: f"Time: {datetime.datetime.now(pytz.timezone('Asia/Tokyo'))}")],
26
  llm=li_llm,
27
  )
28
 
@@ -30,68 +30,57 @@ smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="t
30
  smol_agent = CodeAgent(model=smol_model, tools=[DuckDuckGoSearchTool()])
31
 
32
  # ==========================================
33
- # CHAT LOGIC (Gradio 6.0 Compatible)
34
  # ==========================================
35
  async def chat_llama(message, history):
36
- # LlamaIndex run
37
  result = await li_agent.run(user_msg=message)
38
- # We yield the final response
39
- yield str(result)
40
 
41
  def chat_smol(message, history):
42
- """
43
- Simulates a 'Thinking' block before the final answer.
44
- """
45
- # 1. Send an initial 'Thinking' message using Gradio 6.0 Metadata
46
- yield gr.ChatMessage(
47
  role="assistant",
48
- content="Searching for information and executing code...",
49
- metadata={"title": "🧠 Agent is thinking"}
50
  )
51
 
52
  try:
53
- # 2. Run the actual agent
54
  response = smol_agent.run(message)
55
- # 3. Yield the final answer as a normal message
56
- yield str(response)
57
  except Exception as e:
58
- yield f"❌ Error: {str(e)}"
59
 
60
  # ==========================================
61
- # ENHANCED UI LAYOUT
62
  # ==========================================
63
  with gr.Blocks(fill_height=True) as demo:
64
- # Sidebar for Specs
65
- with gr.Sidebar():
66
- gr.Markdown("## 🛠️ Global Settings")
67
- gr.Dropdown([MODEL_ID], label="Active Model", value=MODEL_ID)
68
- gr.Markdown("---")
69
- gr.Markdown("### Agent Capabilities")
70
- gr.CheckboxGroup(["Search", "Time", "Python Code"], label="Tools Enabled", value=["Search", "Time", "Python Code"], interactive=False)
71
- gr.Info("LlamaIndex is best for structured workflows. smolagents is best for dynamic code tasks.")
72
 
73
- with gr.Column():
74
- gr.Markdown(f"# 🤖 Multi-Agent Hub")
75
-
76
- with gr.Tabs():
77
- # TAB 1: LLAMAINDEX
78
- with gr.Tab("🏗️ LlamaIndex Workflow"):
79
- gr.ChatInterface(
80
- fn=chat_llama,
81
- description="Specializes in persistent workflows and structured tool calling.",
82
- examples=["What's the current time in Tokyo?"],
83
- container=False
84
- )
85
 
86
- # TAB 2: SMOLAGENTS
87
- with gr.Tab("💻 smolagents CodeAgent"):
88
- gr.ChatInterface(
89
- fn=chat_smol,
90
- description="Writes and runs Python code to solve complex reasoning tasks.",
91
- examples=["How many people live in Paris and New York combined?"],
92
- container=False
93
- )
94
 
 
95
  if __name__ == "__main__":
96
- # Using a professional monochrome theme for the UI
97
- demo.launch(theme=gr.themes.Monochrome())
 
 
 
3
  import datetime
4
  import pytz
5
  import asyncio
6
+ from gradio import ChatMessage
7
 
8
  # Framework 1: LlamaIndex
9
  from llama_index.core.agent.workflow import AgentWorkflow
 
18
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
19
 
20
  # ==========================================
21
+ # PART 1: AGENT LOGIC
22
  # ==========================================
23
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
24
  li_agent = AgentWorkflow.from_tools_or_functions(
25
+ [FunctionTool.from_defaults(fn=lambda: f"Current time: {datetime.datetime.now(pytz.timezone('Asia/Tokyo'))}")],
26
  llm=li_llm,
27
  )
28
 
 
30
  smol_agent = CodeAgent(model=smol_model, tools=[DuckDuckGoSearchTool()])
31
 
32
  # ==========================================
33
+ # PART 2: UI WRAPPERS (Gradio 6.0 Style)
34
  # ==========================================
35
  async def chat_llama(message, history):
36
+ # Workflow-based logic
37
  result = await li_agent.run(user_msg=message)
38
+ return str(result)
 
39
 
40
  def chat_smol(message, history):
41
+ # We yield a 'Thought' block first
42
+ yield ChatMessage(
 
 
 
43
  role="assistant",
44
+ content="I am searching the web and writing Python code to verify the details...",
45
+ metadata={"title": "🧠 Thinking Process", "status": "pending"}
46
  )
47
 
48
  try:
49
+ # Run agent
50
  response = smol_agent.run(message)
51
+ # Yield the final result (this replaces the previous yield in the UI)
52
+ yield ChatMessage(role="assistant", content=str(response))
53
  except Exception as e:
54
+ yield ChatMessage(role="assistant", content=f"❌ Error: {str(e)}")
55
 
56
  # ==========================================
57
+ # PART 3: THE UI LAYOUT
58
  # ==========================================
59
  with gr.Blocks(fill_height=True) as demo:
60
+ with gr.Row():
61
+ gr.Markdown(f"# 🤖 Multi-Agent Hub\nRunning `{MODEL_ID}` via Together AI", elem_id="header")
 
 
 
 
 
 
62
 
63
+ with gr.Tabs():
64
+ with gr.Tab("🏗️ LlamaIndex (Workflow)"):
65
+ gr.ChatInterface(
66
+ fn=chat_llama,
67
+ examples=["What time is it in Tokyo?"],
68
+ # 'container' removed to fix your error
69
+ )
70
+ gr.Info("Best for structured, multi-step business logic.")
 
 
 
 
71
 
72
+ with gr.Tab("💻 smolagents (Code)"):
73
+ gr.ChatInterface(
74
+ fn=chat_smol,
75
+ examples=["Search for the latest stock price of Nvidia."],
76
+ # Customizing the chatbot component to look cleaner
77
+ chatbot=gr.Chatbot(label="Code Execution Agent", show_label=False)
78
+ )
79
+ gr.Warning("Best for dynamic problem solving and web searching.")
80
 
81
+ # Launch with theme and CSS
82
  if __name__ == "__main__":
83
+ demo.launch(
84
+ theme=gr.themes.Soft(),
85
+ css="#header { text-align: center; padding: 20px; }"
86
+ )