Wayne0102 committed on
Commit
ddc0a9f
·
verified ·
1 Parent(s): b77c490

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -47
app.py CHANGED
@@ -3,7 +3,6 @@ import gradio as gr
3
  import datetime
4
  import pytz
5
  import asyncio
6
- from gradio import ChatMessage
7
 
8
  # Framework 1: LlamaIndex
9
  from llama_index.core.agent.workflow import AgentWorkflow
@@ -17,70 +16,96 @@ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientMod
17
  HF_TOKEN = os.getenv("HF_TOKEN")
18
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
19
 
 
 
 
 
20
  # ==========================================
21
- # PART 1: AGENT LOGIC
22
  # ==========================================
23
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
 
 
 
 
 
 
24
  li_agent = AgentWorkflow.from_tools_or_functions(
25
- [FunctionTool.from_defaults(fn=lambda: f"Current time: {datetime.datetime.now(pytz.timezone('Asia/Tokyo'))}")],
26
  llm=li_llm,
27
  )
28
 
29
- smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
30
- smol_agent = CodeAgent(model=smol_model, tools=[DuckDuckGoSearchTool()])
 
 
 
 
31
 
32
  # ==========================================
33
- # PART 2: UI WRAPPERS (Gradio 6.0 Style)
34
  # ==========================================
35
- async def chat_llama(message, history):
36
- # Workflow-based logic
37
- result = await li_agent.run(user_msg=message)
38
- return str(result)
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  def chat_smol(message, history):
41
- # We yield a 'Thought' block first
42
- yield ChatMessage(
43
- role="assistant",
44
- content="I am searching the web and writing Python code to verify the details...",
45
- metadata={"title": "🧠 Thinking Process", "status": "pending"}
46
- )
47
-
48
  try:
49
- # Run agent
50
  response = smol_agent.run(message)
51
- # Yield the final result (this replaces the previous yield in the UI)
52
- yield ChatMessage(role="assistant", content=str(response))
53
  except Exception as e:
54
- yield ChatMessage(role="assistant", content=f"❌ Error: {str(e)}")
55
 
56
  # ==========================================
57
- # PART 3: THE UI LAYOUT
58
  # ==========================================
59
- with gr.Blocks(fill_height=True) as demo:
60
- with gr.Row():
61
- gr.Markdown(f"# 🤖 Multi-Agent Hub\nRunning `{MODEL_ID}` via Together AI", elem_id="header")
 
 
 
62
 
63
  with gr.Tabs():
64
- with gr.Tab("🏗️ LlamaIndex (Workflow)"):
65
- gr.ChatInterface(
66
- fn=chat_llama,
67
- examples=["What time is it in Tokyo?"],
68
- # 'container' removed to fix your error
69
- )
70
- gr.Info("Best for structured, multi-step business logic.")
71
-
72
- with gr.Tab("💻 smolagents (Code)"):
73
- gr.ChatInterface(
74
- fn=chat_smol,
75
- examples=["Search for the latest stock price of Nvidia."],
76
- # Customizing the chatbot component to look cleaner
77
- chatbot=gr.Chatbot(label="Code Execution Agent", show_label=False)
78
- )
79
- gr.Warning("Best for dynamic problem solving and web searching.")
80
-
81
- # Launch with theme and CSS
 
 
 
 
 
 
 
82
  if __name__ == "__main__":
83
- demo.launch(
84
- theme=gr.themes.Soft(),
85
- css="#header { text-align: center; padding: 20px; }"
86
- )
 
3
  import datetime
4
  import pytz
5
  import asyncio
 
6
 
7
  # Framework 1: LlamaIndex
8
  from llama_index.core.agent.workflow import AgentWorkflow
 
16
  HF_TOKEN = os.getenv("HF_TOKEN")
17
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
18
 
19
# Global stylesheet passed to demo.launch(css=...); centers the header
# Markdown block tagged with elem_classes="agent-header".
custom_css = """
.agent-header { text-align: center; margin-bottom: 20px; }
"""
22
+
23
# ==========================================
# PART 1: LLAMAINDEX AGENT
# ==========================================
# LlamaIndex LLM wrapper over the HF Inference API, routed through the
# "together" provider with the shared MODEL_ID / HF_TOKEN.
li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
27
+
28
def get_tokyo_time() -> str:
    """Returns the current time in Tokyo, Japan."""
    # Use stdlib zoneinfo (Python 3.9+) instead of the third-party pytz
    # package; identical tz database, identical HH:MM:SS output. Local
    # import keeps the change self-contained.
    from zoneinfo import ZoneInfo

    now = datetime.datetime.now(ZoneInfo('Asia/Tokyo'))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
32
+
33
# Workflow agent exposing a single tool (get_tokyo_time). Note that
# FunctionTool.from_defaults derives the tool name/description from the
# function's name and docstring.
li_agent = AgentWorkflow.from_tools_or_functions(
    [FunctionTool.from_defaults(fn=get_tokyo_time)],
    llm=li_llm,
)
37
 
38
async def chat_llama(message, history):
    """Gradio ChatInterface callback backed by the LlamaIndex workflow agent.

    Returns the agent's answer as a plain string, or an error message if
    the workflow raises. `history` is accepted per the ChatInterface
    contract but not used.
    """
    try:
        result = await li_agent.run(user_msg=message)
    except Exception as exc:
        # Surface the failure in the chat window instead of crashing the UI.
        return f"❌ LlamaIndex Error: {str(exc)}"
    return str(result)
44
 
45
# ==========================================
# PART 2: SMOLAGENTS
# ==========================================
# smolagents model client over the same HF model / Together provider.
smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
49
+
50
# NOTE: smolagents parses this docstring (summary + Args section) to build
# the tool schema shown to the model — keep it in sync with the signature.
@tool
def weather_tool(location: str) -> str:
    """
    Get the current weather for a location.

    Args:
        location: The city and country, e.g., 'London, UK'.
    """
    # Stub implementation: returns a canned response rather than live data.
    return f"The weather in {location} is currently sunny and 22°C."
59
+
60
# CodeAgent solves tasks by writing and executing Python that calls its
# tools. add_base_tools=True additionally registers smolagents' built-in
# default toolbox alongside the two tools listed here.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],
    add_base_tools=True
)
65
 
66
def chat_smol(message, history):
    """Gradio ChatInterface callback backed by the smolagents CodeAgent.

    Runs the agent synchronously and returns its answer as a string; any
    exception is reported back to the chat instead of propagating.
    """
    try:
        answer = smol_agent.run(message)
    except Exception as exc:
        return f"❌ Smolagents Error: {str(exc)}"
    return str(answer)
72
 
73
# ==========================================
# PART 3: REVISED GRADIO UI (v6.0 Compatible)
# ==========================================
# Two-tab layout: one ChatInterface per agent, each with a small sidebar
# column of notes. Layout statements are order-sensitive.
with gr.Blocks() as demo:

    # Page header; styled by custom_css via the agent-header class.
    gr.Markdown(f"""
    # 🤖 Multi-Framework Agent Playground
    **Model:** `{MODEL_ID}` | **Provider:** `Together AI`
    """, elem_classes="agent-header")

    with gr.Tabs():
        # TAB 1: LLAMAINDEX
        with gr.Tab("🏗️ LlamaIndex Workflow"):
            with gr.Row():
                with gr.Column(scale=4):
                    # Removed 'type' argument for compatibility
                    gr.ChatInterface(
                        fn=chat_llama,
                        examples=["What time is it in Tokyo?"]
                    )
                with gr.Column(scale=1):
                    gr.Markdown("### Agent Specs")
                    # NOTE(review): gr.Info normally fires a runtime toast
                    # inside an event handler; calling it at build time may
                    # not render anything — confirm intended behavior.
                    gr.Info("LlamaIndex handles complex multi-step logic via event-driven workflows.")

        # TAB 2: SMOLAGENTS
        with gr.Tab("💻 smolagents CodeAgent"):
            with gr.Row():
                with gr.Column(scale=4):
                    gr.ChatInterface(
                        fn=chat_smol,
                        examples=["What is the weather in Paris?"]
                    )
                with gr.Column(scale=1):
                    gr.Markdown("### Agent Specs")
                    # NOTE(review): same build-time vs runtime concern as
                    # gr.Info above.
                    gr.Warning("smolagents writes and executes Python code locally.")
109
if __name__ == "__main__":
    # Moved theme and css here for Gradio 6.0
    # NOTE(review): in Gradio versions before 6, `theme` and `css` are
    # gr.Blocks() constructor arguments, not launch() arguments — confirm
    # the pinned Gradio version accepts them on launch().
    demo.launch(theme=gr.themes.Soft(), css=custom_css)