Wayne0102 committed on
Commit
75921f1
·
verified ·
1 Parent(s): bb84637

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -54
app.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
3
  import datetime
4
  import pytz
5
  import asyncio
 
6
 
7
  # Framework 1: LlamaIndex
8
  from llama_index.core.agent.workflow import AgentWorkflow
@@ -16,79 +17,81 @@ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientMod
16
  HF_TOKEN = os.getenv("HF_TOKEN")
17
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
18
 
19
- # CUSTOM CSS for centering the header
20
- custom_css = ".agent-header { text-align: center; margin-bottom: 20px; }"
21
-
22
  # ==========================================
23
- # PART 1: LLAMAINDEX AGENT
24
  # ==========================================
25
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
26
-
27
def get_tokyo_time() -> str:
    """Return a human-readable sentence with the current time in Tokyo, Japan.

    Returns:
        A string of the form "The current time in Tokyo is HH:MM:SS".
    """
    # zoneinfo is the stdlib replacement for the third-party pytz package
    # (available since Python 3.9); for a simple aware "now" lookup the
    # resulting wall-clock time is identical.
    from zoneinfo import ZoneInfo

    tz = ZoneInfo("Asia/Tokyo")
    now = datetime.datetime.now(tz)
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
31
-
32
  li_agent = AgentWorkflow.from_tools_or_functions(
33
- [FunctionTool.from_defaults(fn=get_tokyo_time)],
34
  llm=li_llm,
35
  )
36
 
37
async def chat_llama(message, history):
    """Route one user message through the LlamaIndex agent workflow.

    `history` is accepted to satisfy the gr.ChatInterface callback contract
    but is not forwarded: AgentWorkflow.run() receives only the new message.
    """
    try:
        answer = await li_agent.run(user_msg=message)
    except Exception as e:
        # Surface failures as a chat reply instead of crashing the UI.
        return f"❌ LlamaIndex Error: {str(e)}"
    return str(answer)
44
 
45
  # ==========================================
46
- # PART 2: SMOLAGENTS
47
  # ==========================================
48
- smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
 
 
 
 
49
 
50
# NOTE(review): smolagents' @tool decorator parses this docstring (summary and
# Args section) to build the tool schema shown to the LLM — keep the docstring
# accurate if the tool's behavior ever changes.
@tool
def weather_tool(location: str) -> str:
    """
    Get the current weather for a location.

    Args:
        location: The city and country, e.g., 'London, UK'.
    """
    # Stubbed response — no real weather API is called.
    return f"The weather in {location} is currently sunny and 22°C."
59
-
60
# Code-executing agent: can call weather_tool or run a DuckDuckGo search.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],
)


def chat_smol(message, history):
    """Run the smolagents CodeAgent on `message`; `history` is unused."""
    try:
        # run() resolves the whole agent loop and returns the final answer.
        answer = smol_agent.run(message)
    except Exception as e:
        return f"❌ Smolagents Error: {str(e)}"
    return str(answer)
72
 
73
# ==========================================
# PART 3: GRADIO 6.0 UI
# ==========================================
with gr.Blocks() as demo:
    gr.Markdown(
        f"# 🤖 AI Agent Playground\n**Model:** `{MODEL_ID}`",
        elem_classes="agent-header",
    )

    with gr.Tabs():
        with gr.Tab("🏗️ LlamaIndex (Workflows)"):
            gr.ChatInterface(
                fn=chat_llama,
                examples=["What time is it in Tokyo?"],
            )

        with gr.Tab("💻 smolagents (Code Execution)"):
            gr.ChatInterface(
                fn=chat_smol,
                examples=["What is the weather in Paris?", "Search for 2026 AI trends."],
            )

if __name__ == "__main__":
    # NOTE(review): original author states that in Gradio 6.0 theme/css are
    # passed to launch() rather than Blocks() — verify against the installed
    # Gradio version's API before relying on this.
    demo.launch(theme=gr.themes.Soft(), css=custom_css)
 
3
  import datetime
4
  import pytz
5
  import asyncio
6
+ from typing import List, Dict
7
 
8
  # Framework 1: LlamaIndex
9
  from llama_index.core.agent.workflow import AgentWorkflow
 
17
  HF_TOKEN = os.getenv("HF_TOKEN")
18
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
19
 
 
 
 
20
  # ==========================================
21
+ # AGENT SETUP
22
  # ==========================================
23
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
 
 
 
 
 
 
24
  li_agent = AgentWorkflow.from_tools_or_functions(
25
+ [FunctionTool.from_defaults(fn=lambda: f"Time: {datetime.datetime.now(pytz.timezone('Asia/Tokyo'))}")],
26
  llm=li_llm,
27
  )
28
 
29
+ smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
30
+ smol_agent = CodeAgent(model=smol_model, tools=[DuckDuckGoSearchTool()])
 
 
 
 
 
31
 
32
  # ==========================================
33
+ # CHAT LOGIC (Gradio 6.0 Compatible)
34
  # ==========================================
35
async def chat_llama(message, history):
    """Yield the LlamaIndex agent's reply for one user message.

    `history` is required by the gr.ChatInterface callback signature but is
    unused: AgentWorkflow.run() receives only the newest message.
    """
    try:
        # Await the full LlamaIndex workflow before responding.
        result = await li_agent.run(user_msg=message)
    except Exception as e:
        # This revision dropped the error handling the previous version had;
        # restore it so failures appear in-chat, matching chat_smol's style.
        yield f"❌ Error: {str(e)}"
        return
    # Yield the final response (ChatInterface supports generator callbacks).
    yield str(result)
40
 
41
def chat_smol(message, history):
    """
    Simulates a 'Thinking' block before the final answer.
    """
    # First yield a collapsible status bubble (Gradio metadata message),
    # then stream the real answer once the agent loop finishes.
    thinking = gr.ChatMessage(
        role="assistant",
        content="Searching for information and executing code...",
        metadata={"title": "🧠 Agent is thinking"},
    )
    yield thinking

    try:
        final_answer = smol_agent.run(message)
    except Exception as e:
        yield f"❌ Error: {str(e)}"
    else:
        yield str(final_answer)
59
 
60
# ==========================================
# ENHANCED UI LAYOUT
# ==========================================
with gr.Blocks(fill_height=True) as demo:
    # Sidebar: static model/tool info displayed alongside both tabs.
    with gr.Sidebar():
        gr.Markdown("## 🛠️ Global Settings")
        gr.Dropdown([MODEL_ID], label="Active Model", value=MODEL_ID)
        gr.Markdown("---")
        gr.Markdown("### Agent Capabilities")
        gr.CheckboxGroup(
            ["Search", "Time", "Python Code"],
            label="Tools Enabled",
            value=["Search", "Time", "Python Code"],
            interactive=False,
        )
        gr.Info("LlamaIndex is best for structured workflows. smolagents is best for dynamic code tasks.")

    with gr.Column():
        gr.Markdown("# 🤖 Multi-Agent Hub")

        with gr.Tabs():
            # TAB 1: LLAMAINDEX
            with gr.Tab("🏗️ LlamaIndex Workflow"):
                gr.ChatInterface(
                    fn=chat_llama,
                    description="Specializes in persistent workflows and structured tool calling.",
                    examples=["What's the current time in Tokyo?"],
                    container=False,
                )

            # TAB 2: SMOLAGENTS
            with gr.Tab("💻 smolagents CodeAgent"):
                gr.ChatInterface(
                    fn=chat_smol,
                    description="Writes and runs Python code to solve complex reasoning tasks.",
                    examples=["How many people live in Paris and New York combined?"],
                    container=False,
                )

if __name__ == "__main__":
    # Monochrome theme applied at launch time.
    demo.launch(theme=gr.themes.Monochrome())