Wayne0102 committed on
Commit
f1f2114
·
verified ·
1 Parent(s): 5eb195f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -77
app.py CHANGED
@@ -3,12 +3,13 @@ import gradio as gr
3
  import datetime
4
  import pytz
5
  import asyncio
6
- from gradio import ChatMessage
7
 
8
- # Frameworks
9
  from llama_index.core.agent.workflow import AgentWorkflow
10
  from llama_index.core.tools import FunctionTool
11
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 
 
12
  from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
13
 
14
  # 0. SHARED CONFIG
@@ -16,7 +17,7 @@ HF_TOKEN = os.getenv("HF_TOKEN")
16
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
17
 
18
  # ==========================================
19
- # AGENT SETUP
20
  # ==========================================
21
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
22
 
@@ -26,91 +27,79 @@ def get_tokyo_time() -> str:
26
  return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
27
 
28
  def get_weather(location: str) -> str:
29
- """Get the current weather for a specific location. Args: location: The city name."""
30
- return f"The weather in {location} is currently sunny and 22°C."
 
 
 
 
 
31
 
32
- li_agent = AgentWorkflow.from_tools_or_functions(
33
- [FunctionTool.from_defaults(fn=get_tokyo_time), FunctionTool.from_defaults(fn=get_weather)],
34
- llm=li_llm
35
- )
36
 
37
- smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
38
- smol_agent = CodeAgent(model=smol_model, tools=[DuckDuckGoSearchTool()])
 
 
 
 
 
 
39
 
40
  # ==========================================
41
- # ENHANCED CHAT LOGIC
42
  # ==========================================
43
- async def chat_llama(message, history, system_prompt):
44
- # LlamaIndex doesn't natively stream thoughts as easily as smolagents in this setup,
45
- # so we just return the final result.
46
- result = await li_agent.run(user_msg=f"{system_prompt}\n\nUser: {message}")
47
- return str(result)
48
-
49
- def chat_smol(message, history, system_prompt):
50
- # 1. Yield a 'Thinking' state first
51
- yield [
52
- ChatMessage(role="user", content=message),
53
- ChatMessage(
54
- role="assistant",
55
- content="Writing Python code to analyze your request...",
56
- metadata={"title": "🧠 CodeAgent Thinking", "status": "pending"}
57
- )
58
- ]
59
-
 
 
60
  try:
61
- # 2. Run the agent logic
62
- response = smol_agent.run(f"{system_prompt}\n\nUser: {message}")
63
-
64
- # 3. Final Answer
65
- yield [
66
- ChatMessage(role="user", content=message),
67
- ChatMessage(role="assistant", content=str(response))
68
- ]
69
  except Exception as e:
70
- yield [ChatMessage(role="assistant", content=f"❌ Error: {str(e)}")]
71
 
72
  # ==========================================
73
- # GRADIO 6.0 UI DESIGN
74
  # ==========================================
75
- with gr.Blocks(fill_height=True, title="Agent Workspace") as demo:
 
 
76
 
77
- # Sidebar for Global Control
78
- with gr.Sidebar():
79
- gr.Markdown("# ⚙️ Configuration")
80
- system_input = gr.Textbox(
81
- label="System Instructions",
82
- placeholder="e.g. You are a helpful assistant that speaks like a pirate.",
83
- lines=4
84
- )
85
- gr.Markdown("---")
86
- gr.Markdown("### 🛠️ Active Tools")
87
- gr.Markdown("- 🕒 Tokyo Time\n- ☀️ Weather (LlamaIndex)\n- 🔍 DuckDuckGo (smolagents)")
88
- gr.Divider()
89
- gr.Info("LlamaIndex focuses on 'Workflows', while smolagents focuses on 'Code Generation'.")
90
-
91
- # Main Area
92
- with gr.Column():
93
- gr.Markdown(f"# 🤖 AI Agent Hub\n**Model:** `{MODEL_ID}`")
94
 
95
- with gr.Tabs():
96
- # TAB 1: LLAMAINDEX
97
- with gr.Tab("🏗️ LlamaIndex Workflow"):
98
- gr.ChatInterface(
99
- fn=chat_llama,
100
- additional_inputs=[system_input],
101
- examples=["What time is it in Tokyo?", "Weather in London"],
102
- type="messages" # Gradio 6.0 style
103
- )
104
-
105
- # TAB 2: SMOLAGENTS
106
- with gr.Tab("💻 smolagents (CodeAgent)"):
107
- # Manual Chatbot for 'Thinking' metadata support
108
- smol_chatbot = gr.Chatbot(label="smol-code-agent", type="messages", avatar_images=(None, "https://raw.githubusercontent.com/huggingface/smolagents/main/docs/source/imgs/smolagents_logo.png"))
109
- smol_msg = gr.Textbox(placeholder="Ask me to search or calculate something...", label="Input")
110
-
111
- # Handling events manually for the 'Thinking' yield
112
- smol_msg.submit(chat_smol, [smol_msg, smol_chatbot, system_input], [smol_chatbot])
113
- smol_msg.submit(lambda: "", None, [smol_msg]) # Clear textbox
114
 
115
  if __name__ == "__main__":
116
- demo.launch(theme=gr.themes.Soft())
 
 
 
 
 
3
  import datetime
4
  import pytz
5
  import asyncio
 
6
 
7
+ # Framework 1: LlamaIndex
8
  from llama_index.core.agent.workflow import AgentWorkflow
9
  from llama_index.core.tools import FunctionTool
10
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
11
+
12
+ # Framework 2: smolagents
13
  from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
14
 
15
  # 0. SHARED CONFIG
 
17
  MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
18
 
19
  # ==========================================
20
+ # PART 1: LLAMAINDEX AGENT
21
  # ==========================================
22
  li_llm = HuggingFaceInferenceAPI(model_name=MODEL_ID, token=HF_TOKEN, provider="together")
23
 
 
27
  return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
28
 
29
  def get_weather(location: str) -> str:
30
+ """
31
+ Get the current weather for a specific location.
32
+
33
+ Args:
34
+ location: The city name to check weather for.
35
+ """
36
+ return f"LlamaIndex Tool: The weather in {location} is currently sunny and 22°C."
37
 
38
+ li_tools = [
39
+ FunctionTool.from_defaults(fn=get_tokyo_time),
40
+ FunctionTool.from_defaults(fn=get_weather) # Added weather tool here
41
+ ]
42
 
43
+ li_agent = AgentWorkflow.from_tools_or_functions(li_tools, llm=li_llm)
44
+
45
+ async def chat_llama(message, history):
46
+ try:
47
+ result = await li_agent.run(user_msg=message)
48
+ return str(result)
49
+ except Exception as e:
50
+ return f"❌ LlamaIndex Error: {str(e)}"
51
 
52
  # ==========================================
53
+ # PART 2: SMOLAGENTS
54
  # ==========================================
55
+ smol_model = InferenceClientModel(model_id=MODEL_ID, token=HF_TOKEN, provider="together")
56
+
57
+ @tool
58
+ def weather_tool(location: str) -> str:
59
+ """
60
+ Get the current weather for a location.
61
+
62
+ Args:
63
+ location: The city and country, e.g., 'London, UK'.
64
+ """
65
+ return f"smolagents Tool: The weather in {location} is currently sunny and 22°C."
66
+
67
+ smol_agent = CodeAgent(
68
+ model=smol_model,
69
+ tools=[weather_tool, DuckDuckGoSearchTool()],
70
+ add_base_tools=True
71
+ )
72
+
73
+ def chat_smol(message, history):
74
  try:
75
+ response = smol_agent.run(message)
76
+ return str(response)
 
 
 
 
 
 
77
  except Exception as e:
78
+ return f"❌ Smolagents Error: {str(e)}"
79
 
80
  # ==========================================
81
+ # PART 3: GRADIO 6.0 UI
82
  # ==========================================
83
+ with gr.Blocks() as demo:
84
+ gr.Markdown("# 🤖 Consolidated AI Agent Space", elem_id="main-title")
85
+ gr.Markdown(f"Currently using **{MODEL_ID}** via Together AI Provider.")
86
 
87
+ with gr.Tabs():
88
+ with gr.Tab("🏗️ LlamaIndex (Workflow)"):
89
+ gr.ChatInterface(
90
+ fn=chat_llama,
91
+ examples=["What's the weather in Tokyo?", "What time is it in Japan?"]
92
+ )
 
 
 
 
 
 
 
 
 
 
 
93
 
94
+ with gr.Tab("💻 smolagents (CodeAgent)"):
95
+ gr.ChatInterface(
96
+ fn=chat_smol,
97
+ examples=["Search for the latest AI news", "How is the weather in Paris?"]
98
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
99
 
100
  if __name__ == "__main__":
101
+ # Gradio 6.0: Move theme/css to launch()
102
+ demo.launch(
103
+ theme=gr.themes.Soft(),
104
+ css="#main-title { text-align: center; margin-bottom: 20px; }"
105
+ )