YeeJun02 commited on
Commit
0c3a810
·
verified ·
1 Parent(s): 0463f36

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -31
app.py CHANGED
@@ -1,56 +1,97 @@
1
  import os
2
  import gradio as gr
 
 
 
 
 
 
 
3
  from llama_index.core.agent import ReActAgent
4
  from llama_index.core.tools import FunctionTool
5
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
6
- from smolagents import CodeAgent, HfApiModel, DuckDuckGoSearchTool
7
 
8
- # 1. SETUP LLM
 
 
 
 
9
  HF_TOKEN = os.getenv("HF_TOKEN")
10
 
11
- # --- LLAMAINDEX SETUP ---
 
 
12
  li_llm = HuggingFaceInferenceAPI(
13
- model_name="Qwen/Qwen2.5-7B-Instruct",
14
  token=HF_TOKEN,
15
  task="conversational"
16
  )
17
 
18
- # Define simple tools for LlamaIndex
19
- def multiply(a: float, b: float) -> float:
20
- """Multiplies two numbers."""
21
- return a * b
22
 
23
- li_tools = [FunctionTool.from_defaults(fn=multiply)]
24
 
25
- # Fix: Ensure correct initialization
26
  li_agent = ReActAgent.from_tools(
27
- tools=li_tools,
28
- llm=li_llm,
29
  verbose=True
30
  )
31
 
32
- # --- SMOLAGENTS SETUP ---
33
- smol_model = HfApiModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct", token=HF_TOKEN)
34
- smol_agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=smol_model)
35
-
36
- # 2. DEFINE WRAPPER FUNCTIONS FOR GRADIO
37
  def chat_llama(message, history):
38
- # LlamaIndex .chat() keeps its own history if using the same agent object
39
- response = li_agent.chat(message)
40
- return str(response)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  def chat_smol(message, history):
43
- # smolagents .run()
44
- response = smol_agent.run(message)
45
- return str(response)
46
-
47
- # 3. COMBINE INTO TABS
48
- with gr.Blocks() as demo:
49
- gr.Markdown("# Dual Agent Interface: LlamaIndex vs Smolagents")
50
- with gr.Tab("LlamaIndex (ReAct)"):
51
- gr.ChatInterface(chat_llama)
52
- with gr.Tab("Smolagents (Code)"):
53
- gr.ChatInterface(chat_smol)
 
 
 
 
 
 
 
 
 
54
 
55
  if __name__ == "__main__":
56
  demo.launch()
 
1
  import os
2
  import gradio as gr
3
+ import datetime
4
+ import pytz
5
+ import math
6
+ import requests
7
+ from deep_translator import GoogleTranslator
8
+
9
+ # Framework 1: LlamaIndex
10
  from llama_index.core.agent import ReActAgent
11
  from llama_index.core.tools import FunctionTool
12
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 
13
 
14
+ # Framework 2: smolagents
15
+ # FIXED IMPORT: HfApiModel is now InferenceClientModel
16
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
17
+
18
# 0. SHARED CONFIG
# API token read from the environment; None when the variable is unset.
HF_TOKEN = os.getenv("HF_TOKEN")
20
 
21
# ==========================================
# PART 1: LLAMAINDEX AGENT
# ==========================================
# Remote LLM served through the Hugging Face Inference API.
li_llm = HuggingFaceInferenceAPI(
    model_name="Qwen/Qwen2.5-7B-Instruct",
    token=HF_TOKEN,
    task="conversational",  # chat endpoint rather than raw text-generation
)
29
 
30
def get_tokyo_time() -> str:
    """Return the current wall-clock time in Tokyo as a sentence.

    Uses the stdlib ``zoneinfo`` module (Python 3.9+) instead of the
    third-party ``pytz`` package; Asia/Tokyo observes no DST, so the
    two libraries agree year-round.

    Returns:
        str: e.g. ``"The current time in Tokyo is 13:45:02"``.
    """
    from zoneinfo import ZoneInfo  # local import: drops the hard pytz dependency

    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
34
 
35
# Expose the time helper to the ReAct agent as a callable tool.
li_tools = [FunctionTool.from_defaults(fn=get_tokyo_time)]

# Initializing via constructor to avoid Pydantic __getattr__ issues
li_agent = ReActAgent.from_tools(
    tools=li_tools,
    llm=li_llm,
    verbose=True,  # print the Thought/Action/Observation loop to stdout
)
43
 
 
 
 
 
 
44
def chat_llama(message, history):
    """Gradio chat callback backed by the LlamaIndex ReAct agent.

    Args:
        message: The latest user message.
        history: Prior turns supplied by gr.ChatInterface (unused here;
            the shared agent object keeps its own conversation state).

    Returns:
        The agent's reply as text, or an error description on failure.
    """
    try:
        reply = li_agent.chat(message)
    except Exception as e:  # surface backend failures in the chat window
        return f"LlamaIndex Error: {str(e)}"
    return str(reply)
50
+
51
# ==========================================
# PART 2: SMOLAGENTS
# ==========================================
# InferenceClientModel is the current name of the removed HfApiModel class.
smol_model = InferenceClientModel(
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    token=HF_TOKEN,
)
59
+
60
@tool
def weather_tool(location: str) -> str:
    """Get the current weather for a location.

    Args:
        location: The city name.
    """
    # Stub implementation: always reports the same conditions.
    report = f"The weather in {location} is sunny and 25°C."
    return report
67
+
68
# A CodeAgent solves tasks by generating and executing Python that may
# call the tools listed below.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],
    additional_authorized_imports=['math', 'requests'],  # modules the generated code may import
)
73
 
74
def chat_smol(message, history):
    """Gradio chat callback backed by the smolagents CodeAgent.

    Args:
        message: The latest user message.
        history: Prior turns supplied by gr.ChatInterface (unused).

    Returns:
        The agent's final answer as text, or an error description.
    """
    try:
        answer = smol_agent.run(message)
    except Exception as e:  # surface backend failures in the chat window
        return f"Smolagents Error: {str(e)}"
    return str(answer)
80
+
81
# ==========================================
# PART 3: UNIFIED GRADIO UI
# ==========================================
# One tab per framework, each wrapping its chat callback in a ChatInterface.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Multi-Framework Agent Space")
    gr.Markdown("Compare how different agentic frameworks handle your requests.")

    with gr.Tab("LlamaIndex (ReAct Agent)"):
        gr.Markdown("This agent uses the classic **Reasoning + Acting** text loop.")
        gr.ChatInterface(fn=chat_llama)

    with gr.Tab("smolagents (Code Agent)"):
        gr.Markdown("This agent solves tasks by writing and executing **Python code**.")
        gr.ChatInterface(fn=chat_smol)
95
 
96
  if __name__ == "__main__":
97
  demo.launch()