Wayne0102 committed on
Commit
dca8e0e
·
verified ·
1 Parent(s): 2075f92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -33
app.py CHANGED
@@ -8,7 +8,7 @@ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientMod
8
  from deep_translator import GoogleTranslator
9
  from llama_index.core.agent import ReActAgent
10
  from llama_index.core.tools import FunctionTool
11
- from llama_index.llms.huggingface import HuggingFaceInferenceAPI # Note: Import path might vary slightly based on version
12
 
13
  # ============ 1. SMOLAGENTS TOOLS ============
14
 
@@ -60,11 +60,7 @@ def weather_tool(location: str) -> str:
60
 
61
  # ============ AGENT CONFIGURATION ============
62
 
63
- HF_TOKEN = os.getenv("HF_TOKEN")
64
- if not HF_TOKEN:
65
- raise ValueError("HF_TOKEN is missing! Please add it in Space Settings -> Secrets.")
66
-
67
- # SmolAgents Setup
68
  smol_model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct", token=HF_TOKEN)
69
 
70
  smol_agent = CodeAgent(
@@ -74,26 +70,16 @@ smol_agent = CodeAgent(
74
  )
75
 
76
  # LlamaIndex Setup
77
- # Use 'text-generation' for Instruct models
78
  llama_llm = HuggingFaceInferenceAPI(
79
  model_name="Qwen/Qwen2.5-7B-Instruct",
80
  token=HF_TOKEN,
81
- task="text-generation"
82
  )
83
 
84
- # Define real functions for LlamaIndex
85
def get_tokyo_time() -> str:
    """Return the current wall-clock time in Tokyo as a display string.

    Returns:
        A string of the form ``"Tokyo: YYYY-MM-DD HH:MM:SS.ffffff+09:00"``.

    Uses the stdlib ``zoneinfo`` database (Python 3.9+) instead of the
    third-party ``pytz`` package; ``str()`` of an aware datetime renders
    the same ``+09:00`` offset either way, so the output is unchanged.
    """
    from zoneinfo import ZoneInfo  # stdlib IANA tz database (3.9+)

    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"Tokyo: {now}"
88
-
89
def multiply(a: float, b: float) -> float:
    """Return the product of *a* and *b*."""
    product = a * b
    return product
92
-
93
  llama_agent = ReActAgent.from_tools(
94
  [
95
- FunctionTool.from_defaults(fn=get_tokyo_time),
96
- FunctionTool.from_defaults(fn=multiply)
97
  ],
98
  llm=llama_llm,
99
  verbose=True
@@ -102,28 +88,17 @@ llama_agent = ReActAgent.from_tools(
102
  # ============ GRADIO UI ============
103
 
104
def chat_smol(message, history):
    """Gradio chat callback for the smolagents CodeAgent.

    Runs the agent on the user's message and hands back the result as
    text; any failure is reported to the UI rather than raised.
    """
    try:
        # The CodeAgent hands the final result straight back from run().
        answer = smol_agent.run(message)
    except Exception as e:
        return f"Agent Error: {str(e)}"
    return str(answer)
110
 
111
def chat_llama(message, history):
    """Gradio chat callback for the LlamaIndex ReAct agent.

    Sends the user's message to the agent and stringifies its reply;
    failures come back as an error string instead of propagating.
    """
    try:
        reply = llama_agent.chat(message)
    except Exception as e:
        return f"Agent Error: {str(e)}"
    return str(reply)
117
 
118
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
119
  gr.Markdown("# 🤖 Multi-Framework AI Agent Space")
120
-
121
  with gr.Tab("Smolagents (Alfred)"):
122
- gr.Markdown("Uses `Qwen2.5-Coder-32B`. Capable of writing code to solve problems.")
123
  gr.ChatInterface(chat_smol)
124
-
125
  with gr.Tab("LlamaIndex (ReAct)"):
126
- gr.Markdown("Uses `Qwen2.5-7B`. Good for reasoning and simple tool use.")
127
  gr.ChatInterface(chat_llama)
128
 
129
  if __name__ == "__main__":
 
8
  from deep_translator import GoogleTranslator
9
  from llama_index.core.agent import ReActAgent
10
  from llama_index.core.tools import FunctionTool
11
+ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
12
 
13
  # ============ 1. SMOLAGENTS TOOLS ============
14
 
 
60
 
61
  # ============ AGENT CONFIGURATION ============
62
 
63
+ HF_TOKEN = os.getenv("HF_TOKEN", "")
 
 
 
 
64
  smol_model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct", token=HF_TOKEN)
65
 
66
  smol_agent = CodeAgent(
 
70
  )
71
 
72
  # LlamaIndex Setup
 
73
  llama_llm = HuggingFaceInferenceAPI(
74
  model_name="Qwen/Qwen2.5-7B-Instruct",
75
  token=HF_TOKEN,
76
+ task="conversational"
77
  )
78
 
 
 
 
 
 
 
 
 
 
79
  llama_agent = ReActAgent.from_tools(
80
  [
81
+ FunctionTool.from_defaults(fn=lambda: f"Tokyo: {datetime.datetime.now(pytz.timezone('Asia/Tokyo'))}", name="get_tokyo_time"),
82
+ FunctionTool.from_defaults(fn=lambda a, b: a * b, name="multiply")
83
  ],
84
  llm=llama_llm,
85
  verbose=True
 
88
  # ============ GRADIO UI ============
89
 
90
def chat_smol(message, history):
    """Gradio chat callback for the smolagents CodeAgent.

    Args:
        message: The user's chat message.
        history: Gradio-managed chat history (unused; the agent call is
            stateless per message).

    Returns:
        The agent's answer as a string, or a readable ``"Agent Error: ..."``
        message if the run fails — so the UI never shows a raw traceback.
    """
    try:
        # CodeAgent.run returns the final result directly.
        return str(smol_agent.run(message))
    except Exception as e:
        return f"Agent Error: {str(e)}"
 
 
 
 
92
 
93
def chat_llama(message, history):
    """Gradio chat callback for the LlamaIndex ReAct agent.

    Args:
        message: The user's chat message.
        history: Gradio-managed chat history (unused; the agent keeps its
            own conversation state).

    Returns:
        The agent's reply as a string, or a readable ``"Agent Error: ..."``
        message if the call fails — so the UI never shows a raw traceback.
    """
    try:
        response = llama_agent.chat(message)
        return str(response)
    except Exception as e:
        return f"Agent Error: {str(e)}"
 
 
 
96
 
97
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
98
  gr.Markdown("# 🤖 Multi-Framework AI Agent Space")
 
99
  with gr.Tab("Smolagents (Alfred)"):
 
100
  gr.ChatInterface(chat_smol)
 
101
  with gr.Tab("LlamaIndex (ReAct)"):
 
102
  gr.ChatInterface(chat_llama)
103
 
104
  if __name__ == "__main__":