YeeJun02 commited on
Commit
afc969d
·
verified ·
1 Parent(s): cd1a4ae

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +117 -0
app.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datetime
3
+ import pytz
4
+ import math
5
+ import requests
6
+ import gradio as gr
7
+ from deep_translator import GoogleTranslator
8
+
9
+ # --- LLAMAINDEX IMPORTS ---
10
+ from llama_index.core.agent import ReActAgent
11
+ from llama_index.core.tools import FunctionTool
12
+ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
13
+
14
+ # --- SMOLAGENTS IMPORTS ---
15
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, tool, InferenceClientModel
16
+
17
# 1. SHARED SETUP & TOKENS
# Ensure you set these in your Hugging Face Space Settings -> Secrets
# os.getenv returns None when the secret is missing; the inference clients
# below will then fail to authenticate at request time, not at import time.
HF_TOKEN = os.getenv("HF_TOKEN")

# ==========================================
# PART 1: LLAMAINDEX AGENT SETUP
# ==========================================
# LLM backend for the LlamaIndex agent: Qwen2.5-7B-Instruct served through
# the HF Inference API, routed to the "together" provider in chat
# ("conversational") mode.
li_llm = HuggingFaceInferenceAPI(
    model_name="Qwen/Qwen2.5-7B-Instruct",
    token=HF_TOKEN,
    task="conversational",
    provider="together"
)
30
+
31
def get_tokyo_time_li() -> str:
    """Return the current wall-clock time in Tokyo, Japan.

    Returns:
        A human-readable sentence containing the current Tokyo time
        formatted as HH:MM:SS.
    """
    # Use the stdlib zoneinfo (Python 3.9+) instead of the third-party pytz
    # package: one less dependency, and datetime.now(tz) with a ZoneInfo is
    # the documented, DST-correct way to get an aware "now" in a target zone.
    from zoneinfo import ZoneInfo

    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
35
+
36
def multiply_li(a: float, b: float) -> float:
    """Multiplies two numbers (a and b) and returns the result."""
    # Docstring kept verbatim: FunctionTool uses it as the tool description
    # shown to the LLM.
    product = a * b
    return product
39
+
40
# Wrap the plain Python functions as LlamaIndex tools the ReAct agent can
# invoke; from_defaults derives each tool's name/description from the
# function signature and docstring.
li_tools = [
    FunctionTool.from_defaults(fn=tool_fn)
    for tool_fn in (get_tokyo_time_li, multiply_li)
]
44
+
45
# System prompt enforcing the ReAct (Reason + Act) loop: the model must emit
# Thought / Action / Action Input turns, observe tool results, and finish
# with a final Answer. The wording is runtime behavior — it is parsed by the
# agent loop — so it must not be reworded casually.
RE_ACT_PROMPT = """You are a helpful assistant.
For every query, you MUST follow this sequence:
Thought: <your reasoning>
Action: <tool_name>
Action Input: {"arg1": value}
Observation: <result from tool>
... (repeat if needed)
Thought: I have the final answer.
Answer: <your final response to the user>
"""
55
+
56
# Build the ReAct agent: a tool-calling loop driven by li_llm over li_tools,
# seeded with the custom ReAct prompt above. verbose=True prints each
# Thought/Action/Observation step to the Space logs for debugging.
li_agent = ReActAgent.from_tools(
    li_tools,
    llm=li_llm,
    verbose=True,
    context=RE_ACT_PROMPT
)
62
+
63
def li_chat_fn(message, history):
    """Gradio chat callback for the LlamaIndex tab.

    Routes the user message through the ReAct agent and returns its reply
    as plain text. `history` is supplied by gr.ChatInterface but unused —
    the agent maintains its own conversation memory.
    """
    try:
        reply = li_agent.chat(message)
    except Exception as e:
        # Surface any agent/backend failure inside the chat window instead
        # of crashing the UI.
        return f"LlamaIndex Error: {str(e)}"
    return str(reply)
68
+
69
# Standalone chat UI for the LlamaIndex agent (becomes tab 1 of the demo).
li_interface = gr.ChatInterface(li_chat_fn, title="LlamaIndex Agent")
70
+
71
# ==========================================
# PART 2: SMOLAGENTS SETUP
# ==========================================
# LLM backend for the smolagents CodeAgent: a coder-tuned model, since
# CodeAgent plans by writing and executing Python snippets.
smol_model = InferenceClientModel(
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    token=HF_TOKEN
)
78
+
79
@tool
def weather_tool(location: str) -> str:
    """Get current weather for any location.
    Args:
        location: The name of the city.
    """
    # Let requests URL-encode the query via params= instead of interpolating
    # the raw string into the URL, and bound both calls with a timeout so a
    # stalled API can never hang the agent loop.
    try:
        geo_res = requests.get(
            "https://geocoding-api.open-meteo.com/v1/search",
            params={"name": location, "count": 1},
            timeout=10,
        ).json()
        if not geo_res.get('results'):
            return "Location not found."
        data = geo_res['results'][0]
        w_res = requests.get(
            "https://api.open-meteo.com/v1/forecast",
            params={
                "latitude": data['latitude'],
                "longitude": data['longitude'],
                "current": "temperature_2m",
            },
            timeout=10,
        ).json()
        # Guard against a payload missing the expected keys rather than
        # raising a KeyError into the agent.
        temp = w_res.get('current', {}).get('temperature_2m')
        if temp is None:
            return f"Weather data unavailable for {location}."
        return f"Temp in {location}: {temp}°C"
    except requests.RequestException as e:
        # Network/HTTP failures become a readable observation for the agent
        # instead of an unhandled traceback.
        return f"Weather lookup failed: {e}"
    except ValueError:
        # .json() raises ValueError on a non-JSON body.
        return "Weather service returned an unexpected response."
92
+
93
# CodeAgent solves tasks by generating and running Python code; it can call
# weather_tool, search the web via DuckDuckGo, and import only the
# explicitly whitelisted modules inside its generated snippets.
smol_agent = CodeAgent(
    model=smol_model,
    tools=[weather_tool, DuckDuckGoSearchTool()],
    additional_authorized_imports=['requests', 'math', 'pytz']
)
98
+
99
def smol_chat_fn(message, history):
    """Gradio chat callback for the smolagents tab.

    Runs the user message through the CodeAgent and returns the final
    answer as plain text. `history` is required by gr.ChatInterface but
    unused here.
    """
    try:
        answer = smol_agent.run(message)
    except Exception as e:
        # Report failures inline in the chat rather than raising into Gradio.
        return f"Smolagents Error: {str(e)}"
    return str(answer)
104
+
105
# Standalone chat UI for the smolagents CodeAgent (becomes tab 2 of the demo).
smol_interface = gr.ChatInterface(smol_chat_fn, title="Smolagents Assistant")
106
+
107
# ==========================================
# PART 3: COMBINE INTO TABS
# ==========================================
# One tab per framework so the same prompt can be tried against both agents
# side by side.
_tab_panels = [li_interface, smol_interface]
_tab_labels = ["LlamaIndex Agent", "Smolagents Agent"]
demo = gr.TabbedInterface(
    _tab_panels,
    _tab_labels,
    title="Multi-Framework Agent Comparison"
)
115
+
116
# Launch the Gradio app only when executed as a script (the Hugging Face
# Space entrypoint); importing this module elsewhere will not start a server.
if __name__ == "__main__":
    demo.launch()