Wayne0102 committed on
Commit
c6d8fe0
·
verified ·
1 Parent(s): 4391349

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -18
app.py CHANGED
@@ -2,14 +2,11 @@ import os
2
  import gradio as gr
3
  import datetime
4
  import pytz
5
- import os
6
- from llama_index.core.agent import ReActAgent # <--- MUST be this exact import
7
  from llama_index.core.tools import FunctionTool
8
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
9
- # ... rest of your imports
10
 
11
  # 1. Setup LLM
12
- # Reminder: Add HF_TOKEN to your Space "Secrets" in Settings
13
  hf_token = os.getenv("HF_TOKEN")
14
 
15
  llm = HuggingFaceInferenceAPI(
@@ -17,9 +14,9 @@ llm = HuggingFaceInferenceAPI(
17
  token=hf_token
18
  )
19
 
20
- # 2. Define your Tools
21
  def get_tokyo_time() -> str:
22
- """Returns the current time in Tokyo, Japan."""
23
  tz = pytz.timezone('Asia/Tokyo')
24
  return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
25
 
def multiply(a: float, b: float) -> float:
    """Multiplies two numbers and returns the result."""
    # Simple arithmetic tool; the docstring is the agent-facing description.
    product = a * b
    return product
29
 
30
- # Wrap tools
31
  tools = [
32
  FunctionTool.from_defaults(fn=get_tokyo_time),
33
  FunctionTool.from_defaults(fn=multiply)
34
  ]
35
 
36
- # 3. Create the Agent
37
- # Note: Using ReActAgent.from_tools is correct for the core class
38
  agent = ReActAgent.from_tools(
39
  tools,
40
  llm=llm,
@@ -43,15 +39,8 @@ agent = ReActAgent.from_tools(
43
 
44
  # 4. Gradio Interface
45
  def chat(message, history):
46
- # The agent handles the conversation state internally
47
  response = agent.chat(message)
48
  return str(response)
49
 
50
- demo = gr.ChatInterface(
51
- fn=chat,
52
- title="Unit 2: LlamaIndex Agent",
53
- description="I am a LlamaIndex agent with access to Tokyo time and multiplication tools."
54
- )
55
-
56
- if __name__ == "__main__":
57
- demo.launch()
 
2
  import gradio as gr
3
  import datetime
4
  import pytz
5
+ from llama_index.core.agent import ReActAgent
 
6
  from llama_index.core.tools import FunctionTool
7
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 
8
 
9
  # 1. Setup LLM
 
10
# Read the Hugging Face API token from the Space's secrets (None if unset).
hf_token = os.environ.get("HF_TOKEN")
11
 
12
  llm = HuggingFaceInferenceAPI(
 
14
  token=hf_token
15
  )
16
 
17
+ # 2. Define Tools
18
def get_tokyo_time() -> str:
    """Useful for when you need to know the current time in Tokyo, Japan."""
    # NOTE: the docstring above doubles as the agent-facing tool description
    # (FunctionTool.from_defaults reads it), so it must stay descriptive.
    # zoneinfo is stdlib since Python 3.9 (PEP 615), so the third-party pytz
    # dependency is not needed for a plain timezone lookup; imported locally
    # to keep this fix self-contained.
    from zoneinfo import ZoneInfo

    tz = ZoneInfo("Asia/Tokyo")
    return f"The current time in Tokyo is {datetime.datetime.now(tz).strftime('%H:%M:%S')}"
22
 
 
24
  """Multiplies two numbers and returns the result."""
25
  return a * b
26
 
 
27
# Wrap the two plain functions as LlamaIndex tools; FunctionTool derives each
# tool's name and description from the function signature and docstring.
tools = [
    FunctionTool.from_defaults(fn=fn)
    for fn in (get_tokyo_time, multiply)
]
31
 
32
+ # 3. Create the Agent (The "Classic" Core version)
33
+ # This will now work correctly with .from_tools()
34
  agent = ReActAgent.from_tools(
35
  tools,
36
  llm=llm,
 
39
 
40
  # 4. Gradio Interface
41
def chat(message, history):
    """Gradio callback: send the user's message to the agent and return its reply.

    `history` is required by the ChatInterface signature but is unused here:
    the ReActAgent keeps its own conversation memory across .chat() calls.
    """
    reply = agent.chat(message)
    return str(reply)
45
 
46
# Build the interface once under the conventional `demo` name (Hugging Face
# Gradio Spaces look for a module-level `demo`), and guard launch() so that
# importing this module does not start a server as a side effect.
demo = gr.ChatInterface(chat, title="Unit 2: LlamaIndex Agent")

if __name__ == "__main__":
    demo.launch()