Wayne0102 committed on
Commit
38efabc
·
verified ·
1 Parent(s): d613b79

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -42
app.py CHANGED
@@ -6,73 +6,60 @@ from llama_index.core.tools import FunctionTool
6
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
7
 
8
  # --- 1. SET UP THE LLM ---
9
- # Use a strong model that supports tool calling/logic
10
- # You need your Hugging Face Token here
11
  HF_TOKEN = "your_hf_token_here"
 
 
12
  llm = HuggingFaceInferenceAPI(
13
  model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
14
  token=HF_TOKEN
15
  )
16
 
17
  # --- 2. DEFINE THE TOOLS ---
18
- # LlamaIndex uses docstrings and type hints to explain tools to the LLM.
19
-
20
def get_tokyo_time() -> str:
    """Useful for when you need to know the current time in Tokyo, Japan."""
    # Localize "now" to Asia/Tokyo before formatting.
    tokyo = pytz.timezone('Asia/Tokyo')
    stamp = datetime.datetime.now(tokyo).strftime('%Y-%m-%d %H:%M:%S')
    return f"The current time in Tokyo is {stamp} JST"
 
 
 
 
 
25
 
26
def calculate(expression: str) -> str:
    """Useful for performing mathematical calculations. Input should be a math string like '2+2'."""
    import ast
    import operator

    # AST operator nodes we allow, mapped to their implementations.
    _ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def _eval(node):
        # Recursively evaluate literal numbers and +-*/ arithmetic only.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval(node.operand))
        raise ValueError("unsupported expression")

    try:
        # Simple safety check
        allowed_chars = "0123456789+-*/(). "
        if not all(char in allowed_chars for char in expression):
            return "Error: Invalid characters."
        # The char filter still admits '**' (e.g. '9**9**9' can hang the
        # process), so evaluate via a restricted AST walk instead of eval().
        return str(_eval(ast.parse(expression, mode="eval")))
    except Exception as e:
        return f"Calculation error: {str(e)}"
36
 
37
- # Define a knowledge base tool
38
def search_knowledge_base(query: str) -> str:
    """Useful for answering questions about AI, ML, LLM, RAG, and Hugging Face."""
    # Tiny in-memory lookup table; the first key found as a substring of
    # the lower-cased query wins (insertion order decides ties).
    entries = {
        "ai": "Artificial Intelligence is machines performing human-like tasks.",
        "ml": "Machine Learning is a subset of AI where algorithms learn from data.",
        "llm": "Large Language Models are AI systems trained on vast text.",
        "rag": "RAG combines document retrieval with AI generation.",
        "hugging face": "A platform for sharing AI models and datasets.",
    }
    needle = query.lower()
    hit = next((key for key in entries if key in needle), None)
    if hit is not None:
        return f"{hit.upper()}: {entries[hit]}"
    return "I don't have that specific info in my knowledge base, but I can try to answer using my general knowledge."
52
 
53
- # Wrap them as LlamaIndex Tools
54
# Wrap them as LlamaIndex Tools (docstrings/type hints become the tool specs)
time_tool, calc_tool, kb_tool = (
    FunctionTool.from_defaults(fn=f)
    for f in (get_tokyo_time, calculate, search_knowledge_base)
)
57
-
58
- # --- 3. INITIALIZE THE AGENT ---
59
# --- 3. INITIALIZE THE AGENT ---
# verbose=True prints the agent's "Thought" trace to the console.
agent = ReActAgent.from_tools(
    [time_tool, calc_tool, kb_tool],
    llm=llm,
    verbose=True,
)
64
 
65
- # --- 4. GRADIO INTERFACE ---
66
# --- 4. GRADIO INTERFACE ---
def agent_chat(message, history):
    # agent.chat() tracks conversation history internally, so Gradio's
    # `history` argument is not re-fed here.
    return str(agent.chat(message))
70
 
71
demo = gr.ChatInterface(
    fn=agent_chat,
    title="🤖 LlamaIndex Tokyo Agent",
    description="Ask me about the time in Tokyo, math, or AI definitions!",
    examples=[
        "What is the time in Tokyo?",
        "Calculate 152 * 4",
        "Explain what RAG is.",
    ],
)
77
 
78
  if __name__ == "__main__":
 
6
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
7
 
8
  # --- 1. SET UP THE LLM ---
9
+ # Replace with your actual HF Token (Settings -> Access Tokens)
 
10
  HF_TOKEN = "your_hf_token_here"
11
+
12
+ # We use Qwen2.5-Coder because it is excellent at logic and tool-calling
13
  llm = HuggingFaceInferenceAPI(
14
  model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
15
  token=HF_TOKEN
16
  )
17
 
18
  # --- 2. DEFINE THE TOOLS ---
 
 
19
def get_tokyo_time() -> str:
    """Useful for when you need to know the current time in Tokyo, Japan."""
    # zoneinfo (stdlib, 3.9+) replaces the third-party pytz dependency;
    # datetime.now(tz) yields the same Asia/Tokyo wall-clock time as
    # pytz.timezone('Asia/Tokyo') did.
    from zoneinfo import ZoneInfo

    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')} (JST)."
24
+
25
def get_local_time() -> str:
    """Useful for when you need to know the current time on the server's local machine."""
    # Naive local time (server timezone), formatted HH:MM:SS.
    stamp = datetime.datetime.now().strftime('%H:%M:%S')
    return f"The local server time is {stamp}."
29
 
30
def calculate(expression: str) -> str:
    """Useful for math calculations. Input should be a math string like '12 * 5' or '100 / 4'."""
    import ast
    import operator

    # AST operator nodes we allow, mapped to their implementations.
    _ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def _eval(node):
        # Recursively evaluate literal numbers and +-*/ arithmetic only.
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _ops:
            return _ops[type(node.op)](_eval(node.operand))
        raise ValueError("unsupported expression")

    try:
        # Basic security check
        allowed_chars = "0123456789+-*/(). "
        if not all(char in allowed_chars for char in expression):
            return "Error: Invalid math characters."
        # The char filter still admits '**' (e.g. '9**9**9' can hang the
        # process), so evaluate via a restricted AST walk instead of eval().
        return str(_eval(ast.parse(expression, mode="eval")))
    except Exception as e:
        return f"Error: {str(e)}"
40
 
41
# Wrap functions into LlamaIndex tools
tools = [
    FunctionTool.from_defaults(fn=f)
    for f in (get_tokyo_time, get_local_time, calculate)
]
 
 
 
 
 
 
 
 
 
47
 
48
# --- 3. INITIALIZE THE REACT AGENT ---
# The ReAct loop reasons step-by-step to pick a tool per turn;
# verbose=True echoes that "Thought" trace to the console.
agent = ReActAgent.from_tools(
    tools,
    llm=llm,
    verbose=True,
)
 
 
 
 
 
 
 
 
51
 
52
# --- 4. GRADIO CHAT INTERFACE ---
def chat_with_agent(message, history):
    # agent.chat() manages conversation state internally, so Gradio's
    # `history` argument is not re-fed here.
    return str(agent.chat(message))
57
 
58
demo = gr.ChatInterface(
    fn=chat_with_agent,
    title="🦙 LlamaIndex Tokyo Agent",
    description="I am a LlamaIndex-powered agent. I can tell you Tokyo time and do math!",
    examples=[
        "What is the time in Tokyo?",
        "What is 15 * 15?",
        "What time is it locally?",
    ],
)
64
 
65
  if __name__ == "__main__":