Wayne0102 committed on
Commit
f16bdea
·
verified ·
1 Parent(s): 83df412

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -9
app.py CHANGED
@@ -9,7 +9,7 @@ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
9
  # 1. SETUP LLM
10
  hf_token = os.getenv("HF_TOKEN")
11
 
12
- # We use the 7B model because it's faster and more available on the free tier
13
  llm = HuggingFaceInferenceAPI(
14
  model_name="Qwen/Qwen2.5-7B-Instruct",
15
  token=hf_token,
@@ -32,22 +32,35 @@ tools = [
32
  FunctionTool.from_defaults(fn=multiply)
33
  ]
34
 
35
- # 3. CREATE THE AGENT
36
- # The system prompt ensures the AI follows the ReAct pattern
 
 
 
 
 
 
 
 
 
 
 
 
37
# 3. CREATE THE AGENT
# Build a ReAct-style agent over the tool list; verbose=True logs the
# Thought/Action/Observation loop to stdout for debugging.
agent = ReActAgent.from_tools(tools, llm=llm, verbose=True)
42
 
43
def chat(message, history):
    """Gradio callback: forward one user message to the agent and return its reply.

    `history` is supplied by gr.ChatInterface but unused — the agent's own
    .chat() call keeps the conversation memory.
    """
    try:
        answer = agent.chat(message)
    except Exception as e:
        # Surface the failure text in the Gradio UI instead of crashing.
        return f"Error: {str(e)}"
    return str(answer)


# 4. GRADIO INTERFACE
gr.ChatInterface(chat, title="Unit 2: LlamaIndex Agent").launch()
 
9
  # 1. SETUP LLM
10
  hf_token = os.getenv("HF_TOKEN")
11
 
12
+ # We use the 7B model because it's much faster and reliable for the free tier
13
  llm = HuggingFaceInferenceAPI(
14
  model_name="Qwen/Qwen2.5-7B-Instruct",
15
  token=hf_token,
 
32
  FunctionTool.from_defaults(fn=multiply)
33
  ]
34
 
35
# 3. THE "STABILITY" PROMPT
# Spelling out the exact ReAct turn format keeps the model from stalling:
# it always knows which line to emit next.
RE_ACT_PROMPT = """You are a helpful assistant.
For every query, you MUST follow this sequence:
Thought: <your reasoning>
Action: <tool_name>
Action Input: {"arg1": value}
Observation: <result from tool>
... (repeat if needed)
Thought: I have the final answer.
Answer: <your final response to the user>
"""

# 4. CREATE THE AGENT
# context= injects the stability prompt; verbose=True prints the reasoning
# trace to stdout for debugging.
agent = ReActAgent.from_tools(
    tools,
    llm=llm,
    verbose=True,
    context=RE_ACT_PROMPT,
)
55
 
# 5. GRADIO INTERFACE
def chat(message, history):
    """Gradio callback: relay one user message to the agent and return its reply.

    `history` comes from gr.ChatInterface but is unused — agent.chat() holds
    the conversation state itself.
    """
    try:
        answer = agent.chat(message)
    except Exception as e:
        # Report the underlying failure directly in the chat window.
        return f"System Error: {str(e)}"
    return str(answer)


gr.ChatInterface(chat, title="Unit 2: LlamaIndex Agent (Fixed)").launch()