# Hugging Face Space: Gradio chat app wrapping a LlamaIndex ReAct agent
# (Qwen2.5-7B-Instruct served via the "together" inference provider).
import datetime
import os
from zoneinfo import ZoneInfo

import gradio as gr
import pytz
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
# 1. SETUP LLM
# Read the Hugging Face API token from the environment (set in Space secrets).
hf_token = os.getenv("HF_TOKEN")

# The "together" provider exposes this model through its chat endpoint, so
# the task must be "conversational"; tool use is driven by the ReAct prompt
# below rather than the model's native function-calling API.
_llm_config = {
    "model_name": "Qwen/Qwen2.5-7B-Instruct",
    "token": hf_token,
    "task": "conversational",
    "provider": "together",
    "is_function_calling_model": False,
}
llm = HuggingFaceInferenceAPI(**_llm_config)
# 2. DEFINE YOUR TOOLS (Must be defined before the Agent)
def get_tokyo_time() -> str:
    """Return the current time in Tokyo, Japan, as a human-readable sentence.

    Returns:
        A string of the form ``"The current time in Tokyo is HH:MM:SS"``.

    Uses the stdlib ``zoneinfo`` module (Python 3.9+) instead of the
    third-party ``pytz`` package; both resolve the same IANA key
    ("Asia/Tokyo"), so the output is unchanged.
    """
    now = datetime.datetime.now(ZoneInfo("Asia/Tokyo"))
    return f"The current time in Tokyo is {now.strftime('%H:%M:%S')}"
def multiply(a: float, b: float) -> float:
    """Return the product of *a* and *b*."""
    product = a * b
    return product
# Wrap the plain Python functions in LlamaIndex Tool objects so the agent
# can discover them by name and invoke them from its Action steps.
tools = [
    FunctionTool.from_defaults(fn=func)
    for func in (get_tokyo_time, multiply)
]
# 3. THE "STABILITY" PROMPT
# Extra system context injected into the ReAct agent. Because the LLM is
# configured with is_function_calling_model=False, this prompt spells out
# the exact Thought/Action/Observation/Answer cycle the agent's output
# parser expects, keeping the model on the ReAct format rails.
RE_ACT_PROMPT = """You are a helpful assistant.
For every query, you MUST follow this sequence:
Thought: <your reasoning>
Action: <tool_name>
Action Input: {"arg1": value}
Observation: <result from tool>
... (repeat if needed)
Thought: I have the final answer.
Answer: <your final response to the user>
"""
# 4. CREATE THE AGENT
# 'tools' and 'llm' are defined above; RE_ACT_PROMPT rides along as extra
# context so the model sticks to the ReAct output format the parser needs.
agent = ReActAgent.from_tools(
    tools,
    llm=llm,
    context=RE_ACT_PROMPT,
    verbose=True,
)
# 5. GRADIO INTERFACE
def chat(message, history):
    """Gradio chat callback: forward *message* to the ReAct agent.

    *history* is supplied by gr.ChatInterface but is not used here — the
    agent object keeps its own conversation state. Any failure is surfaced
    to the user as a "System Error:" string instead of crashing the UI.
    """
    try:
        reply = str(agent.chat(message))
    except Exception as err:  # boundary handler: report errors in-chat
        return f"System Error: {str(err)}"
    return reply
# Build the chat UI around the callback and start the app server.
demo = gr.ChatInterface(chat, title="Unit 2: LlamaIndex Agent (Fixed)")
demo.launch()