Commit 809f87e
Parent(s): cb5664b

langgraph with openai + separate llamaindex with subfolder

Files changed:
- app.py                                            +14 -86
- langgraph_dir/agent.py                            +129 -0
- langgraph_dir/config.py                           +1 -0
- langgraph_dir/custom_tools.py                     +33 -0
- langgraph_dir/prompt.py                           +9 -0
- llamaindex_dir/agent.py                           +74 -0
- config.py → llamaindex_dir/config.py              +1 -1
- custom_tools.py → llamaindex_dir/custom_tools.py  +0 -2
- prompt.py → llamaindex_dir/prompt.py              +0 -0
- requirements.txt                                  +4 -1

app.py
CHANGED

@@ -3,96 +3,16 @@ import gradio as gr
 import requests
 import pandas as pd
 
-from llama_index.core import PromptTemplate
-from llama_index.core.workflow import Context
-from llama_index.core.agent.workflow import ReActAgent, AgentStream, ToolCallResult
-from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI  # customized to support different provider
-from llama_index.tools.wikipedia import WikipediaToolSpec
-from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
-from llama_index.tools.code_interpreter import CodeInterpreterToolSpec
-
-from config import HF_MODEL_NAME, HF_PROVIDER
-from prompt import custom_react_system_header_str
-from custom_tools import query_image_tool, automatic_speech_recognition_tool
 
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
-# --- Basic Agent Definition ---
-# ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
-class BasicAgent:
-    def __init__(self):
-        print("BasicAgent initialized.")
-    def __call__(self, question: str) -> str:
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        fixed_answer = "This is a default answer."
-        print(f"Agent returning fixed answer: {fixed_answer}")
-        return fixed_answer
-
-class LLamaIndexAgent:
-    def __init__(self,
-                 model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
-                 provider="hf-inference",
-                 show_tools_desc=True,
-                 show_prompt=True):
-
-        # LLM definition
-        llm = HuggingFaceInferenceAPI(model_name=model_name,
-                                      provider=provider)
-        print(f"LLamaIndexAgent initialized with model \"{model_name}\"")
-
-        # tools definition
-        tool_spec_list = []
-        tool_spec_list += WikipediaToolSpec().to_tool_list()
-        tool_spec_list += DuckDuckGoSearchToolSpec().to_tool_list()
-        tool_spec_list += CodeInterpreterToolSpec().to_tool_list()
-        tool_spec_list += [query_image_tool, automatic_speech_recognition_tool]
-
-        # agent definition
-        self.agent = ReActAgent(llm=llm, tools=tool_spec_list)
-
-        # update default prompt with a custom one
-        custom_react_system_header = PromptTemplate(custom_react_system_header_str)
-        self.agent.update_prompts({"react_header": custom_react_system_header})
-
-        # context definition
-        self.ctx = Context(self.agent)
-
-        if show_tools_desc:
-            for i, tool in enumerate(tool_spec_list):
-                print("\n" + "="*30 + f" Tool {i+1} " + "="*30)
-                print(tool.metadata.description)
-
-        if show_prompt:
-            prompt_dict = self.agent.get_prompts()
-            for k, v in prompt_dict.items():
-                print("\n" + "="*30 + f" Prompt: {k} " + "="*30)
-                print(v.template)
-
-    async def __call__(self, question: str) -> str:
-        print("\n\n"+"*"*50)
-        print(f"Agent received question: {question}")
-        print("*"*50)
-
-        handler = self.agent.run(question, ctx=self.ctx)
-        async for ev in handler.stream_events():
-            # if isinstance(ev, ToolCallResult):
-            #     print(f"\nCall {ev.tool_name} with {ev.tool_kwargs}\nReturned: {ev.tool_output}")
-            if isinstance(ev, AgentStream):
-                print(f"{ev.delta}", end="", flush=True)
-
-        response = await handler
-
-        # post-process the response (cast AgentOutput to str and keep only what's after "FINAL ANSWER:" for the exact match)
-        response = str(response)
-        try:
-            response = response.split("FINAL ANSWER:")[-1].strip()
-        except:
-            print('Could not split response on "FINAL ANSWER:"')
-        print("\n\n"+"-"*50)
-        print(f"Agent returning with answer: {response}")
-        return response
+# --- Choice of framework (either "langgraph" or "llamaindex") ---
+# FRAMEWORK = 'langgraph'
+FRAMEWORK = 'llamaindex'
 
 async def run_and_submit_all(profile: gr.OAuthProfile | None):
     """

@@ -115,9 +35,17 @@ async def run_and_submit_all(profile: gr.OAuthProfile | None):
 
     # 1. Instantiate Agent (modify this part to create your agent)
     try:
-        agent = LLamaIndexAgent(model_name=HF_MODEL_NAME,
-                                provider=HF_PROVIDER)
-
+        if FRAMEWORK == 'langgraph':
+            from langgraph_dir.config import OPENAI_MODEL_NAME
+            from langgraph_dir.agent import LangGraphAgent
+            agent = LangGraphAgent(model_name=OPENAI_MODEL_NAME)
+        elif FRAMEWORK == 'llamaindex':
+            from llamaindex_dir.config import HF_MODEL_NAME, HF_PROVIDER
+            from llamaindex_dir.agent import LLamaIndexAgent
+            agent = LLamaIndexAgent(model_name=HF_MODEL_NAME, provider=HF_PROVIDER)
+        else:
+            raise AttributeError(
+                f"FRAMEWORK can either be 'langgraph' or 'llamaindex', received: '{FRAMEWORK}'")
     except Exception as e:
         print(f"Error instantiating agent: {e}")
         return f"Error initializing agent: {e}", None
langgraph_dir/agent.py
ADDED
@@ -0,0 +1,129 @@
+from typing import Literal
+
+from langchain_openai import ChatOpenAI
+from langgraph.graph import MessagesState
+from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
+from langgraph.graph import StateGraph, START, END
+
+from .prompt import system_prompt
+from .custom_tools import multiply, add, divide
+
+
+class LangGraphAgent:
+    def __init__(self,
+                 model_name="gpt-4.1-nano",
+                 show_tools_desc=True,
+                 show_prompt=True):
+
+        # =========== LLM definition ===========
+        llm = ChatOpenAI(model=model_name, temperature=0)
+        print(f"LangGraphAgent initialized with model \"{model_name}\"")
+
+        # =========== Augment the LLM with tools ===========
+        tools = [add, multiply, divide]
+        tools_by_name = {tool.name: tool for tool in tools}
+        llm_with_tools = llm.bind_tools(tools)
+
+        # tool_spec_list += WikipediaToolSpec().to_tool_list()
+        # tool_spec_list += DuckDuckGoSearchToolSpec().to_tool_list()
+        # tool_spec_list += CodeInterpreterToolSpec().to_tool_list()
+        # tool_spec_list += [query_image_tool, automatic_speech_recognition_tool]
+
+
+        # =========== Agent definition ===========
+
+        # Nodes
+        def llm_call(state: MessagesState):
+            """LLM decides whether to call a tool or not"""
+
+            return {
+                "messages": [
+                    llm_with_tools.invoke(
+                        [
+                            SystemMessage(
+                                content=system_prompt
+                            )
+                        ]
+                        + state["messages"]
+                    )
+                ]
+            }
+
+        def tool_node(state: dict):
+            """Performs the tool call"""
+
+            result = []
+            for tool_call in state["messages"][-1].tool_calls:
+                tool = tools_by_name[tool_call["name"]]
+                observation = tool.invoke(tool_call["args"])
+                result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"]))
+            return {"messages": result}
+
+
+        # Conditional edge function to route to the tool node or end based upon whether the LLM made a tool call
+        def should_continue(state: MessagesState) -> Literal["environment", END]:
+            """Decide if we should continue the loop or stop based upon whether the LLM made a tool call"""
+
+            messages = state["messages"]
+            last_message = messages[-1]
+            # If the LLM makes a tool call, then perform an action
+            if last_message.tool_calls:
+                return "Action"
+            # Otherwise, we stop (reply to the user)
+            return END
+
+        # Build workflow
+        agent_builder = StateGraph(MessagesState)
+
+        # Add nodes
+        agent_builder.add_node("llm_call", llm_call)
+        agent_builder.add_node("environment", tool_node)
+
+        # Add edges to connect nodes
+        agent_builder.add_edge(START, "llm_call")
+        agent_builder.add_conditional_edges(
+            "llm_call",
+            should_continue,
+            {
+                # Name returned by should_continue : Name of next node to visit
+                "Action": "environment",
+                END: END,
+            },
+        )
+        agent_builder.add_edge("environment", "llm_call")
+
+        # Compile the agent
+        self.agent = agent_builder.compile()
+
+
+        # if show_tools_desc:
+        #     for i, tool in enumerate(tool_spec_list):
+        #         print("\n" + "="*30 + f" Tool {i+1} " + "="*30)
+        #         print(tool.metadata.description)
+
+        # if show_prompt:
+        #     prompt_dict = self.agent.get_prompts()
+        #     for k, v in prompt_dict.items():
+        #         print("\n" + "="*30 + f" Prompt: {k} " + "="*30)
+        #         print(v.template)
+
+    def __call__(self, question: str) -> str:
+        print("\n\n"+"*"*50)
+        print(f"Agent received question: {question}")
+        print("*"*50)
+
+        # Invoke
+        messages = [HumanMessage(content=question)]
+        messages = self.agent.invoke({"messages": messages})
+        for m in messages["messages"]:
+            m.pretty_print()
+
+        # post-process the response (keep only what's after "FINAL ANSWER:" for the exact match)
+        response = str(messages["messages"][-1].content)
+        try:
+            response = response.split("FINAL ANSWER:")[-1].strip()
+        except:
+            print('Could not split response on "FINAL ANSWER:"')
+        print("\n\n"+"-"*50)
+        print(f"Agent returning with answer: {response}")
+        return response
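
A note on the routing above: should_continue returns the label "Action", which the conditional-edge mapping resolves to the "environment" node; the Literal["environment", END] annotation names destination nodes rather than the returned labels. Since the compiled StateGraph is exposed as self.agent, it can also be invoked directly to inspect the message trace that __call__ pretty-prints. A sketch, assuming OPENAI_API_KEY is set and using an illustrative question:

    from langchain_core.messages import HumanMessage
    from langgraph_dir.agent import LangGraphAgent

    agent = LangGraphAgent(model_name="gpt-4.1-nano")
    state = agent.agent.invoke({"messages": [HumanMessage(content="What is 4 + 5?")]})
    # expected trace: HumanMessage, an AIMessage carrying a tool call to `add`,
    # the ToolMessage with the observation, then an AIMessage ending in
    # "FINAL ANSWER: 9"
    for m in state["messages"]:
        print(type(m).__name__, "->", getattr(m, "content", ""))
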
langgraph_dir/config.py
ADDED
@@ -0,0 +1 @@
+OPENAI_MODEL_NAME = "gpt-4.1-nano"

langgraph_dir/custom_tools.py
ADDED
@@ -0,0 +1,33 @@
+from langchain_core.tools import tool
+
+@tool
+def multiply(a: int, b: int) -> int:
+    """Multiply a and b.
+
+    Args:
+        a: first int
+        b: second int
+    """
+    return a * b
+
+
+@tool
+def add(a: int, b: int) -> int:
+    """Adds a and b.
+
+    Args:
+        a: first int
+        b: second int
+    """
+    return a + b
+
+
+@tool
+def divide(a: int, b: int) -> float:
+    """Divide a and b.
+
+    Args:
+        a: first int
+        b: second int
+    """
+    return a / b
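
Because @tool wraps each function in a LangChain StructuredTool, the objects carry a .name (the key used in tools_by_name) and can be invoked standalone, which is exactly what tool_node does with tool.invoke(tool_call["args"]). A quick sketch with illustrative values:

    from langgraph_dir.custom_tools import multiply, divide

    print(multiply.name)                      # "multiply"
    print(multiply.invoke({"a": 6, "b": 7}))  # 42
    print(divide.invoke({"a": 1, "b": 3}))    # 0.3333333333333333
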
langgraph_dir/prompt.py
ADDED
@@ -0,0 +1,9 @@
+# customized GAIA system prompt
+system_prompt = """\
+You are a general AI assistant with tools.
+I will ask you a question. Use your tools, and answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. \
+YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
+If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
+If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
+If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
+"""
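
The FINAL ANSWER template this prompt enforces is the contract both agents' post-processing relies on: the wrappers split the model output on "FINAL ANSWER:" and keep only the tail for exact-match scoring. A round-trip sketch with an illustrative response:

    # what the prompt instructs the model to produce
    response = "I computed the product with the multiply tool.\nFINAL ANSWER: 42"
    # the app-side extraction used by both __call__ implementations
    print(response.split("FINAL ANSWER:")[-1].strip())  # "42"
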
llamaindex_dir/agent.py
ADDED
@@ -0,0 +1,74 @@
+from llama_index.core import PromptTemplate
+from llama_index.core.workflow import Context
+from llama_index.core.agent.workflow import ReActAgent, AgentStream, ToolCallResult
+from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI  # customized to support different provider
+from llama_index.tools.wikipedia import WikipediaToolSpec
+from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
+from llama_index.tools.code_interpreter import CodeInterpreterToolSpec
+
+from .prompt import custom_react_system_header_str
+from .custom_tools import query_image_tool, automatic_speech_recognition_tool
+
+class LLamaIndexAgent:
+    def __init__(self,
+                 model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
+                 provider="hf-inference",
+                 show_tools_desc=True,
+                 show_prompt=True):
+
+        # LLM definition
+        llm = HuggingFaceInferenceAPI(model_name=model_name,
+                                      provider=provider)
+        print(f"LLamaIndexAgent initialized with model \"{model_name}\"")
+
+        # tools definition
+        tool_spec_list = []
+        tool_spec_list += WikipediaToolSpec().to_tool_list()
+        tool_spec_list += DuckDuckGoSearchToolSpec().to_tool_list()
+        tool_spec_list += CodeInterpreterToolSpec().to_tool_list()
+        tool_spec_list += [query_image_tool, automatic_speech_recognition_tool]
+
+        # agent definition
+        self.agent = ReActAgent(llm=llm, tools=tool_spec_list)
+
+        # update default prompt with a custom one
+        custom_react_system_header = PromptTemplate(custom_react_system_header_str)
+        self.agent.update_prompts({"react_header": custom_react_system_header})
+
+        # context definition
+        self.ctx = Context(self.agent)
+
+        if show_tools_desc:
+            for i, tool in enumerate(tool_spec_list):
+                print("\n" + "="*30 + f" Tool {i+1} " + "="*30)
+                print(tool.metadata.description)
+
+        if show_prompt:
+            prompt_dict = self.agent.get_prompts()
+            for k, v in prompt_dict.items():
+                print("\n" + "="*30 + f" Prompt: {k} " + "="*30)
+                print(v.template)
+
+    async def __call__(self, question: str) -> str:
+        print("\n\n"+"*"*50)
+        print(f"Agent received question: {question}")
+        print("*"*50)
+
+        handler = self.agent.run(question, ctx=self.ctx)
+        async for ev in handler.stream_events():
+            # if isinstance(ev, ToolCallResult):
+            #     print(f"\nCall {ev.tool_name} with {ev.tool_kwargs}\nReturned: {ev.tool_output}")
+            if isinstance(ev, AgentStream):
+                print(f"{ev.delta}", end="", flush=True)
+
+        response = await handler
+
+        # post-process the response (cast AgentOutput to str and keep only what's after "FINAL ANSWER:" for the exact match)
+        response = str(response)
+        try:
+            response = response.split("FINAL ANSWER:")[-1].strip()
+        except:
+            print('Could not split response on "FINAL ANSWER:"')
+        print("\n\n"+"-"*50)
+        print(f"Agent returning with answer: {response}")
+        return response
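
Unlike the LangGraph wrapper, LLamaIndexAgent.__call__ is async (it streams AgentStream events from the ReAct handler), so a caller outside an event loop has to run it with asyncio. A usage sketch, assuming an HF token with access to the chosen provider and an illustrative question:

    import asyncio

    from llamaindex_dir.agent import LLamaIndexAgent

    agent = LLamaIndexAgent(model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
                            provider="hf-inference",
                            show_tools_desc=False,
                            show_prompt=False)
    answer = asyncio.run(agent("In which year did the French Revolution begin?"))
    print(answer)
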
config.py → llamaindex_dir/config.py
RENAMED
@@ -5,4 +5,4 @@ HF_PROVIDER = "nebius"
 # HF_PROVIDER = "hf-inference"
 
 # HF_MODEL_NAME = "Qwen/Qwen3-32B"
-# HF_PROVIDER = "hf-inference"
+# HF_PROVIDER = "hf-inference"

custom_tools.py → llamaindex_dir/custom_tools.py
RENAMED
@@ -1,5 +1,3 @@
-# custom tools
-
 from huggingface_hub import InferenceClient
 from llama_index.core.tools import FunctionTool
 

prompt.py → llamaindex_dir/prompt.py
RENAMED
File without changes

requirements.txt
CHANGED
@@ -4,4 +4,7 @@ llama-index
 llama-index-llms-huggingface-api @ git+https://github.com/guillaumefrd/llama_index.git@add-provider-HF-API#subdirectory=llama-index-integrations/llms/llama-index-llms-huggingface-api
 llama_index.tools.wikipedia
 llama_index.tools.duckduckgo
-llama_index.tools.code_interpreter
+llama_index.tools.code_interpreter
+langchain
+langgraph
+langchain-openai