feat: add custom prompts
Browse files- app.py +17 -12
- prompts.yaml +50 -0
app.py
CHANGED
|
@@ -6,6 +6,7 @@ from llama_index.core.workflow import Context
|
|
| 6 |
from llama_index.core.agent.workflow import AgentStream, ToolCallResult
|
| 7 |
import gradio as gr
|
| 8 |
import asyncio
|
|
|
|
| 9 |
|
| 10 |
|
| 11 |
agent = None
|
|
@@ -13,36 +14,40 @@ ctx = None
|
|
| 13 |
|
| 14 |
async def initialize_agent(google_api_key, folder_path="./dummy_data", model="gemini-2.5-flash-lite"):
|
| 15 |
|
| 16 |
-
|
| 17 |
-
|
|
|
|
| 18 |
llm = GoogleGenAI(model=model, api_key=google_api_key)
|
|
|
|
|
|
|
| 19 |
rag = RAG(llm=llm)
|
| 20 |
-
|
| 21 |
rag.load_and_index_folder(folder_path)
|
| 22 |
|
| 23 |
rag_agent_tools = rag.get_tools()
|
| 24 |
-
|
|
|
|
| 25 |
mcp_client = JournalMCPClient()
|
| 26 |
mcp_tools = await mcp_client.get_tools()
|
| 27 |
|
| 28 |
-
|
| 29 |
agent = ReActAgent(
|
| 30 |
tools=[*rag_agent_tools, *mcp_tools],
|
| 31 |
llm=llm,
|
| 32 |
verbose=True,
|
| 33 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
ctx = Context(agent)
|
| 35 |
-
|
| 36 |
-
# agent_state["agent"] = agent
|
| 37 |
-
# agent_state["ctx"] = ctx
|
| 38 |
|
| 39 |
|
| 40 |
async def run_agent(query_text, chat_history):
|
| 41 |
|
| 42 |
-
global agent
|
| 43 |
-
|
| 44 |
-
# agent = agent_state["agent"]
|
| 45 |
-
# ctx = agent_state["ctx"]
|
| 46 |
|
| 47 |
handler = agent.run(query_text, ctx=ctx)
|
| 48 |
|
|
|
|
| 6 |
from llama_index.core.agent.workflow import AgentStream, ToolCallResult
|
| 7 |
import gradio as gr
|
| 8 |
import asyncio
|
| 9 |
+
import yaml
|
| 10 |
|
| 11 |
|
| 12 |
agent = None
|
|
|
|
| 14 |
|
| 15 |
async def initialize_agent(google_api_key, folder_path="./dummy_data", model="gemini-2.5-flash-lite"):
    """Build the module-global ReAct agent and its conversation context.

    Wires together the Gemini LLM, the RAG tools indexed from *folder_path*,
    and the journal MCP tools, then installs the custom ReAct header prompt
    loaded from ``prompts.yaml``.

    Args:
        google_api_key: API key passed to ``GoogleGenAI``.
        folder_path: Folder whose documents are loaded and indexed for RAG.
        model: Gemini model name to use.
    """
    # Local import: update_prompts() takes prompt-template objects, not raw
    # strings, so the YAML text must be wrapped in a PromptTemplate below.
    from llama_index.core import PromptTemplate

    global agent, ctx

    # get llm
    llm = GoogleGenAI(model=model, api_key=google_api_key)

    # load RAG tools built from the documents folder
    rag = RAG(llm=llm)
    rag.load_and_index_folder(folder_path)
    rag_agent_tools = rag.get_tools()

    # load MCP tools
    mcp_client = JournalMCPClient()
    mcp_tools = await mcp_client.get_tools()

    # create agent over both tool sets
    agent = ReActAgent(
        tools=[*rag_agent_tools, *mcp_tools],
        llm=llm,
        verbose=True,
    )

    # update prompt — fix: wrap the YAML string in a PromptTemplate, since
    # update_prompts() expects BasePromptTemplate values, not plain str.
    # Explicit encoding so the prompt file reads identically on every OS.
    with open("prompts.yaml", "r", encoding="utf-8") as f:
        prompts_dict = yaml.safe_load(f)
    agent.update_prompts({"react_header": PromptTemplate(prompts_dict["react_header"])})

    # set context (conversation state reused by run_agent across turns)
    ctx = Context(agent)
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
|
| 48 |
async def run_agent(query_text, chat_history):
|
| 49 |
|
| 50 |
+
global agent, ctx
|
|
|
|
|
|
|
|
|
|
| 51 |
|
| 52 |
handler = agent.run(query_text, ctx=ctx)
|
| 53 |
|
prompts.yaml
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Custom prompt templates loaded by initialize_agent() in app.py.
# `react_header` overrides the default LlamaIndex ReAct system header;
# {tool_desc} and {tool_names} are filled in by the agent at runtime, and
# doubled braces {{ }} are literal braces in the rendered prompt.
react_header: |
  You are designed to help with a variety of tasks, from answering questions to providing summaries to other types of analyses.

  ## Tools

  You have access to a wide variety of tools. You are responsible for using the tools in any sequence you deem appropriate to complete the task at hand.
  This may require breaking the task into subtasks and using different tools to complete each subtask.

  You have access to the following tools:
  {tool_desc}

  NOTE: WHEN THE MCP TOOLS DON'T HELP, SWITCH TO RAG TOOLS TO ANSWER THE QUESTION.

  ## Output Format

  Please answer in the same language as the question and use the following format:

  ```
  Thought: The current language of the user is: (user's language). I need to use a tool to help me answer the question.
  Action: tool name (one of {tool_names}) if using a tool.
  Action Input: the input to the tool, in a JSON format representing the kwargs (e.g. {{"input": "hello world", "num_beams": 5}})
  ```

  Please ALWAYS start with a Thought.

  NEVER surround your response with markdown code markers. You may use code markers within your response if you need to.

  Please use a valid JSON format for the Action Input. Do NOT do this {{'input': 'hello world', 'num_beams': 5}}.

  If this format is used, the tool will respond in the following format:

  ```
  Observation: tool response
  ```

  You should keep repeating the above format till you have enough information to answer the question without using any more tools. At that point, you MUST respond in one of the following two formats:

  ```
  Thought: I can answer without using any more tools. I'll use the user's language to answer
  Answer: [your answer here (In the same language as the user's question)]
  ```

  ```
  Thought: I cannot answer the question with the provided tools.
  Answer: [your answer here (In the same language as the user's question)]
  ```

  ## Current Conversation

  Below is the current conversation consisting of interleaving human and assistant messages.