from langchain.tools import tool
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from loguru import logger

from rag import get_vector_store

# Shared LLM instance used by the analysis tool; set via initialize_tool_llm().
tool_model: ChatOpenAI | None = None


def initialize_tool_llm(model: str) -> ChatOpenAI:
    """Initialize the shared ChatOpenAI instance used by the tools in this module."""
    global tool_model
    tool_model = ChatOpenAI(model=model, temperature=0.7)
    return tool_model
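

# Usage sketch: how a caller might set up the shared model before using the
# analysis tool below. The model name is illustrative, not prescribed here:
#
#   initialize_tool_llm("gpt-4o-mini")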


@tool
def create_rag_response(user_query: str) -> str:
    """
    Fetches relevant information for the user's query from a vector store.

    Args:
        user_query (str): The user's query.

    Returns:
        str: A concatenated string of relevant document contents retrieved from the vector store.
    """
    try:
        retrieved_docs = get_vector_store().similarity_search(user_query)

        if not retrieved_docs:
            return "No relevant information found for the given query."

        return "\n\n".join(doc.page_content for doc in retrieved_docs)

    except Exception as e:
        return f"An error occurred while fetching relevant information: {str(e)}"


@tool
def analyze_rag_response(query: str, rag_response: str) -> str:
    """
    Analyzes the RAG (Retrieval-Augmented Generation) response generated for a user's query.

    When a user requests an analysis of the RAG response, this tool evaluates the response
    to ensure it matches the user's query, providing insights into its relevance, clarity,
    and overall quality.

    Args:
        query (str): The original query provided by the user.
        rag_response (str): The response generated by the `create_rag_response` tool.

    Returns:
        str: A JSON analysis with:
            - **Original Response**: Full text of the RAG response.
            - **Key Points**: Main ideas in bullet points.
            - **Clarity**: A 1-10 clarity rating with reasoning.
            - **Relevance**: A 1-10 relevance rating.
            - **Suggestions**: Recommendations for improvement.

    Use Case:
        - Invoke when the user requests an analysis of the RAG response.
        - Compares the response to the query, highlights key points, and provides actionable feedback.
    """
    # Guard against use before initialize_tool_llm() has been called.
    if tool_model is None:
        return "The tool LLM has not been initialized. Call initialize_tool_llm() first."

    prompt = f"""
    You are a highly capable analysis tool specializing in evaluating responses generated by a Retrieval-Augmented Generation (RAG) system.
    Your task is to analyze the given response and provide the following details:
    1. **Original Response**: Include the full text of the response being analyzed.
    2. **Key Points**: Extract the main ideas or key points presented in the response. Provide these in a concise bullet-point format.
    3. **Clarity**: Assess the clarity of the response on a scale of 1 to 10, where 10 indicates perfect clarity. Briefly explain your reasoning.
    4. **Relevance**: Determine whether the response directly answers the query it was generated for. Rate relevance on a scale of 1 to 10.
    5. **Suggestions for Improvement**: If applicable, provide suggestions on how the response could be improved for better quality, clarity, or relevance.

    ### Input:
    Query: {query}
    RAG Response: {rag_response}

    ### Output:
    Provide your analysis in the following JSON format:
    {{
        "original_response": "{rag_response}",
        "key_points": ["Key point 1", "Key point 2", "Key point 3"],
        "clarity": "Clarity rating (1-10) - Explanation",
        "relevance": "Relevance rating (1-10)",
        "suggestions": ["Suggestion 1", "Suggestion 2"]
    }}
    """
    response = tool_model.invoke([HumanMessage(content=prompt)])
    return response.content
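

# Invocation sketch (assumes initialize_tool_llm() has already been called;
# `rag_text` stands in for the output of create_rag_response):
#
#   analyze_rag_response.invoke({
#       "query": "What is our refund policy?",
#       "rag_response": rag_text,
#   })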


@tool
def web_search(user_query: str) -> str:
    """
    Performs a web search using DuckDuckGo to fetch the top 5 relevant results for the user's query.

    This tool is used when no relevant information is found in the RAG response. It retrieves
    the top 5 search results from DuckDuckGo, including titles, snippets, and URLs.

    Args:
        user_query (str): The user's query.

    Returns:
        str: A summary of the top 5 search results or a message indicating that no information was found.

    Example:
        Input:
            user_query = "Latest advancements in quantum computing."
        Output:
            "Top Search Results:
            1. Title: Quantum Computing Breakthroughs
               Snippet: Recent advancements in quantum computing include...
               URL: https://example.com/article1
            2. Title: The Future of Quantum Tech
               Snippet: Quantum technologies are set to revolutionize...
               URL: https://example.com/article2
            ..."
    """
    try:
        duck_duck_go = DuckDuckGoSearchResults(max_results=5, output_format="list")
        search_results = duck_duck_go.invoke(user_query)

        if search_results:
            formatted_results = []
            for i, result in enumerate(search_results, start=1):
                title = result["title"]
                snippet = result["snippet"]
                url = result["link"]
                formatted_results.append(
                    f"{i}. Title: {title}\n   Snippet: {snippet}\n   URL: {url}"
                )
            logger.info(f"Web search completed; {len(formatted_results)} results retrieved")
            return "Top Search Results:\n\n" + "\n\n".join(formatted_results)
        else:
            logger.info("Web search completed; no results retrieved")
            return "No relevant web search results were found for the given query."

    except Exception as e:
        return f"An error occurred during the web search: {str(e)}"