# Makhfi_AI/workflow/tools.py
# Refactor workflow to integrate ChatGoogleGenerativeAI and update
# vectorstore embeddings (commit 38ccc4e, author: Aasher).
from typing import Optional

from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool

from core.config import get_settings

from .memory_client import memory_client
from .utils import format_docs, format_memories
from .vectorstore import vector_store
settings = get_settings()
@tool
def search_vectorstore(
query: str,
limit: int = settings.TOP_K,
link: str = None,
) -> str:
"""
Searches the vectorstore for relevant documents.
Args:
query (str): A detailed, descriptive query in **English language**.
limit (int, optional): Number of Documents to retrieve. It should be between 2-8. Defaults to 4.
link (str, optional): A document-specific link used to filter results
to only include results from that source video. e.g. https://youtu.be/I-QWB9z0l9k?si=XgxcKklmE_cNGneD
Returns:
str: A string representation of the retrieved documents,
each wrapped in a `<Document>` XML tag
"""
limit = max(min(limit, 8), 2)
results = vector_store.similarity_search(
query, k=limit, filter={"link": link} if link else None
)
return format_docs(results)
@tool
def search_memories(query: str, config: RunnableConfig):
"""
Search memories about the user.
Args:
query (str): A natural language query in **English language**.
Returns:
str: A string representation of the user memories.
"""
user_id = config.get("configurable", {}).get("user_id")
memories = memory_client.search(
query, version="v2", filters={"AND": [{"user_id": user_id}]}
)
return format_memories(memories)