# modular-rag-bot / services/nutrition_bot.py
from typing import Dict, List
from datetime import datetime
from langchain.agents import create_tool_calling_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'models'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'config'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'core'))
from models import memory_client
from workflow import agentic_rag
from config import config
class NutritionBot:
    """Customer-facing nutrition support agent.

    Wraps a tool-calling LangChain agent around a chat model and the
    `agentic_rag` retrieval tool, and persists every exchange in a
    long-term memory store so later answers can reference past
    interactions for the same user.
    """

    def __init__(self):
        """Initialize the memory client, chat model, prompt, and agent executor."""
        # Shared long-term memory client (add / search past conversations).
        self.memory = memory_client

        # temperature=0 keeps the nutrition guidance deterministic.
        self.client = ChatOpenAI(
            model_name=config.CHAT_MODEL,
            api_key=config.API_KEY,
            openai_api_base=config.OPENAI_API_BASE,
            temperature=0
        )

        # The agent's only tool: evidence-based retrieval via the RAG workflow.
        tools = [agentic_rag]

        system_prompt = """You are a caring and knowledgeable Medical Support Agent, specializing in nutrition disorder-related guidance. Your goal is to provide accurate, empathetic, and tailored nutritional recommendations while ensuring a seamless customer experience.
Guidelines for Interaction:
Maintain a polite, professional, and reassuring tone.
Show genuine empathy for customer concerns and health challenges.
Reference past interactions to provide personalized and consistent advice.
Engage with the customer by asking about their food preferences, dietary restrictions, and lifestyle before offering recommendations.
Ensure consistent and accurate information across conversations.
If any detail is unclear or missing, proactively ask for clarification.
Always use the agentic_rag tool to retrieve up-to-date and evidence-based nutrition insights.
Keep track of ongoing issues and follow-ups to ensure continuity in support.
Your primary goal is to help customers make informed nutrition decisions that align with their health conditions and personal preferences.
"""

        prompt = ChatPromptTemplate.from_messages([
            ("system", system_prompt),
            ("human", "{input}"),
            # Scratchpad slot required by tool-calling agents for tool traces.
            ("placeholder", "{agent_scratchpad}")
        ])

        agent = create_tool_calling_agent(self.client, tools, prompt)
        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

    def store_customer_interaction(self, user_id: str, message: str, response: str, metadata: Dict = None):
        """Persist one user/assistant exchange in long-term memory.

        Args:
            user_id: Identifier the memory store partitions conversations by.
            message: The customer's message.
            response: The agent's reply.
            metadata: Optional extra metadata; a timestamp is always added.
        """
        if metadata is None:
            metadata = {}
        # Stamp every interaction so history can be ordered/audited later.
        metadata["timestamp"] = datetime.now().isoformat()

        conversation = [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response}
        ]
        self.memory.add(
            conversation,
            user_id=user_id,
            output_format="v1.1",
            metadata=metadata
        )

    def get_relevant_history(self, user_id: str, query: str) -> List[Dict]:
        """Return up to five stored memories most relevant to `query` for this user."""
        return self.memory.search(
            query=query,
            user_id=user_id,
            limit=5
        )

    def handle_customer_query(self, user_id: str, query: str) -> str:
        """Answer a customer query, using and updating long-term memory.

        Retrieves relevant past interactions, feeds them to the agent as
        context alongside the current query, stores the new exchange, and
        returns the agent's reply text.
        """
        relevant_history = self.get_relevant_history(user_id, query)

        # Build a readable context block from retrieved memories.
        # BUG FIX: the original appended the same memory text twice, labelled
        # once "Customer:" and once "Support:". Each search hit is a single
        # memory snippet, so it is included exactly once here.
        context = "Previous relevant interactions:\n"
        for past in relevant_history:
            context += f"Customer: {past['memory']}\n"
            context += "---\n"
        print("Context: ", context)

        prompt = f"""
Context:
{context}
Current customer query: {query}
Provide a helpful response that takes into account any relevant past interactions.
"""
        response = self.agent_executor.invoke({"input": prompt})

        # Persist this exchange so future queries can reference it.
        self.store_customer_interaction(
            user_id=user_id,
            message=query,
            response=response["output"],
            metadata={"type": "support_query"}
        )
        return response['output']