"""Agentic RAG assistant for nutrition-disorder support (LangGraph + Streamlit)."""

# --- Standard library and utility imports ---
import os
import json

import chromadb
from dotenv import load_dotenv

# --- LangChain core components ---
from langchain_core.documents import Document
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor, CrossEncoderReranker
from langchain.retrievers import ContextualCompressionRetriever

# --- LangChain community, splitter, and agent components ---
from langchain_community.vectorstores import Chroma
from langchain_community.document_loaders import PyPDFDirectoryLoader, PyPDFLoader
from langchain_community.cross_encoders import HuggingFaceCrossEncoder
from langchain_experimental.text_splitter import SemanticChunker
from langchain.text_splitter import (
    CharacterTextSplitter,
    RecursiveCharacterTextSplitter
)
from langchain_core.tools import tool
from langchain.agents import create_tool_calling_agent, AgentExecutor

# --- OpenAI chat and embedding models (duplicate imports removed) ---
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI

# --- LlamaParse / LlamaIndex ---
from llama_parse import LlamaParse
from llama_index.core import Settings, SimpleDirectoryReader

# --- LangGraph workflow primitives ---
from langgraph.graph import StateGraph, END, START

# --- Typing, data models, and supporting services ---
from pydantic import BaseModel
from typing import Dict, List, Tuple, Any, TypedDict

import numpy as np
from groq import Groq
from mem0 import MemoryClient
import streamlit as st
from datetime import datetime

# --- Environment configuration ---
# Load variables from a local .env file, then read the required keys.
load_dotenv()

api_key = os.environ.get("API_KEY")
endpoint = os.environ.get("OPENAI_API_BASE")
m0_api_key = os.environ.get("M0_API_KEY")
llama_api_key = os.environ.get("LLAMA_API_KEY")

# Chroma-native embedding function (for use with chromadb clients directly)
embedding_function = chromadb.utils.embedding_functions.OpenAIEmbeddingFunction(
    api_base=endpoint,
    api_key=api_key,
    model_name='text-embedding-ada-002'
)

# LangChain embedding model (used by the Chroma vector store below)
embedding_model = OpenAIEmbeddings(
    openai_api_base=endpoint,
    openai_api_key=api_key,
    model='text-embedding-ada-002'
)

# Chat model used throughout the workflow
llm = ChatOpenAI(
    openai_api_base=endpoint,
    openai_api_key=api_key,
    model="gpt-4o-mini",
    streaming=False
)

# Register defaults for LlamaIndex. The attribute is `embed_model` (not
# `embedding`); note that LlamaIndex normally expects its own LLM/embedding
# types, so depending on the installed version these LangChain objects may
# need to be wrapped in LlamaIndex adapters.
Settings.llm = llm
Settings.embed_model = embedding_model

# Shared state passed between every node in the LangGraph workflow
class AgentState(TypedDict):
    query: str                     # original user query
    expanded_query: str            # query after expansion/refinement
    context: List[Dict[str, Any]]  # retrieved chunks with metadata
    response: str                  # current draft answer
    precision_score: float         # 1/0 verdict from the precision judge
    groundedness_score: float      # 1-5 score from the groundedness judge
    groundedness_loop_count: int   # refinement iterations on groundedness
    precision_loop_count: int      # refinement iterations on precision
    feedback: str                  # reviewer feedback on the response
    query_feedback: str            # reviewer feedback on the expanded query
    groundedness_check: bool       # reserved; not set by the current nodes
    loop_max_iter: int             # cap on refinement loops
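
# Control flow implemented by create_workflow() below (sketch):
#   expand_query -> retrieve_context -> craft_response -> score_groundedness
#   score_groundedness: score >= 4 -> check_precision, else refine_response -> craft_response
#   check_precision:    verdict 1  -> END,             else refine_query    -> expand_query
#   Either loop falls through to max_iterations_reached once its loop count
#   exceeds loop_max_iter.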

def expand_query(state: AgentState) -> AgentState:
    """
    Expands the user query to improve retrieval of nutrition disorder-related information using few-shot prompting.

    Args:
        state (Dict): The current state of the workflow, containing the user query.

    Returns:
        Dict: The updated state with the expanded query.
    """
    system_message = '''
    You are a domain expert assisting in answering questions related to nutrition disorders.
    Convert the user query into something that a nutritionist would understand. Use domain-related words.
    Perform query expansion on the question received. If there are multiple common ways of phrasing the question, or common synonyms for key words in it, fold the alternative phrasings into the expanded query.
    If you have valid, non-empty feedback, use that feedback to improve the expanded query.

    If the query has multiple parts, split them into separate, simpler queries. This is the only case in which you may generate more than one query.

    If there are acronyms or words you are not familiar with, do not try to rephrase them.

    Return only one version of the query that expands the original user query to improve the retrieval of nutrition disorder-related information.
    Do not output anything before or after the query.
    '''

    expand_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Expand this query: {query} using the feedback: {query_feedback}")
    ])

    chain = expand_prompt | llm | StrOutputParser()
    state['expanded_query'] = chain.invoke({"query": state['query'], "query_feedback": state["query_feedback"]})
    return state

# Vector store holding the pre-built semantic chunks of the nutrition corpus
vector_store = Chroma(
    collection_name="semantic_chunks",
    persist_directory="./nutritional_db",
    embedding_function=embedding_model
)
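
# This assumes ./nutritional_db was populated in an earlier ingestion step. A
# minimal sketch of such a step, using the loaders and splitter imported above
# (the PDF directory path and chunking choices are illustrative assumptions):
#
#   docs = PyPDFDirectoryLoader("./nutrition_pdfs").load()
#   chunks = SemanticChunker(embedding_model).split_documents(docs)
#   Chroma.from_documents(
#       chunks, embedding_model,
#       collection_name="semantic_chunks",
#       persist_directory="./nutritional_db",
#   )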

# Plain similarity retriever over the vector store (top-5 chunks)
retriever = vector_store.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 5}
)

def retrieve_context(state: AgentState) -> AgentState:
    """
    Retrieves context from the vector store using the expanded query.

    Args:
        state (Dict): The current state of the workflow, containing the query and expanded query.

    Returns:
        Dict: The updated state with the retrieved context.
    """
    query = state['expanded_query']
    print("Query used for retrieval:", query)

    docs = retriever.invoke(query)
    print("Retrieved documents:", docs)

    # Keep both the text and its metadata so downstream nodes can use sources
    state['context'] = [
        {
            "content": doc.page_content,
            "metadata": doc.metadata
        }
        for doc in docs
    ]

    print("Extracted context with metadata:", state['context'])
    return state

def craft_response(state: Dict) -> Dict:
    """
    Generates a response using the retrieved context, focusing on nutrition disorders.

    Args:
        state (Dict): The current state of the workflow, containing the query and retrieved context.

    Returns:
        Dict: The updated state with the generated response.
    """
    print("---------craft_response---------")
    system_message = '''
    You are a domain expert assisting in answering questions related to nutrition disorders.
    Your goal is to provide accurate, helpful, and concise responses based on the provided context, which includes medical documents about nutritional disorders.
    The context will begin with the token Context: and will end before the token feedback:.
    If you have valid, non-empty feedback, use that feedback to improve your answer.
    The feedback will begin with the token feedback:.

    When crafting your response:
    1. Select only the context relevant to the question.
    2. User questions will begin with the token Query: and will end before the token Context:.
    3. Use the feedback provided, if it exists, to improve the accuracy of your response.

    Please adhere to the following guidelines:
    - Your response should only be about the question asked and nothing else.
    - Answer only using the context provided.
    - Do not mention anything about the context or the feedback in your final answer.
    '''

    response_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Query: {query}\nContext: {context}\n\nfeedback: {feedback}")
    ])

    # StrOutputParser keeps state['response'] a plain string; the judge nodes
    # below interpolate it directly into their prompts
    chain = response_prompt | llm | StrOutputParser()
    response = chain.invoke({
        "query": state['query'],
        "context": "\n".join([doc["content"] for doc in state['context']]),
        "feedback": state['feedback']
    })
    state['response'] = response
    print("intermediate response: ", response)

    return state

def score_groundedness(state: Dict) -> Dict:
    """
    Checks whether the response is grounded in the retrieved context.

    Args:
        state (Dict): The current state of the workflow, containing the response and context.

    Returns:
        Dict: The updated state with the groundedness score.
    """
    print("---------check_groundedness---------")
    system_message = '''
    You are an evaluator tasked with rating AI-generated responses to questions posed by users.
    You need to assess whether the provided response is grounded in the provided context.

    Please act as an impartial judge and evaluate the quality of the provided response, which attempts to answer a user question based on a provided context.
    You will be presented with the context used by the AI model to generate the response and with the AI-generated response to the question.
    In the input, the context will begin with the token Context: and end before the token Response:, while the AI-generated response will begin with Response:.

    Evaluation criteria:

    Score on a scale from 1 to 5:

    - 5 = Fully grounded: all facts are clearly and directly supported by the context provided.
    - 4 = Mostly grounded: minor inference, but closely tied to the context provided.
    - 3 = Partially grounded: some facts are supported by the context provided; others are assumed by the AI model.
    - 2 = Weakly grounded: most facts are not clearly supported by the context provided and were generated by the AI model.
    - 1 = Not grounded: contradicts or is unrelated to the context provided.

    Respond with a single number from 1 to 5 only.
    '''

    groundedness_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Context: {context}\nResponse: {response}\n\nGroundedness score:")
    ])

    chain = groundedness_prompt | llm | StrOutputParser()
    groundedness_score = float(chain.invoke({
        "context": "\n".join([doc["content"] for doc in state['context']]),
        "response": state["response"]
    }))
    print("groundedness_score: ", groundedness_score)
    state['groundedness_loop_count'] += 1
    print("#########Groundedness Incremented###########")
    state['groundedness_score'] = groundedness_score

    return state
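
# Note: float(...) above assumes the judge returns a bare number, as its
# prompt demands. If the model ever adds surrounding text, a more defensive
# parse (sketch) would be:
#
#   import re
#   raw = chain.invoke(...)
#   match = re.search(r"[1-5]", raw)
#   groundedness_score = float(match.group()) if match else 1.0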

def check_precision(state: Dict) -> Dict:
    """
    Checks whether the response precisely addresses the user's query.

    Args:
        state (Dict): The current state of the workflow, containing the query and response.

    Returns:
        Dict: The updated state with the precision score.
    """
    print("---------check_precision---------")
    system_message = '''
    Given the user query and the AI-generated response, verify whether the AI-generated response precisely addresses the user's query.
    In the input, the user query will begin with the token Query: and end before the token Response:, while the AI-generated response will begin with Response:.

    Give a verdict of 1 if the AI-generated response addresses the user's query and 0 if it does not.

    DO NOT output anything before or after the verdict integer value of 1 or 0.
    '''

    precision_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Query: {query}\nResponse: {response}\n\nPrecision score:")
    ])

    chain = precision_prompt | llm | StrOutputParser()
    precision_score = float(chain.invoke({
        "query": state['query'],
        "response": state['response']
    }))
    state['precision_score'] = precision_score
    print("precision_score:", precision_score)
    state['precision_loop_count'] += 1
    print("#########Precision Incremented###########")
    return state

def refine_response(state: Dict) -> Dict:
    """
    Suggests improvements for the generated response.

    Args:
        state (Dict): The current state of the workflow, containing the query and response.

    Returns:
        Dict: The updated state with response refinement suggestions.
    """
    print("---------refine_response---------")

    system_message = '''
    You are an expert in reviewing AI-generated responses. Your task is to provide constructive feedback on the following response to help improve its accuracy, clarity, and completeness.
    In the input, the user query will begin with the token Query: and end before the token Response:, while the AI-generated response will begin with Response:.
    Identify potential gaps, unsupported claims, ambiguous language, or missing details in the response.

    Do not rewrite the response; only suggest improvements, in a concise bullet-point format where possible, to enhance accuracy and completeness.
    '''

    refine_response_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Query: {query}\nResponse: {response}\n\n"
                 "What improvements can be made to enhance accuracy and completeness?")
    ])

    chain = refine_response_prompt | llm | StrOutputParser()

    # Carry the previous draft along with the suggestions so craft_response
    # can see both on the next pass
    feedback = f"Previous Response: {state['response']}\nSuggestions: {chain.invoke({'query': state['query'], 'response': state['response']})}"
    print("feedback: ", feedback)
    print(f"State: {state}")
    state['feedback'] = feedback
    return state

def refine_query(state: Dict) -> Dict:
    """
    Suggests improvements for the expanded query.

    Args:
        state (Dict): The current state of the workflow, containing the query and expanded query.

    Returns:
        Dict: The updated state with query refinement suggestions.
    """
    print("---------refine_query---------")
    system_message = '''
    You are a query optimization expert. Your task is to provide constructive suggestions to improve the expanded query.
    Take the user's original query as a reference and focus on improving the expanded query by identifying missing keywords, refining the scope, resolving ambiguities, and increasing precision for better information retrieval.
    In the input, the user's original query will begin with the token Original Query:, while the expanded query will begin with Expanded Query:.

    Do not replace the expanded query; instead, suggest improvements in a structured and actionable format, such as bullet points or brief explanations.
    '''

    refine_query_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Original Query: {query}\nExpanded Query: {expanded_query}\n\n"
                 "What improvements can be made for a better search?")
    ])

    chain = refine_query_prompt | llm | StrOutputParser()

    # As with refine_response, keep the previous expansion next to the
    # suggestions so expand_query can act on both
    query_feedback = f"Previous Expanded Query: {state['expanded_query']}\nSuggestions: {chain.invoke({'query': state['query'], 'expanded_query': state['expanded_query']})}"
    print("query_feedback: ", query_feedback)
    print(f"Groundedness loop count: {state['groundedness_loop_count']}")
    state['query_feedback'] = query_feedback
    return state

def should_continue_groundedness(state):
    """Decides whether groundedness is sufficient or the response needs refinement."""
    print("---------should_continue_groundedness---------")
    print("groundedness loop count: ", state['groundedness_loop_count'])
    if state['groundedness_score'] >= 4:
        print("Moving to precision")
        return "check_precision"
    if state["groundedness_loop_count"] > state['loop_max_iter']:
        return "max_iterations_reached"
    print("---------Groundedness Score Threshold Not met. Refining Response-----------")
    return "refine_response"

def should_continue_precision(state: Dict) -> str:
    """Decides whether precision is sufficient or the query needs refinement."""
    print("---------should_continue_precision---------")
    print("precision loop count: ", state["precision_loop_count"])
    if state['precision_score'] == 1:
        return "pass"
    if state["precision_loop_count"] > state['loop_max_iter']:
        return "max_iterations_reached"
    print("---------Precision Score Threshold Not met. Refining Query-----------")
    return "refine_query"

def max_iterations_reached(state: AgentState) -> AgentState:
    """Handles the case where the maximum number of iterations is reached."""
    state['response'] = "We need more context to provide an accurate answer."
    return state

def create_workflow() -> StateGraph:
    """Creates the LangGraph workflow for the AI nutrition agent."""
    workflow = StateGraph(AgentState)

    # Nodes
    workflow.add_node("expand_query", expand_query)
    workflow.add_node("retrieve_context", retrieve_context)
    workflow.add_node("craft_response", craft_response)
    workflow.add_node("score_groundedness", score_groundedness)
    workflow.add_node("refine_response", refine_response)
    workflow.add_node("check_precision", check_precision)
    workflow.add_node("refine_query", refine_query)
    workflow.add_node("max_iterations_reached", max_iterations_reached)

    # Main path
    workflow.add_edge(START, "expand_query")
    workflow.add_edge("expand_query", "retrieve_context")
    workflow.add_edge("retrieve_context", "craft_response")
    workflow.add_edge("craft_response", "score_groundedness")

    # Groundedness loop: refine the response until it is grounded enough
    workflow.add_conditional_edges(
        "score_groundedness",
        should_continue_groundedness,
        {
            "check_precision": "check_precision",
            "refine_response": "refine_response",
            "max_iterations_reached": "max_iterations_reached"
        }
    )

    workflow.add_edge("refine_response", "craft_response")

    # Precision loop: refine the query until the answer is on target
    workflow.add_conditional_edges(
        "check_precision",
        should_continue_precision,
        {
            "pass": END,
            "refine_query": "refine_query",
            "max_iterations_reached": "max_iterations_reached"
        }
    )

    workflow.add_edge("refine_query", "expand_query")
    workflow.add_edge("max_iterations_reached", END)

    return workflow

# Compile the workflow graph once at import time
WORKFLOW_APP = create_workflow().compile()
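
# Optional (sketch): LangGraph can render the compiled graph for inspection,
# e.g. WORKFLOW_APP.get_graph().print_ascii(), which may require the
# `grandalf` package depending on the installed version.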

@tool
def agentic_rag(query: str):
    """
    Runs the RAG-based agent with conversation history for context-aware responses.

    Args:
        query (str): The current user query.

    Returns:
        Dict[str, Any]: The updated state with the generated response and conversation history.
    """
    # Fresh state for each run; counters start at zero and loops are capped
    inputs = {
        "query": query,
        "expanded_query": "",
        "context": [],
        "response": "",
        "precision_score": 0,
        "groundedness_score": 0,
        "groundedness_loop_count": 0,
        "precision_loop_count": 0,
        "feedback": "",
        "query_feedback": "",
        "groundedness_check": False,  # declared in AgentState; unused by the current nodes
        "loop_max_iter": 5
    }

    output = WORKFLOW_APP.invoke(inputs)

    return output
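
# Example usage (sketch; LangChain tools take a dict payload keyed by
# argument name, and the query shown is illustrative):
#
#   final_state = agentic_rag.invoke({"query": "What causes iron-deficiency anemia?"})
#   print(final_state["response"])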

# --- Input moderation with Llama Guard (via Groq) ---
llama_guard_client = Groq(api_key=llama_api_key)

def filter_input_with_llama_guard(user_input, model="meta-llama/llama-guard-4-12b"):
    """
    Filters user input using Llama Guard to ensure it is safe.

    Parameters:
    - user_input: The input provided by the user.
    - model: The Llama Guard model to be used for filtering (default is "meta-llama/llama-guard-4-12b").

    Returns:
    - The classification produced by Llama Guard, or None if the call fails.
    """
    try:
        response = llama_guard_client.chat.completions.create(
            messages=[{"role": "user", "content": user_input}],
            model=model,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"Error with Llama Guard: {e}")
        return None
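
# Llama Guard replies with "safe", or with "unsafe" followed on the next line
# by the violated category code (e.g. "S6"). The Streamlit handler below
# collapses the newline and lets "safe" plus a small allowlist of categories
# through.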

# --- Conversational support agent with long-term memory ---

class NutritionBot:
    def __init__(self):
        """
        Initialize the NutritionBot class, setting up memory, the LLM client, tools, and the agent executor.
        """
        # Long-term conversation memory (mem0)
        self.memory = MemoryClient(api_key=m0_api_key)

        # Chat model used by the agent
        self.client = ChatOpenAI(
            model_name="gpt-4o-mini",
            openai_api_key=api_key,
            openai_api_base=endpoint,
            temperature=0
        )

        # The agentic RAG workflow is exposed to the agent as a single tool
        tools = [agentic_rag]

        system_prompt = """You are a caring and knowledgeable Medical Support Agent, specializing in nutrition disorder-related guidance. Your goal is to provide accurate, empathetic, and tailored nutritional recommendations while ensuring a seamless customer experience.
        Guidelines for Interaction:
        - Maintain a polite, professional, and reassuring tone.
        - Show genuine empathy for customer concerns and health challenges.
        - Reference past interactions to provide personalized and consistent advice.
        - Engage with the customer by asking about their food preferences, dietary restrictions, and lifestyle before offering recommendations.
        - Ensure consistent and accurate information across conversations.
        - If any detail is unclear or missing, proactively ask for clarification.
        - Always use the agentic_rag tool to retrieve up-to-date and evidence-based nutrition insights.
        - Keep track of ongoing issues and follow-ups to ensure continuity in support.
        Your primary goal is to help customers make informed nutrition decisions that align with their health conditions and personal preferences.
        """

        # Agent prompt: system persona, user input, and a scratchpad for tool calls
        prompt = ChatPromptTemplate.from_messages([
            ("system", system_prompt),
            ("human", "{input}"),
            ("placeholder", "{agent_scratchpad}")
        ])

        # Tool-calling agent wired to the workflow tool
        agent = create_tool_calling_agent(self.client, tools, prompt)
        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

    def store_customer_interaction(self, user_id: str, message: str, response: str, metadata: Dict = None):
        """
        Store a customer interaction in memory for future reference.

        Args:
            user_id (str): Unique identifier for the customer.
            message (str): Customer's query or message.
            response (str): Chatbot's response.
            metadata (Dict, optional): Additional metadata for the interaction.
        """
        if metadata is None:
            metadata = {}

        # Timestamp every interaction so history can be ordered later
        metadata["timestamp"] = datetime.now().isoformat()

        conversation = [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response}
        ]

        self.memory.add(
            conversation,
            user_id=user_id,
            output_format="v1.1",
            metadata=metadata
        )

    def get_relevant_history(self, user_id: str, query: str) -> List[Dict]:
        """
        Retrieve past interactions relevant to the current query.

        Args:
            user_id (str): Unique identifier for the customer.
            query (str): The customer's current query.

        Returns:
            List[Dict]: A list of relevant past interactions.
        """
        return self.memory.search(
            query=query,
            user_id=user_id,
            limit=5
        )
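
    # Note: mem0's search results are dicts whose extracted text lives under
    # the "memory" key (the exact shape can vary by output_format); that key
    # is what handle_customer_query reads when rebuilding the context string.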

    def handle_customer_query(self, user_id: str, query: str) -> str:
        """
        Process a customer's query and provide a response, taking past interactions into account.

        Args:
            user_id (str): Unique identifier for the customer.
            query (str): Customer's query.

        Returns:
            str: Chatbot's response.
        """
        # Pull memories related to this query
        relevant_history = self.get_relevant_history(user_id, query)

        # Each search hit is a single stored memory; include it once rather
        # than echoing the same text as both customer and support turns
        context = "Previous relevant interactions:\n"
        for memory in relevant_history:
            context += f"- {memory['memory']}\n"
            context += "---\n"

        print("Context: ", context)

        # Combine the memory context with the current query for the agent
        prompt = f"""
        Context:
        {context}

        Current customer query: {query}

        Provide a helpful response that takes into account any relevant past interactions.
        """

        # Run the agent (which may call the agentic_rag tool)
        response = self.agent_executor.invoke({"input": prompt})

        # Persist this exchange for future personalization
        self.store_customer_interaction(
            user_id=user_id,
            message=query,
            response=response["output"],
            metadata={"type": "support_query"}
        )

        return response['output']
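
# Example usage (sketch; assumes the API keys above are configured, and the
# user id shown is illustrative):
#
#   bot = NutritionBot()
#   print(bot.handle_customer_query("demo_user", "What are the symptoms of scurvy?"))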

# --- Streamlit UI ---
def nutrition_disorder_streamlit():
    """
    A Streamlit-based UI for the Nutrition Disorder Specialist Agent.
    """
    st.title("Nutrition Disorder Specialist")
    st.write("Ask me anything about nutrition disorders, symptoms, causes, treatments, and more.")
    st.write("You might try asking questions like:")
    st.write("- In what ways can increasing dietary fiber help alleviate symptoms of functional bowel disorders?")
    st.write("- What dietary changes can help reduce the risk of diabetes mellitus in those suffering from obesity?")
    st.write("- What are some effective dietary changes to help lower high cholesterol levels?")
    st.write("Type 'exit' to end the conversation.")

    # Initialize session state
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    if 'user_id' not in st.session_state:
        st.session_state.user_id = None

    # Simple login: ask for a name before starting the chat
    if st.session_state.user_id is None:
        with st.form("login_form", clear_on_submit=True):
            user_id = st.text_input("Please enter your name to begin:")
            submit_button = st.form_submit_button("Login")
            if submit_button and user_id:
                st.session_state.user_id = user_id
                st.session_state.chat_history.append({
                    "role": "assistant",
                    "content": f"Welcome, {user_id}! How can I help you with nutrition disorders today?"
                })
                st.session_state.login_submitted = True
        if st.session_state.get("login_submitted", False):
            st.session_state.pop("login_submitted")
            st.rerun()
    else:
        # Replay the conversation so far
        for message in st.session_state.chat_history:
            with st.chat_message(message["role"]):
                st.write(message["content"])

        # Chat input with explicit exit handling
        user_query = st.chat_input("Type your question here (or 'exit' to end)...")
        if user_query:
            if user_query.lower() == "exit":
                st.session_state.chat_history.append({"role": "user", "content": "exit"})
                with st.chat_message("user"):
                    st.write("exit")
                goodbye_msg = "Goodbye! Feel free to return if you have more questions about nutrition disorders."
                st.session_state.chat_history.append({"role": "assistant", "content": goodbye_msg})
                with st.chat_message("assistant"):
                    st.write(goodbye_msg)
                st.session_state.user_id = None
                st.rerun()
                return

            st.session_state.chat_history.append({"role": "user", "content": user_query})
            with st.chat_message("user"):
                st.write(user_query)

            # Moderate the input first; the filter returns None on error
            filtered_result = filter_input_with_llama_guard(user_query)
            filtered_result = (filtered_result or "").replace("\n", " ")

            # Allow "safe" plus the S6 (specialized advice) and S7 (privacy) categories
            if filtered_result in ["safe", "unsafe S6", "unsafe S7"]:
                try:
                    if 'chatbot' not in st.session_state:
                        st.session_state.chatbot = NutritionBot()
                    response = st.session_state.chatbot.handle_customer_query(st.session_state.user_id, user_query)
                    with st.chat_message("assistant"):
                        st.write(response)
                    st.session_state.chat_history.append({"role": "assistant", "content": response})
                except Exception as e:
                    error_msg = f"Sorry, I encountered an error while processing your query. Please try again. Error: {str(e)}"
                    with st.chat_message("assistant"):
                        st.write(error_msg)
                    st.session_state.chat_history.append({"role": "assistant", "content": error_msg})
            else:
                inappropriate_msg = "I apologize, but I cannot process that input as it may be inappropriate. Please try again."
                with st.chat_message("assistant"):
                    st.write(inappropriate_msg)
                st.session_state.chat_history.append({"role": "assistant", "content": inappropriate_msg})

if __name__ == "__main__":
    nutrition_disorder_streamlit()