import streamlit as st
import os
from huggingface_hub import InferenceClient
from textblob import TextBlob
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
import pandas as pd

# Load environment variables
load_dotenv()

# Load the drug-side effects dataset
df = pd.read_csv('./drugs_side_effects_drugs_com.csv')
df = df[['drug_name', 'side_effects']].dropna()

# Create a set of valid drug names to compare user queries against
valid_drugs = set(df['drug_name'].str.lower())

# Configure Hugging Face API
client = InferenceClient(
    "microsoft/Phi-3-mini-4k-instruct",
    token=os.getenv("HF_API_KEY"),
)
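# Illustrative guard (an assumption, not part of the original listing): if HF_API_KEY
# is missing from the .env file, InferenceClient above silently receives token=None and
# requests fail later with a less obvious error, so surface a clear warning up front.
if not os.getenv("HF_API_KEY"):
    st.warning("HF_API_KEY is not set; add it to your .env file or Space secrets.")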
# Define System Prompts
SYSTEM_PROMPT_GENERAL = """
You are CareBot, a pharmacist and medical expert known as Treasure. Your goal is to provide empathetic, supportive, and detailed responses tailored to the user's needs.
Behavior Guidelines:
1. Introduction: Greet the user as Treasure during the first interaction.
2. Personalization: Adapt responses to the user's tone and emotional state.
3. Empathy: Respond warmly to the user's concerns and questions.
4. Evidence-Based: Use reliable sources to answer queries. For missing data, advise seeking professional consultation.
5. Focus: Avoid providing off-topic information; address the user's query specifically.
6. Encouragement: Balance acknowledging concerns with actionable and constructive suggestions.
7. Context Integration: Use the given context to deliver accurate and relevant answers without repeating the context explicitly.
Objective:
Deliver thoughtful, empathetic, and medically sound advice based on the user's query.
Response Style:
- Detailed but concise
- Professional, empathetic tone
- Clear and actionable guidance
"""

# Define LangChain Prompt Template
prompt_template = PromptTemplate(
    input_variables=["system_prompt", "user_input"],
    template="{system_prompt}\n\nUser: {user_input}\nAssistant:"
)
st.title("CareBot: AI Medical Assistant for Drug Information and Side Effects")

# Initialize the session state
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {
            "role": "assistant",
            "content": (
                "Hi there! I'm Treasure, your friendly pharmacist. "
                "This AI-powered chatbot provides reliable information about drugs, their side effects, "
| "and related medical conditions. Powered by the Groq API and LangChain, it delivers real-time, " | |
| "accurate responses.\n\n" | |
| "Example Questions:\n" | |
| "- What are the side effects of aspirin?\n" | |
| "- Can ibuprofen cause dizziness?\n\n" | |
| "Disclaimer: This chatbot is for informational purposes only and not a substitute for professional " | |
| "medical advice.\n\n\n" | |
| "How can I help you today?" | |
| ) | |
| } | |
| ] | |
# Display previous messages
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])
# Function to check if the user's query is relevant to the dataset
def is_relevant_query(query):
    query_tokens = set(query.lower().split())
    return bool(valid_drugs.intersection(query_tokens))
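# Illustrative note on behavior: the check matches single tokens exactly, so
# is_relevant_query("can ibuprofen cause dizziness") is True only if "ibuprofen"
# appears verbatim (lower-cased) as a drug_name entry; multi-word drug names
# in the dataset will never intersect with individual query tokens.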
# Chat input and processing
if prompt := st.chat_input():
    # Append user message to the session state
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Sentiment Analysis
    user_sentiment = TextBlob(prompt).sentiment.polarity
    # Adjust the system prompt when the user expresses negative sentiment
    system_prompt = SYSTEM_PROMPT_GENERAL
    if user_sentiment < 0:
        system_prompt = f"""{system_prompt}
The user seems to be feeling down. Prioritize empathetic responses and open-ended questions."""
    # Format prompt using LangChain's PromptTemplate
    formatted_prompt = prompt_template.format(
        system_prompt=system_prompt,
        user_input=prompt
    )
    # Generate a response using the Hugging Face API, streamed in chunks
    response = ""
    for message in client.chat_completion(
        messages=[{"role": "user", "content": formatted_prompt}],
        max_tokens=500,
        stream=True,
    ):
        # The final streamed chunk can carry an empty delta, so guard against None
        response += message.choices[0].delta.content or ""
    # Strip any echoed role labels from the generated text
    if "Treasure:" in response:
        response = response.split("Treasure:", 1)[1].strip()
    elif "Assistant:" in response:
        response = response.split("Assistant:", 1)[1].strip()
    # Append assistant message to the session state
    st.session_state.messages.append({"role": "assistant", "content": response.strip()})
    st.chat_message("assistant").write(response.strip())
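# To run locally (an assumed setup based on the imports above; the file name app.py
# is hypothetical):
#   pip install streamlit huggingface_hub textblob langchain python-dotenv pandas
#   echo "HF_API_KEY=<your Hugging Face token>" > .env
#   # place drugs_side_effects_drugs_com.csv next to app.py
#   streamlit run app.py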