Update app.py
Browse files
app.py
CHANGED
|
@@ -1,153 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
|
|
|
|
|
|
| 2 |
from dotenv import load_dotenv
|
| 3 |
-
load_dotenv()
|
| 4 |
|
| 5 |
-
from langchain_astradb import AstraDBVectorStore
|
| 6 |
-
from langchain_google_genai import GoogleGenerativeAIEmbeddings
|
| 7 |
-
from langchain.prompts import PromptTemplate
|
| 8 |
-
from langchain.chains import ConversationalRetrievalChain
|
| 9 |
-
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 10 |
import streamlit as st
|
| 11 |
-
import
|
| 12 |
-
import textwrap
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
|
| 20 |
-
#
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
|
|
|
| 26 |
|
| 27 |
-
#
|
| 28 |
-
vstore = AstraDBVectorStore(
|
| 29 |
-
collection_name = "Bhagavad_gita_data",
|
| 30 |
-
embedding = embeddings,
|
| 31 |
-
token = os.getenv("ASTRA_DB_APPLICATION_TOKEN"),
|
| 32 |
-
api_endpoint = os.getenv("ASTRA_DB_API_ENDPOINT"),
|
| 33 |
-
)
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
prompt_template = """
|
| 39 |
-
You are a wise counselor drawing from ancient Indian wisdom to offer psychological guidance. Your role is to provide practical, concise advice for modern challenges.
|
| 40 |
-
You are going to be used for a psychiatrist
|
| 41 |
Follow these guidelines:
|
| 42 |
-
|
| 43 |
1. Begin with a brief, relatable insight from timeless teachings.
|
| 44 |
-
|
| 45 |
2. Offer 4 to 6 specific, actionable points of advice.
|
| 46 |
-
|
| 47 |
3. Each point should start on a new line and be clear and concise.
|
| 48 |
-
|
| 49 |
4. Connect each piece of advice to universal principles of success and well-being.
|
| 50 |
-
|
| 51 |
5. Use metaphors or examples from ancient texts without explicitly naming them.
|
| 52 |
-
|
| 53 |
6. Conclude with an encouraging statement that motivates the user to apply the advice.
|
| 54 |
-
|
| 55 |
7. Avoid religious terminology. Use phrases like "ancient wisdom" or "timeless teachings" instead.
|
| 56 |
-
|
| 57 |
8. Ensure your response is practical, universally applicable, and inspirational.
|
|
|
|
|
|
|
|
|
|
| 58 |
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
10. If possible try to give only Bhagavad Gita Verse related to it at end don't get any other verse from any other book and give verse Translation and number only as many don't know to read sanskrit.
|
| 62 |
|
| 63 |
-
|
|
|
|
| 64 |
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
Human: {human_input}
|
| 68 |
-
Chat History: {chat_history}
|
| 69 |
"""
|
| 70 |
|
| 71 |
-
|
| 72 |
PROMPT = PromptTemplate(
|
| 73 |
-
template
|
| 74 |
-
input_variables
|
| 75 |
-
)
|
| 76 |
-
|
| 77 |
-
qa_chain = ConversationalRetrievalChain.from_llm(
|
| 78 |
-
llm,
|
| 79 |
-
retriever = retriever,
|
| 80 |
-
combine_docs_chain_kwargs = {"prompt": PROMPT},
|
| 81 |
-
return_source_documents = False,
|
| 82 |
)
|
| 83 |
|
| 84 |
-
#
|
| 85 |
-
def format_and_wrap_text(text, wrap_length=100):
|
| 86 |
-
# Split the text into main points
|
| 87 |
-
main_points = text.split('**')
|
| 88 |
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
if subpoint.strip():
|
| 98 |
-
# Wrap each subpoint and add a bullet
|
| 99 |
-
wrapped_subpoint = textwrap.fill(subpoint, wrap_length)
|
| 100 |
-
formatted_text += f"{wrapped_subpoint}\n"
|
| 101 |
|
| 102 |
-
|
|
|
|
|
|
|
| 103 |
|
| 104 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
|
| 106 |
-
#
|
| 107 |
-
st.set_page_config(page_title="Arjun AI")
|
| 108 |
|
| 109 |
-
#
|
| 110 |
-
st.
|
| 111 |
-
st.
|
| 112 |
|
| 113 |
-
# Initialize chat history
|
| 114 |
if "messages" not in st.session_state:
|
| 115 |
st.session_state.messages = []
|
| 116 |
|
| 117 |
-
# Display
|
| 118 |
-
for
|
| 119 |
-
with st.chat_message(
|
| 120 |
-
st.markdown(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
|
| 122 |
-
# React to user input
|
| 123 |
-
if prompt := st.chat_input("What is your question?"):
|
| 124 |
-
# Display user message in chat message container
|
| 125 |
-
st.chat_message("user").markdown(prompt)
|
| 126 |
-
# Add user message to chat history
|
| 127 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
|
|
|
|
|
|
| 128 |
|
| 129 |
with st.chat_message("assistant"):
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
#
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
time.sleep(0.05)
|
| 145 |
-
# Add a blinking cursor to simulate typing
|
| 146 |
-
message_placeholder.markdown(full_response)
|
| 147 |
-
|
| 148 |
-
message_placeholder.markdown(full_response)
|
| 149 |
-
|
| 150 |
-
# Add assistant response to chat history
|
| 151 |
-
st.session_state.messages.append({"role": "assistant", "content": full_response})
|
| 152 |
-
|
| 153 |
|
|
|
|
| 1 |
+
# import os
|
| 2 |
+
# from dotenv import load_dotenv
|
| 3 |
+
# load_dotenv()
|
| 4 |
+
|
| 5 |
+
# from langchain_astradb import AstraDBVectorStore
|
| 6 |
+
# from langchain_google_genai import GoogleGenerativeAIEmbeddings
|
| 7 |
+
# from langchain.prompts import PromptTemplate
|
| 8 |
+
# from langchain.chains import ConversationalRetrievalChain
|
| 9 |
+
# from langchain_google_genai import ChatGoogleGenerativeAI
|
| 10 |
+
# import streamlit as st
|
| 11 |
+
# import time
|
| 12 |
+
# import textwrap
|
| 13 |
+
|
| 14 |
+
# # embeddings
|
| 15 |
+
# embeddings = GoogleGenerativeAIEmbeddings(
|
| 16 |
+
# model = "models/embedding-001",
|
| 17 |
+
# task_type = "retrieval_document"
|
| 18 |
+
# )
|
| 19 |
+
|
| 20 |
+
# # llm
|
| 21 |
+
# llm = ChatGoogleGenerativeAI(
|
| 22 |
+
# # model = "gemini-1.5-pro",
|
| 23 |
+
# model = "Gemini 2.0 Flash",
|
| 24 |
+
# temperature = 0.7,
|
| 25 |
+
# )
|
| 26 |
+
|
| 27 |
+
# # Get Info about the Database
|
| 28 |
+
# vstore = AstraDBVectorStore(
|
| 29 |
+
# collection_name = "Bhagavad_gita_data",
|
| 30 |
+
# embedding = embeddings,
|
| 31 |
+
# token = os.getenv("ASTRA_DB_APPLICATION_TOKEN"),
|
| 32 |
+
# api_endpoint = os.getenv("ASTRA_DB_API_ENDPOINT"),
|
| 33 |
+
# )
|
| 34 |
+
|
| 35 |
+
# # Now Retrieve the Documents from Server
|
| 36 |
+
# retriever = vstore.as_retriever(search_kwargs = {"k" : 5})
|
| 37 |
+
|
| 38 |
+
# prompt_template = """
|
| 39 |
+
# You are a wise counselor drawing from ancient Indian wisdom to offer psychological guidance. Your role is to provide practical, concise advice for modern challenges.
|
| 40 |
+
# You are going to be used for a psychiatrist assistant who gives advice in the context of the Bhagavad Gita.
|
| 41 |
+
# Follow these guidelines:
|
| 42 |
+
|
| 43 |
+
# 1. Begin with a brief, relatable insight from timeless teachings.
|
| 44 |
+
|
| 45 |
+
# 2. Offer 4 to 6 specific, actionable points of advice.
|
| 46 |
+
|
| 47 |
+
# 3. Each point should start on a new line and be clear and concise.
|
| 48 |
+
|
| 49 |
+
# 4. Connect each piece of advice to universal principles of success and well-being.
|
| 50 |
+
|
| 51 |
+
# 5. Use metaphors or examples from ancient texts without explicitly naming them.
|
| 52 |
+
|
| 53 |
+
# 6. Conclude with an encouraging statement that motivates the user to apply the advice.
|
| 54 |
+
|
| 55 |
+
# 7. Avoid religious terminology. Use phrases like "ancient wisdom" or "timeless teachings" instead.
|
| 56 |
+
|
| 57 |
+
# 8. Ensure your response is practical, universally applicable, and inspirational.
|
| 58 |
+
|
| 59 |
+
# 9. Be strict: if someone gives a wrong or useless input that is not relevant to a psychological issue or dilemma, reply asking them to enter a proper question.
|
| 60 |
+
|
| 61 |
+
# 10. If possible try to give only Bhagavad Gita Verse related to it at end don't get any other verse from any other book and give verse Translation and number only as many don't know to read sanskrit.
|
| 62 |
+
|
| 63 |
+
# 11. If you don't know the verse from Bhagavad Gita just search through the context and then give Answer in Hindi.
|
| 64 |
+
|
| 65 |
+
# Context: {context}
|
| 66 |
+
# Question: {question}
|
| 67 |
+
# Human: {human_input}
|
| 68 |
+
# Chat History: {chat_history}
|
| 69 |
+
# """
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
# PROMPT = PromptTemplate(
|
| 73 |
+
# template = prompt_template,
|
| 74 |
+
# input_variables = ["context", "question", "human_input", "chat_history"]
|
| 75 |
+
# )
|
| 76 |
+
|
| 77 |
+
# qa_chain = ConversationalRetrievalChain.from_llm(
|
| 78 |
+
# llm,
|
| 79 |
+
# retriever = retriever,
|
| 80 |
+
# combine_docs_chain_kwargs = {"prompt": PROMPT},
|
| 81 |
+
# return_source_documents = False,
|
| 82 |
+
# )
|
| 83 |
+
|
| 84 |
+
# # format the output in good format
|
| 85 |
+
# def format_and_wrap_text(text, wrap_length=100):
|
| 86 |
+
# # Split the text into main points
|
| 87 |
+
# main_points = text.split('**')
|
| 88 |
+
|
| 89 |
+
# formatted_text = ""
|
| 90 |
+
# for i in range(1, len(main_points), 2):
|
| 91 |
+
# # Add the main point title
|
| 92 |
+
# formatted_text += f"{main_points[i]}\n"
|
| 93 |
+
|
| 94 |
+
# # Split the subpoints by '* '
|
| 95 |
+
# subpoints = main_points[i+1].strip().split('* ')
|
| 96 |
+
# for subpoint in subpoints:
|
| 97 |
+
# if subpoint.strip():
|
| 98 |
+
# # Wrap each subpoint and add a bullet
|
| 99 |
+
# wrapped_subpoint = textwrap.fill(subpoint, wrap_length)
|
| 100 |
+
# formatted_text += f"{wrapped_subpoint}\n"
|
| 101 |
+
|
| 102 |
+
# formatted_text += "\n"
|
| 103 |
+
|
| 104 |
+
# print(formatted_text)
|
| 105 |
+
|
| 106 |
+
# # Streamlit App Design
|
| 107 |
+
# st.set_page_config(page_title="Arjun AI")
|
| 108 |
+
|
| 109 |
+
# # app
|
| 110 |
+
# st.title("Arjun AI")
|
| 111 |
+
# st.write("Get Yourself Help from Krishna's Teaching of Bhagavad Gita")
|
| 112 |
+
|
| 113 |
+
# # Initialize chat history
|
| 114 |
+
# if "messages" not in st.session_state:
|
| 115 |
+
# st.session_state.messages = []
|
| 116 |
+
|
| 117 |
+
# # Display chat messages from history on app rerun
|
| 118 |
+
# for message in st.session_state.messages:
|
| 119 |
+
# with st.chat_message(message["role"]):
|
| 120 |
+
# st.markdown(message["content"])
|
| 121 |
+
|
| 122 |
+
# # React to user input
|
| 123 |
+
# if prompt := st.chat_input("What is your question?"):
|
| 124 |
+
# # Display user message in chat message container
|
| 125 |
+
# st.chat_message("user").markdown(prompt)
|
| 126 |
+
# # Add user message to chat history
|
| 127 |
+
# st.session_state.messages.append({"role": "user", "content": prompt})
|
| 128 |
+
|
| 129 |
+
# with st.chat_message("assistant"):
|
| 130 |
+
# message_placeholder = st.empty()
|
| 131 |
+
# full_response = ""
|
| 132 |
+
|
| 133 |
+
# # Get response from QA chain
|
| 134 |
+
# result = qa_chain({
|
| 135 |
+
# "question": prompt,
|
| 136 |
+
# "human_input": prompt,
|
| 137 |
+
# "chat_history": [(msg["role"], msg["content"]) for msg in st.session_state.messages]
|
| 138 |
+
# })
|
| 139 |
+
# full_response = result['answer']
|
| 140 |
+
|
| 141 |
+
# # Simulate stream of response with milliseconds delay
|
| 142 |
+
# for chunk in full_response.split():
|
| 143 |
+
# full_response = f"{full_response}"
|
| 144 |
+
# time.sleep(0.05)
|
| 145 |
+
# # Add a blinking cursor to simulate typing
|
| 146 |
+
# message_placeholder.markdown(full_response)
|
| 147 |
+
|
| 148 |
+
# message_placeholder.markdown(full_response)
|
| 149 |
+
|
| 150 |
+
# # Add assistant response to chat history
|
| 151 |
+
# st.session_state.messages.append({"role": "assistant", "content": full_response})
|
| 152 |
+
|
| 153 |
import os
|
| 154 |
+
import time
|
| 155 |
+
import textwrap
|
| 156 |
from dotenv import load_dotenv
|
|
|
|
| 157 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 158 |
import streamlit as st
|
| 159 |
+
from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type
|
|
|
|
| 160 |
|
| 161 |
+
from langchain.prompts import PromptTemplate
|
| 162 |
+
from langchain.chains import RetrievalQA
|
| 163 |
+
from langchain_huggingface import HuggingFaceEmbeddings
|
| 164 |
+
from langchain_community.vectorstores import FAISS
|
| 165 |
+
from langchain_community.llms import HuggingFaceHub
|
| 166 |
|
| 167 |
+
# --- Optional Astra (uncomment if you want Astra) ---
|
| 168 |
+
# Astra DB support is optional: probe the import once at startup so the
# app still runs when langchain_astradb is not installed.
try:
    from langchain_astradb import AstraDBVectorStore
except Exception:  # missing package or incompatible version
    ASTRA_ENABLED = False
else:
    ASTRA_ENABLED = True
|
| 174 |
|
| 175 |
+
# ===================== Setup =====================
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 176 |
|
| 177 |
+
# ----- one-time app setup -----
load_dotenv()  # pull HF / Astra credentials from .env into the environment

st.set_page_config(page_title="Arjun AI", page_icon="🧘", layout="wide")
st.title("Arjun AI")
st.write("Get Yourself Help from timeless teachings of the Bhagavad Gita (practical, modern, non-religious)")

# The Hugging Face Inference API token is required for the free LLM endpoint.
HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN", "")
if not HF_TOKEN:
    st.warning("HUGGINGFACEHUB_API_TOKEN is not set. Add it to your .env. (free token from huggingface.co/settings/tokens)")
|
| 187 |
+
|
| 188 |
+
# Embeddings (free, CPU)
|
| 189 |
+
@st.cache_resource
def get_embeddings():
    """Build the (cached) sentence-transformer embedding model.

    all-MiniLM-L6-v2 is a small CPU-friendly model that is accurate
    enough for retrieval-augmented generation.
    """
    model_id = "sentence-transformers/all-MiniLM-L6-v2"
    return HuggingFaceEmbeddings(model_name=model_id)
|
| 193 |
+
|
| 194 |
+
# LLM via Hugging Face Inference API (free)
|
| 195 |
+
@st.cache_resource
def get_llm():
    """Build the (cached) LLM served by the Hugging Face Inference API.

    Swap ``repo_id`` for another hosted instruct model if desired
    (e.g. mistralai/Mistral-7B-Instruct-v0.2, google/gemma-2-2b-it).
    """
    generation_kwargs = {
        "temperature": 0.7,
        "max_new_tokens": 800,
        "top_p": 0.9,
    }
    return HuggingFaceHub(
        repo_id="mistralai/Mistral-7B-Instruct-v0.3",
        model_kwargs=generation_kwargs,
        huggingfacehub_api_token=HF_TOKEN,
    )
|
| 208 |
+
|
| 209 |
+
# Vector store
|
| 210 |
+
@st.cache_resource
def get_retriever():
    """Build the (cached) document retriever.

    Prefers Astra DB when the integration is installed and both
    credentials are present; otherwise falls back to an in-memory FAISS
    index built from a tiny seed corpus. Replace the seed texts with real
    documents to improve recall.
    """
    embeddings = get_embeddings()

    astra_token = os.getenv("ASTRA_DB_APPLICATION_TOKEN")
    astra_endpoint = os.getenv("ASTRA_DB_API_ENDPOINT")
    if ASTRA_ENABLED and astra_token and astra_endpoint:
        st.info("Using Astra DB Vector Store")
        vstore = AstraDBVectorStore(
            collection_name=os.getenv("ASTRA_DB_COLLECTION", "Bhagavad_gita_data"),
            embedding=embeddings,
            token=astra_token,
            api_endpoint=astra_endpoint,
        )
        return vstore.as_retriever(search_kwargs={"k": 4})

    st.info("Using Local FAISS Vector Store (fallback)")
    # Tiny starter corpus as (id, text) pairs.
    seed_docs = [
        ("1",
         "When the mind is steady and undisturbed by outcomes, actions become clear and effective. "
         "Focus on your role and allow results to unfold naturally."),
        ("2",
         "Detachment does not mean indifference; it means full effort without anxiety over reward. "
         "This brings calm strength and frees you from hesitation."),
        ("3",
         "Facing doubt with disciplined practice turns fear into clarity. "
         "Commit to small daily steps and align your choices with your higher duty."),
        ("4",
         "Your nature guides you toward purposeful work. "
         "Refine your skills, serve wholeheartedly, and let integrity be your anchor."),
    ]
    vs = FAISS.from_texts(
        texts=[text for _, text in seed_docs],
        embedding=embeddings,
        metadatas=[{"id": doc_id} for doc_id, _ in seed_docs],
    )
    return vs.as_retriever(search_kwargs={"k": 4})
|
| 268 |
+
|
| 269 |
+
# Build the heavyweight resources once at import time (both are cached
# by st.cache_resource, so reruns are cheap).
retriever = get_retriever()
llm = get_llm()
|
| 271 |
+
|
| 272 |
+
# ===================== Prompt =====================
|
| 273 |
|
| 274 |
# System prompt for the QA chain. RetrievalQA's "stuff" chain fills in
# {context} (the retrieved documents) and {question} (the user query).
prompt_template = """
You are a wise counselor drawing from ancient Indian wisdom to offer psychological guidance. Your role is to provide practical, concise advice for modern challenges.
You are going to be used for a psychiatrist assistance who gives advice in the context of the Bhagavad Gita.
Follow these guidelines:
1. Begin with a brief, relatable insight from timeless teachings.
2. Offer 4 to 6 specific, actionable points of advice.
3. Each point should start on a new line and be clear and concise.
4. Connect each piece of advice to universal principles of success and well-being.
5. Use metaphors or examples from ancient texts without explicitly naming them.
6. Conclude with an encouraging statement that motivates the user to apply the advice.
7. Avoid religious terminology. Use phrases like "ancient wisdom" or "timeless teachings" instead.
8. Ensure your response is practical, universally applicable, and inspirational.
9. If the input is not a real psychological dilemma/question, ask the user to enter a proper question.
10. If possible, give only a relevant Bhagavad Gita verse at the end (Verse number + a short translation in English). Do not cite other books.
11. If you can't find an exact verse, infer from context and give the Answer in Hindi.

Context:
{context}

Question:
{question}

Chat History (role, content):
{chat_history}
"""

# Bug fix: RetrievalQA's combine-documents ("stuff") chain only supplies
# {context} and {question} when formatting the prompt. Declaring
# {chat_history} as a required input variable made every prompt format
# fail with a missing-variable error, so it is bound as a partial with an
# empty default instead.
PROMPT = PromptTemplate(
    template=prompt_template,
    input_variables=["context", "question"],
    partial_variables={"chat_history": ""},
)
|
| 304 |
|
| 305 |
+
# ===================== Chain =====================
|
|
|
|
|
|
|
|
|
|
| 306 |
|
| 307 |
+
# One-call chain (cheaper / fewer rate-limit errors than ConversationalRetrievalChain)
|
| 308 |
+
# Single-call RetrievalQA chain: cheaper and less rate-limit-prone than
# ConversationalRetrievalChain (one LLM call per question).
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    retriever=retriever,
    chain_type="stuff",  # concatenate retrieved docs straight into the prompt
    chain_type_kwargs={"prompt": PROMPT},
    return_source_documents=False,
)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 315 |
|
| 316 |
+
# Backoff helper for free-tier rate limits
|
| 317 |
+
class RateLimitedError(Exception):
    """Raised when HF returns a rate limit or capacity error."""


@retry(
    reraise=True,
    wait=wait_exponential(multiplier=1, min=1, max=20),
    stop=stop_after_attempt(6),
    retry=retry_if_exception_type(RateLimitedError),
)
def ask(chain, question, history):
    """Run the QA chain once, retrying with exponential backoff on rate limits.

    Args:
        chain: the RetrievalQA chain built above.
        question: the user's question (str).
        history: list of (role, content) tuples; kept for interface
            compatibility — RetrievalQA is single-turn and does not use it.

    Returns:
        The chain's output dict; the answer is under the "result" key.

    Raises:
        RateLimitedError: when the provider reports a rate/capacity error
            (retried by the decorator, re-raised after the last attempt).
    """
    try:
        # Bug fix: RetrievalQA expects its input under the "query" key,
        # not "question" — the old key raised "Missing some input keys"
        # on every call. The retriever fills {context} internally.
        return chain({"query": question})
    except Exception as e:
        # Heuristic: treat 429 / overloaded / capacity messages as retryable.
        msg = str(e).lower()
        if any(k in msg for k in ["rate", "limit", "429", "overloaded", "capacity"]):
            raise RateLimitedError(e) from e
        raise
|
| 339 |
|
| 340 |
+
# ===================== UI State =====================
|
|
|
|
| 341 |
|
| 342 |
+
# simple UI rate-limit (1 request / 1.2s)
|
| 343 |
+
# Per-session state: timestamp of the last request (UI throttle) and the
# running chat transcript.
if "last_call" not in st.session_state:
    st.session_state.last_call = 0.0

if "messages" not in st.session_state:
    st.session_state.messages = []
|
| 348 |
|
| 349 |
+
# Display history
|
| 350 |
+
# Re-render the conversation so far (Streamlit reruns the script on
# every user event, so history must be redrawn each time).
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])
|
| 353 |
+
|
| 354 |
+
# ===================== Chat =====================
|
| 355 |
+
|
| 356 |
+
def wrap_points(text: str, width: int = 86) -> str:
    """Re-wrap model output to *width* columns, preserving bullet markers.

    Blank lines are kept as-is; lines starting with ``-``, ``*`` or ``•``
    keep their marker and wrap the remaining text with a hanging indent;
    all other lines are wrapped whole. Leading indentation before a
    bullet marker is not preserved (matches prior behavior).

    Fixes: removed the unused ``prefix`` local, and bullet-only lines
    (no text after the marker) no longer gain a trailing space.
    """
    lines = []
    for raw in text.split("\n"):
        stripped = raw.lstrip()
        if not raw.strip():
            lines.append("")
            continue
        if stripped.startswith(("-", "*", "•")):
            # Keep the bullet token; wrap only the text after it.
            bullet, _, rest = stripped.partition(" ")
            rest = rest.strip()
            wrapped = textwrap.fill(rest, width=width, subsequent_indent="  ")
            lines.append(f"{bullet} {wrapped}" if wrapped else bullet)
        else:
            lines.append(textwrap.fill(raw, width=width))
    return "\n".join(lines)
|
| 372 |
+
|
| 373 |
+
# ===== main chat handler (runs once per Streamlit event) =====
prompt = st.chat_input("What is your question?")
if prompt:
    # Throttle: at most one request every 1.2 seconds per session.
    if time.time() - st.session_state.last_call < 1.2:
        st.info("Please wait a moment before asking again.")
        st.stop()
    st.session_state.last_call = time.time()

    # Record and echo the user's turn.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        box = st.empty()
        box.markdown("_thinking…_")

        # History as (role, content) tuples; assistant turns are excluded.
        history = [
            (m["role"], m["content"])
            for m in st.session_state.messages
            if m["role"] != "assistant"
        ]
        try:
            res = ask(qa_chain, prompt, history)
            answer = res.get("result", "")
        except Exception as e:
            answer = f"Sorry — the model is busy right now: `{e}`. Please try again in a moment."

        answer = wrap_points(answer)
        box.markdown(answer)
        st.session_state.messages.append({"role": "assistant", "content": answer})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 400 |
|